diff --git a/storage/client/pom.xml b/storage/client/pom.xml index 97074511bf21d..80aa347a87a09 100644 --- a/storage/client/pom.xml +++ b/storage/client/pom.xml @@ -65,6 +65,26 @@ reactor-test test + + com.microsoft.azure + adal4j + test + + + org.spockframework + spock-core + test + + + cglib + cglib-nodep + test + + + uk.org.lidalia + slf4j-test + test + @@ -79,7 +99,6 @@ false - org.apache.maven.plugins maven-javadoc-plugin diff --git a/storage/client/src/main/java/com/azure/storage/blob/AccountSASPermission.java b/storage/client/src/main/java/com/azure/storage/blob/AccountSASPermission.java new file mode 100644 index 0000000000000..2e909098792d6 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/AccountSASPermission.java @@ -0,0 +1,252 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import java.util.Locale; + +/** + * This is a helper class to construct a string representing the permissions granted by an AccountSAS. Setting a value + * to true means that any SAS which uses these permissions will grant permissions for that operation. Once all the + * values are set, this should be serialized with toString and set as the permissions field on an + * {@link AccountSASSignatureValues} object. It is possible to construct the permissions string without this class, but + * the order of the permissions is particular and this class guarantees correctness. + */ +final class AccountSASPermission { + + private boolean read; + + private boolean add; + + private boolean create; + + private boolean write; + + private boolean delete; + + private boolean list; + + private boolean update; + + private boolean processMessages; + + /** + * Initializes an {@code AccountSASPermission} object with all fields set to false. + */ + public AccountSASPermission() { + } + + /** + * Creates an {@code AccountSASPermission} from the specified permissions string. This method will throw an + * {@code IllegalArgumentException} if it encounters a character that does not correspond to a valid permission. + * + * @param permString + * A {@code String} which represents the {@code SharedAccessAccountPermissions}. + * + * @return An {@code AccountSASPermission} object generated from the given {@code String}. + */ + public static AccountSASPermission parse(String permString) { + AccountSASPermission permissions = new AccountSASPermission(); + + for (int i = 0; i < permString.length(); i++) { + char c = permString.charAt(i); + switch (c) { + case 'r': + permissions.read = true; + break; + case 'w': + permissions.write = true; + break; + case 'd': + permissions.delete = true; + break; + case 'l': + permissions.list = true; + break; + case 'a': + permissions.add = true; + break; + case 'c': + permissions.create = true; + break; + case 'u': + permissions.update = true; + break; + case 'p': + permissions.processMessages = true; + break; + default: + throw new IllegalArgumentException( + String.format(Locale.ROOT, SR.ENUM_COULD_NOT_BE_PARSED_INVALID_VALUE, "Permissions", permString, c)); + } + } + return permissions; + } + + /** + * Permission to read resources and list queues and tables granted. + */ + public boolean read() { + return read; + } + + /** + * Permission to read resources and list queues and tables granted. + */ + public AccountSASPermission withRead(boolean read) { + this.read = read; + return this; + } + + /** + * Permission to add messages, table entities, and append to blobs granted. + */ + public boolean add() { + return add; + } + + /** + * Permission to add messages, table entities, and append to blobs granted. + */ + public AccountSASPermission withAdd(boolean add) { + this.add = add; + return this; + } + + /** + * Permission to create blobs and files granted. + */ + public boolean create() { + return create; + } + + /** + * Permission to create blobs and files granted. + */ + public AccountSASPermission withCreate(boolean create) { + this.create = create; + return this; + } + + /** + * Permission to write resources granted. + */ + public boolean write() { + return write; + } + + /** + * Permission to write resources granted. + */ + public AccountSASPermission withWrite(boolean write) { + this.write = write; + return this; + } + + /** + * Permission to delete resources granted. + */ + public boolean delete() { + return delete; + } + + /** + * Permission to delete resources granted. + */ + public AccountSASPermission withDelete(boolean delete) { + this.delete = delete; + return this; + } + + /** + * Permission to list blob containers, blobs, shares, directories, and files granted. + */ + public boolean list() { + return list; + } + + /** + * Permission to list blob containers, blobs, shares, directories, and files granted. + */ + public AccountSASPermission withList(boolean list) { + this.list = list; + return this; + } + + /** + * Permissions to update messages and table entities granted. + */ + public boolean update() { + return update; + } + + /** + * Permissions to update messages and table entities granted. + */ + public AccountSASPermission withUpdate(boolean update) { + this.update = update; + return this; + } + + /** + * Permission to get and delete messages granted. + */ + public boolean processMessages() { + return processMessages; + } + + /** + * Permission to get and delete messages granted. + */ + public AccountSASPermission withProcessMessages(boolean processMessages) { + this.processMessages = processMessages; + return this; + } + + /** + * Converts the given permissions to a {@code String}. Using this method will guarantee the permissions are in an + * order accepted by the service. + * + * @return A {@code String} which represents the {@code AccountSASPermissions}. + */ + @Override + public String toString() { + // The order of the characters should be as specified here to ensure correctness: + // https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas + final StringBuilder builder = new StringBuilder(); + + if (this.read) { + builder.append('r'); + } + + if (this.write) { + builder.append('w'); + } + + if (this.delete) { + builder.append('d'); + } + + if (this.list) { + builder.append('l'); + } + + if (this.add) { + builder.append('a'); + } + + if (this.create) { + builder.append('c'); + } + + if (this.update) { + builder.append('u'); + } + + if (this.processMessages) { + builder.append('p'); + } + + return builder.toString(); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/AccountSASResourceType.java b/storage/client/src/main/java/com/azure/storage/blob/AccountSASResourceType.java new file mode 100644 index 0000000000000..a7d6c477e22c8 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/AccountSASResourceType.java @@ -0,0 +1,133 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import java.util.Locale; + +/** + * This is a helper class to construct a string representing the resources accessible by an AccountSAS. Setting a value + * to true means that any SAS which uses these permissions will grant access to that resource type. Once all the + * values are set, this should be serialized with toString and set as the resources field on an + * {@link AccountSASSignatureValues} object. It is possible to construct the resources string without this class, but + * the order of the resources is particular and this class guarantees correctness. + */ +final class AccountSASResourceType { + + private boolean service; + + private boolean container; + + private boolean object; + + /** + * Initializes an {@code AccountSASResourceType} object with all fields set to false. + */ + public AccountSASResourceType() { + } + + /** + * Creates an {@code AccountSASResourceType} from the specified resource types string. This method will throw an + * {@code IllegalArgumentException} if it encounters a character that does not correspond to a valid resource type. + * + * @param resourceTypesString + * A {@code String} which represents the {@code AccountSASResourceTypes}. + * + * @return A {@code AccountSASResourceType} generated from the given {@code String}. + */ + public static AccountSASResourceType parse(String resourceTypesString) { + AccountSASResourceType resourceType = new AccountSASResourceType(); + + for (int i = 0; i < resourceTypesString.length(); i++) { + char c = resourceTypesString.charAt(i); + switch (c) { + case 's': + resourceType.service = true; + break; + case 'c': + resourceType.container = true; + break; + case 'o': + resourceType.object = true; + break; + default: + throw new IllegalArgumentException( + String.format(Locale.ROOT, SR.ENUM_COULD_NOT_BE_PARSED_INVALID_VALUE, + "Resource Types", resourceTypesString, c)); + } + } + return resourceType; + } + + /** + * Permission to access service level APIs granted. + */ + public boolean service() { + return service; + } + + /** + * Permission to access service level APIs granted. + */ + public AccountSASResourceType withService(boolean service) { + this.service = service; + return this; + } + + /** + * Permission to access container level APIs (Blob Containers, Tables, Queues, File Shares) granted. + */ + public boolean container() { + return container; + } + + /** + * Permission to access container level APIs (Blob Containers, Tables, Queues, File Shares) granted. + */ + public AccountSASResourceType withContainer(boolean container) { + this.container = container; + return this; + } + + /** + * Permission to access object level APIs (Blobs, Table Entities, Queue Messages, Files) granted. + */ + public boolean object() { + return object; + } + + /** + * Permission to access object level APIs (Blobs, Table Entities, Queue Messages, Files) granted. + */ + public AccountSASResourceType withObject(boolean object) { + this.object = object; + return this; + } + + /** + * Converts the given resource types to a {@code String}. Using this method will guarantee the resource types are in + * an order accepted by the service. + * + * @return A {@code String} which represents the {@code AccountSASResourceTypes}. + */ + @Override + public String toString() { + // The order of the characters should be as specified here to ensure correctness: + // https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas + StringBuilder builder = new StringBuilder(); + + if (this.service) { + builder.append('s'); + } + + if (this.container) { + builder.append('c'); + } + + if (this.object) { + builder.append('o'); + } + + return builder.toString(); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/AccountSASService.java b/storage/client/src/main/java/com/azure/storage/blob/AccountSASService.java new file mode 100644 index 0000000000000..1ba5564c542ce --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/AccountSASService.java @@ -0,0 +1,154 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import java.util.Locale; + +/** + * This is a helper class to construct a string representing the services accessible by an AccountSAS. Setting a value + * to true means that any SAS which uses these permissions will grant access to that service. Once all the + * values are set, this should be serialized with toString and set as the services field on an + * {@link AccountSASSignatureValues} object. It is possible to construct the services string without this class, but + * the order of the services is particular and this class guarantees correctness. + */ +final class AccountSASService { + + private boolean blob; + + private boolean file; + + private boolean queue; + + private boolean table; + + /** + * Initializes an {@code AccountSASService} object with all fields set to false. + */ + public AccountSASService() { + } + + /** + * Creates an {@code AccountSASService} from the specified services string. This method will throw an + * {@code IllegalArgumentException} if it encounters a character that does not correspond to a valid service. + * + * @param servicesString + * A {@code String} which represents the {@code SharedAccessAccountServices}. + * + * @return A {@code AccountSASService} generated from the given {@code String}. + */ + public static AccountSASService parse(String servicesString) { + AccountSASService services = new AccountSASService(); + + for (int i = 0; i < servicesString.length(); i++) { + char c = servicesString.charAt(i); + switch (c) { + case 'b': + services.blob = true; + break; + case 'f': + services.file = true; + break; + case 'q': + services.queue = true; + break; + case 't': + services.table = true; + break; + default: + throw new IllegalArgumentException( + String.format(Locale.ROOT, SR.ENUM_COULD_NOT_BE_PARSED_INVALID_VALUE, "Services", + servicesString, c)); + } + } + return services; + } + + /** + * Permission to access blob resources granted. + */ + public boolean blob() { + return blob; + } + + /** + * Permission to access blob resources granted. + */ + public AccountSASService withBlob(boolean blob) { + this.blob = blob; + return this; + } + + /** + * Permission to access file resources granted. + */ + public boolean file() { + return file; + } + + /** + * Permission to access file resources granted. + */ + public AccountSASService withFile(boolean file) { + this.file = file; + return this; + } + + /** + * Permission to access queue resources granted. + */ + public boolean queue() { + return queue; + } + + /** + * Permission to access queue resources granted. + */ + public AccountSASService withQueue(boolean queue) { + this.queue = queue; + return this; + } + + /** + * Permission to access table resources granted. + */ + public boolean table() { + return table; + } + + /** + * Permission to access table resources granted. + */ + public AccountSASService withTable(boolean table) { + this.table = table; + return this; + } + + /** + * Converts the given services to a {@code String}. Using this method will guarantee the services are in an order + * accepted by the service. + * + * @return A {@code String} which represents the {@code AccountSASServices}. + */ + @Override + public String toString() { + // The order of the characters should be as specified here to ensure correctness: + // https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas + StringBuilder value = new StringBuilder(); + + if (this.blob) { + value.append('b'); + } + if (this.queue) { + value.append('q'); + } + if (this.table) { + value.append('t'); + } + if (this.file) { + value.append('f'); + } + + return value.toString(); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/AccountSASSignatureValues.java b/storage/client/src/main/java/com/azure/storage/blob/AccountSASSignatureValues.java new file mode 100644 index 0000000000000..c14d218574a01 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/AccountSASSignatureValues.java @@ -0,0 +1,226 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import java.security.InvalidKeyException; +import java.time.OffsetDateTime; + +/** + * AccountSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account. Once + * all the values here are set appropriately, call generateSASQueryParameters to obtain a representation of the SAS + * which can actually be applied to blob urls. Note: that both this class and {@link SASQueryParameters} exist because + * the former is mutable and a logical representation while the latter is immutable and used to generate actual REST + * requests. + *

+ * Please see + * here + * for more conceptual information on SAS: + *

+ *

+ * Please see + * here for further + * descriptions of the parameters, including which are required: + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=account_sas "Sample code for AccountSASSignatureValues")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ +final class AccountSASSignatureValues { + + private String version = Constants.HeaderConstants.TARGET_STORAGE_VERSION; + + private SASProtocol protocol; + + private OffsetDateTime startTime; + + private OffsetDateTime expiryTime; + + private String permissions; + + private IPRange ipRange; + + private String services; + + private String resourceTypes; + + /** + * Initializes an {@code AccountSASSignatureValues} object with the version number set to the default and all + * other values empty. + */ + public AccountSASSignatureValues() { + } + + /** + * If null or empty, this defaults to the service version targeted by this version of the library. + */ + public String version() { + return version; + } + + /** + * If null or empty, this defaults to the service version targeted by this version of the library. + */ + public AccountSASSignatureValues withVersion(String version) { + this.version = version; + return this; + } + + /** + * {@link SASProtocol} + */ + public SASProtocol protocol() { + return protocol; + } + + /** + * {@link SASProtocol} + */ + public AccountSASSignatureValues withProtocol(SASProtocol protocol) { + this.protocol = protocol; + return this; + } + + /** + * When the SAS will take effect. + */ + public OffsetDateTime startTime() { + return startTime; + } + + /** + * When the SAS will take effect. + */ + public AccountSASSignatureValues withStartTime(OffsetDateTime startTime) { + this.startTime = startTime; + return this; + } + + /** + * The time after which the SAS will no longer work. + */ + public OffsetDateTime expiryTime() { + return expiryTime; + } + + /** + * The time after which the SAS will no longer work. + */ + public AccountSASSignatureValues withExpiryTime(OffsetDateTime expiryTime) { + this.expiryTime = expiryTime; + return this; + } + + /** + * Specifies which operations the SAS user may perform. Please refer to {@link AccountSASPermission} for help + * constructing the permissions string. + */ + public String permissions() { + return permissions; + } + + /** + * Specifies which operations the SAS user may perform. Please refer to {@link AccountSASPermission} for help + * constructing the permissions string. + */ + public AccountSASSignatureValues withPermissions(String permissions) { + this.permissions = permissions; + return this; + } + + /** + * {@link IPRange} + */ + public IPRange ipRange() { + return ipRange; + } + + /** + * {@link IPRange} + */ + public AccountSASSignatureValues withIpRange(IPRange ipRange) { + this.ipRange = ipRange; + return this; + } + + /** + * The values that indicate the services accessible with this SAS. Please refer to {@link AccountSASService} to + * construct this value. + */ + public String services() { + return services; + } + + /** + * The values that indicate the services accessible with this SAS. Please refer to {@link AccountSASService} to + * construct this value. + */ + public AccountSASSignatureValues withServices(String services) { + this.services = services; + return this; + } + + /** + * The values that indicate the resource types accessible with this SAS. Please refer + * to {@link AccountSASResourceType} to construct this value. + */ + public String resourceTypes() { + return resourceTypes; + } + + /** + * The values that indicate the resource types accessible with this SAS. Please refer + * to {@link AccountSASResourceType} to construct this value. + */ + public AccountSASSignatureValues withResourceTypes(String resourceTypes) { + this.resourceTypes = resourceTypes; + return this; + } + + /** + * Generates a {@link SASQueryParameters} object which contains all SAS query parameters needed to make an actual + * REST request. + * + * @param sharedKeyCredentials + * Credentials for the storage account and corresponding primary or secondary key. + * + * @return {@link SASQueryParameters} + */ + public SASQueryParameters generateSASQueryParameters(SharedKeyCredentials sharedKeyCredentials) { + Utility.assertNotNull("SharedKeyCredentials", sharedKeyCredentials); + Utility.assertNotNull("services", this.services); + Utility.assertNotNull("resourceTypes", this.resourceTypes); + Utility.assertNotNull("expiryTime", this.expiryTime); + Utility.assertNotNull("permissions", this.permissions); + Utility.assertNotNull("version", this.version); + + // Signature is generated on the un-url-encoded values. + final String stringToSign = stringToSign(sharedKeyCredentials); + + String signature; + try { + signature = sharedKeyCredentials.computeHmac256(stringToSign); + } catch (InvalidKeyException e) { + throw new Error(e); // The key should have been validated by now. If it is no longer valid here, we fail. + } + + return new SASQueryParameters(this.version, this.services, resourceTypes, + this.protocol, this.startTime, this.expiryTime, this.ipRange, null, + null, this.permissions, signature, null, null, null, null, null, null); + } + + private String stringToSign(final SharedKeyCredentials sharedKeyCredentials) { + return String.join("\n", + sharedKeyCredentials.getAccountName(), + AccountSASPermission.parse(this.permissions).toString(), // guarantees ordering + this.services, + resourceTypes, + this.startTime == null ? "" : Utility.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime), + Utility.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime), + this.ipRange == null ? (new IPRange()).toString() : this.ipRange.toString(), + this.protocol == null ? "" : this.protocol.toString(), + this.version, + Constants.EMPTY_STRING // Account SAS requires an additional newline character + ); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/AnonymousCredentials.java b/storage/client/src/main/java/com/azure/storage/blob/AnonymousCredentials.java new file mode 100644 index 0000000000000..923b1c941dd9f --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/AnonymousCredentials.java @@ -0,0 +1,31 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.http.HttpPipelineCallContext; +import com.azure.core.http.HttpPipelineNextPolicy; +import com.azure.core.http.HttpResponse; +import reactor.core.publisher.Mono; + +/** + * Anonymous credentials are to be used with with HTTP(S) requests that read blobs from public containers or requests + * that use a Shared Access Signature (SAS). This is because Anonymous credentials will not set an Authorization header. + * Pass an instance of this class as the credentials parameter when creating a new pipeline (typically with + * {@link StorageURL}). + */ +public final class AnonymousCredentials implements ICredentials { + + /** + * Returns an empty instance of {@code AnonymousCredentials}. + */ + public AnonymousCredentials() { + } + + + + @Override + public Mono process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { + return next.process(); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/AppendBlobAccessConditions.java b/storage/client/src/main/java/com/azure/storage/blob/AppendBlobAccessConditions.java new file mode 100644 index 0000000000000..2d137418118e6 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/AppendBlobAccessConditions.java @@ -0,0 +1,88 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.storage.blob.models.AppendPositionAccessConditions; +import com.azure.storage.blob.models.LeaseAccessConditions; +import com.azure.storage.blob.models.ModifiedAccessConditions; + +/** + * This class contains values that restrict the successful completion of AppendBlock operations to certain conditions. + * Any field may be set to null if no access conditions are desired. + *

+ * Please refer to the request header section + * here for more conceptual + * information. + */ +public final class AppendBlobAccessConditions { + + private AppendPositionAccessConditions appendPositionAccessConditions; + + private ModifiedAccessConditions modifiedAccessConditions; + + private LeaseAccessConditions leaseAccessConditions; + + /** + * Creates an instance which has fields set to non-null, empty values. + */ + public AppendBlobAccessConditions() { + appendPositionAccessConditions = new AppendPositionAccessConditions(); + modifiedAccessConditions = new ModifiedAccessConditions(); + leaseAccessConditions = new LeaseAccessConditions(); + } + + /** + * Access conditions used for appending data only if the operation meets the provided conditions related to the + * size of the append blob. + */ + public AppendPositionAccessConditions appendPositionAccessConditions() { + return appendPositionAccessConditions; + } + + /** + * Access conditions used for appending data only if the operation meets the provided conditions related to the + * size of the append blob. + */ + public AppendBlobAccessConditions withAppendPositionAccessConditions( + AppendPositionAccessConditions appendPositionAccessConditions) { + this.appendPositionAccessConditions = appendPositionAccessConditions; + return this; + } + + /** + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used to + * construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + */ + public ModifiedAccessConditions modifiedAccessConditions() { + return modifiedAccessConditions; + } + + /** + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used to + * construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + */ + public AppendBlobAccessConditions withModifiedAccessConditions(ModifiedAccessConditions modifiedAccessConditions) { + this.modifiedAccessConditions = modifiedAccessConditions; + return this; + } + + /** + * By setting lease access conditions, requests will fail if the provided lease does not match the active lease on + * the blob. + */ + public LeaseAccessConditions leaseAccessConditions() { + return leaseAccessConditions; + } + + /** + * By setting lease access conditions, requests will fail if the provided lease does not match the active lease on + * the blob. + */ + public AppendBlobAccessConditions withLeaseAccessConditions(LeaseAccessConditions leaseAccessConditions) { + this.leaseAccessConditions = leaseAccessConditions; + return this; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/AppendBlobAsyncClient.java b/storage/client/src/main/java/com/azure/storage/blob/AppendBlobAsyncClient.java new file mode 100644 index 0000000000000..73607c2ed3f26 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/AppendBlobAsyncClient.java @@ -0,0 +1,216 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.http.rest.ResponseBase; +import com.azure.core.util.Context; +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; +import com.azure.storage.blob.models.AppendBlobAppendBlockFromUrlHeaders; +import com.azure.storage.blob.models.AppendBlobAppendBlockHeaders; +import com.azure.storage.blob.models.AppendBlobCreateHeaders; +import com.azure.storage.blob.models.BlobHTTPHeaders; +import com.azure.storage.blob.models.SourceModifiedAccessConditions; +import io.netty.buffer.ByteBuf; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.net.URL; + + +/** + * Client to an append blob. It may only be instantiated through a {@link AppendBlobClientBuilder}, via + * the method {@link BlobAsyncClient#asAppendBlobAsyncClient()}, or via the method + * {@link ContainerAsyncClient#getAppendBlobAsyncClient(String)}. This class does not hold + * any state about a particular blob, but is instead a convenient way of sending appropriate + * requests to the resource on the service. + * + *

+ * This client contains operations on a blob. Operations on a container are available on {@link ContainerAsyncClient}, + * and operations on the service are available on {@link StorageAsyncClient}. + * + *

+ * Please refer + * to the Azure Docs + * for more information. + * + *

+ * Note this client is an async client that returns reactive responses from Spring Reactor Core + * project (https://projectreactor.io/). Calling the methods in this client will NOT + * start the actual network operation, until {@code .subscribe()} is called on the reactive response. + * You can simply convert one of these responses to a {@link java.util.concurrent.CompletableFuture} + * object through {@link Mono#toFuture()}. + */ +public final class AppendBlobAsyncClient extends BlobAsyncClient { + AppendBlobAsyncRawClient appendBlobAsyncRawClient; + + /** + * Indicates the maximum number of bytes that can be sent in a call to appendBlock. + */ + public static final int MAX_APPEND_BLOCK_BYTES = 4 * Constants.MB; + + /** + * Indicates the maximum number of blocks allowed in an append blob. + */ + public static final int MAX_BLOCKS = 50000; + + /** + * Package-private constructor for use by {@link AppendBlobClientBuilder}. + * @param azureBlobStorage the API client for blob storage API + */ + AppendBlobAsyncClient(AzureBlobStorageImpl azureBlobStorage) { + super(azureBlobStorage); + appendBlobAsyncRawClient = new AppendBlobAsyncRawClient(azureBlobStorage); + } + + /** + * Static method for getting a new builder for this class. + * + * @return + * A new {@link AppendBlobClientBuilder} instance. + */ + public static AppendBlobClientBuilder appendBlobClientBuilder() { + return new AppendBlobClientBuilder(); + } + + /** + * Creates a 0-length append blob. Call appendBlock to append data to an append blob. + * + * @return + * A reactive response containing the information of the created appended blob. + */ + public Mono create() { + return this.create(null, null, null, null); + } + + /** + * Creates a 0-length append blob. Call appendBlock to append data to an append blob. + * + * @param headers + * {@link BlobHTTPHeaders} + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response containing the information of the created appended blob. + */ + public Mono create(BlobHTTPHeaders headers, Metadata metadata, + BlobAccessConditions accessConditions, Context context) { + return appendBlobAsyncRawClient + .create(headers, metadata, accessConditions, context) + .map(ResponseBase::deserializedHeaders); + } + + /** + * Commits a new block of data to the end of the existing append blob. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + * + * @param data + * The data to write to the blob. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flux must produce the same data each time it is subscribed to. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * emitted by the {@code Flux}. + * + * @return + * A reactive response containing the information of the append blob operation. + */ + public Mono appendBlock(Flux data, long length) { + return this.appendBlock(data, length, null, null); + } + + /** + * Commits a new block of data to the end of the existing append blob. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + * + * @param data + * The data to write to the blob. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flux must produce the same data each time it is subscribed to. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * emitted by the {@code Flux}. + * @param appendBlobAccessConditions + * {@link AppendBlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response containing the information of the append blob operation. + */ + public Mono appendBlock(Flux data, long length, + AppendBlobAccessConditions appendBlobAccessConditions, Context context) { + return appendBlobAsyncRawClient + .appendBlock(data, length, appendBlobAccessConditions, context) + .map(ResponseBase::deserializedHeaders); + } + + /** + * Commits a new block of data from another blob to the end of this append blob. + * + * @param sourceURL + * The url to the blob that will be the source of the copy. A source blob in the same storage account can + * be authenticated via Shared Key. However, if the source is a blob in another account, the source blob + * must either be public or must be authenticated via a shared access signature. If the source blob is + * public, no authentication is required to perform the operation. + * @param sourceRange + * The source {@link BlobRange} to copy. + * + * @return + * A reactive response containing the information of the append blob operation. + */ + public Mono appendBlockFromUrl(URL sourceURL, BlobRange sourceRange) { + return this.appendBlockFromUrl(sourceURL, sourceRange, null, null, + null, null); + } + + /** + * Commits a new block of data from another blob to the end of this append blob. + * + * @param sourceURL + * The url to the blob that will be the source of the copy. A source blob in the same storage account can + * be authenticated via Shared Key. However, if the source is a blob in another account, the source blob + * must either be public or must be authenticated via a shared access signature. If the source blob is + * public, no authentication is required to perform the operation. + * @param sourceRange + * {@link BlobRange} + * @param sourceContentMD5 + * An MD5 hash of the block content from the source blob. If specified, the service will calculate the MD5 + * of the received data and fail the request if it does not match the provided MD5. + * @param destAccessConditions + * {@link AppendBlobAccessConditions} + * @param sourceAccessConditions + * {@link SourceModifiedAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response containing the information of the append blob operation. + */ + public Mono appendBlockFromUrl(URL sourceURL, BlobRange sourceRange, + byte[] sourceContentMD5, AppendBlobAccessConditions destAccessConditions, + SourceModifiedAccessConditions sourceAccessConditions, Context context) { + return appendBlobAsyncRawClient + .appendBlockFromUrl(sourceURL, sourceRange, sourceContentMD5, destAccessConditions, sourceAccessConditions, context) + .map(ResponseBase::deserializedHeaders); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/AppendBlobAsyncRawClient.java b/storage/client/src/main/java/com/azure/storage/blob/AppendBlobAsyncRawClient.java new file mode 100644 index 0000000000000..17fc536b9873f --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/AppendBlobAsyncRawClient.java @@ -0,0 +1,207 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.util.Context; +import com.azure.storage.blob.implementation.AzureBlobStorageBuilder; +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; +import com.azure.storage.blob.models.AppendBlobsAppendBlockFromUrlResponse; +import com.azure.storage.blob.models.AppendBlobsAppendBlockResponse; +import com.azure.storage.blob.models.AppendBlobsCreateResponse; +import com.azure.storage.blob.models.BlobHTTPHeaders; +import com.azure.storage.blob.models.SourceModifiedAccessConditions; +import io.netty.buffer.ByteBuf; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.net.URL; + +import static com.azure.storage.blob.Utility.postProcessResponse; + + +/** + * Represents a URL to an append blob. It may be obtained by direct construction or via the create method on a + * {@link ContainerAsyncClient} object. This class does not hold any state about a particular append blob but is instead a + * convenient way of sending off appropriate requests to the resource on the service. Please refer to the + * Azure Docs + */ +final class AppendBlobAsyncRawClient extends BlobAsyncRawClient { + + /** + * Indicates the maximum number of bytes that can be sent in a call to appendBlock. + */ + public static final int MAX_APPEND_BLOCK_BYTES = 4 * Constants.MB; + + /** + * Indicates the maximum number of blocks allowed in an append blob. + */ + public static final int MAX_BLOCKS = 50000; + + /** + * Creates a {@code AppendBlobAsyncRawClient} object pointing to the account specified by the URL and using the provided + * pipeline to make HTTP requests. + */ + AppendBlobAsyncRawClient(AzureBlobStorageImpl azureBlobStorage) { + super(azureBlobStorage); + } + + /** + * Creates a 0-length append blob. Call AppendBlock to append data to an append blob. For more information, see + * the Azure Docs. + * + * @return Emits the successful response. + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=append_blob "Sample code for AppendBlobAsyncRawClient.create")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono create() { + return this.create(null, null, null, null); + } + + /** + * Creates a 0-length append blob. Call AppendBlock to append data to an append blob. For more information, see + * the Azure Docs. + * + * @param headers {@link BlobHTTPHeaders} + * @param metadata {@link Metadata} + * @param accessConditions {@link BlobAccessConditions} + * @param context {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * @return Emits the successful response. + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=append_blob "Sample code for AppendBlobAsyncRawClient.create")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono create(BlobHTTPHeaders headers, Metadata metadata, + BlobAccessConditions accessConditions, Context context) { + metadata = metadata == null ? new Metadata() : metadata; + accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions; + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.appendBlobs().createWithRestResponseAsync(null, + null, 0, null, metadata, null, null, + null, null, headers, accessConditions.leaseAccessConditions(), + accessConditions.modifiedAccessConditions(), context)); + } + + /** + * Commits a new block of data to the end of the existing append blob. For more information, see the + * Azure Docs. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + * + * @param data The data to write to the blob. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * @param length The exact length of the data. It is important that this value match precisely the length of the data + * emitted by the {@code Flux}. + * @return Emits the successful response. + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=append_blob "Sample code for AppendBlobAsyncRawClient.appendBlock")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono appendBlock(Flux data, long length) { + return this.appendBlock(data, length, null, null); + } + + /** + * Commits a new block of data to the end of the existing append blob. For more information, see the + * Azure Docs. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + * + * @param data The data to write to the blob. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * @param length The exact length of the data. It is important that this value match precisely the length of the data + * emitted by the {@code Flux}. + * @param appendBlobAccessConditions {@link AppendBlobAccessConditions} + * @param context {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * @return Emits the successful response. + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=append_blob "Sample code for AppendBlobAsyncRawClient.appendBlock")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono appendBlock(Flux data, long length, + AppendBlobAccessConditions appendBlobAccessConditions, Context context) { + appendBlobAccessConditions = appendBlobAccessConditions == null ? new AppendBlobAccessConditions() + : appendBlobAccessConditions; + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.appendBlobs().appendBlockWithRestResponseAsync( + null, null, data, length, null, null, + null, null, null, null, + appendBlobAccessConditions.leaseAccessConditions(), + appendBlobAccessConditions.appendPositionAccessConditions(), + appendBlobAccessConditions.modifiedAccessConditions(), context)); + } + + /** + * Commits a new block of data from another blob to the end of this append blob. For more information, see the + * Azure Docs. + *

+ * + * @param sourceURL The url to the blob that will be the source of the copy. A source blob in the same storage account can + * be authenticated via Shared Key. However, if the source is a blob in another account, the source blob + * must either be public or must be authenticated via a shared access signature. If the source blob is + * public, no authentication is required to perform the operation. + * @param sourceRange The source {@link BlobRange} to copy. + * @return Emits the successful response. + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=append_from_url "Sample code for AppendBlobAsyncRawClient.appendBlockFromUrl")] + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono appendBlockFromUrl(URL sourceURL, BlobRange sourceRange) { + return this.appendBlockFromUrl(sourceURL, sourceRange, null, null, + null, null); + } + + /** + * Commits a new block of data from another blob to the end of this append blob. For more information, see the + * Azure Docs. + *

+ * + * @param sourceURL The url to the blob that will be the source of the copy. A source blob in the same storage account can + * be authenticated via Shared Key. However, if the source is a blob in another account, the source blob + * must either be public or must be authenticated via a shared access signature. If the source blob is + * public, no authentication is required to perform the operation. + * @param sourceRange {@link BlobRange} + * @param sourceContentMD5 An MD5 hash of the block content from the source blob. If specified, the service will calculate the MD5 + * of the received data and fail the request if it does not match the provided MD5. + * @param destAccessConditions {@link AppendBlobAccessConditions} + * @param sourceAccessConditions {@link SourceModifiedAccessConditions} + * @param context {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * @return Emits the successful response. + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=append_from_url "Sample code for AppendBlobAsyncRawClient.appendBlockFromUrl")] + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono appendBlockFromUrl(URL sourceURL, BlobRange sourceRange, + byte[] sourceContentMD5, AppendBlobAccessConditions destAccessConditions, + SourceModifiedAccessConditions sourceAccessConditions, Context context) { + + sourceRange = sourceRange == null ? new BlobRange(0) : sourceRange; + destAccessConditions = destAccessConditions == null + ? new AppendBlobAccessConditions() : destAccessConditions; + context = context == null ? Context.NONE : context; + + return postProcessResponse( + this.azureBlobStorage.appendBlobs().appendBlockFromUrlWithRestResponseAsync(null, null, + sourceURL, 0, sourceRange.toString(), sourceContentMD5, null, null, + destAccessConditions.leaseAccessConditions(), + destAccessConditions.appendPositionAccessConditions(), + destAccessConditions.modifiedAccessConditions(), sourceAccessConditions, context)); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/AppendBlobClient.java b/storage/client/src/main/java/com/azure/storage/blob/AppendBlobClient.java new file mode 100644 index 0000000000000..97bc08151b3b1 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/AppendBlobClient.java @@ -0,0 +1,218 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.util.Context; +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; +import com.azure.storage.blob.models.AppendBlobAppendBlockFromUrlHeaders; +import com.azure.storage.blob.models.AppendBlobAppendBlockHeaders; +import com.azure.storage.blob.models.AppendBlobCreateHeaders; +import com.azure.storage.blob.models.BlobHTTPHeaders; +import com.azure.storage.blob.models.SourceModifiedAccessConditions; +import io.netty.buffer.ByteBuf; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.net.URL; +import java.time.Duration; + + +/** + * Client to an append blob. It may only be instantiated through a {@link AppendBlobClientBuilder}, via + * the method {@link BlobClient#asAppendBlobClient()}, or via the method + * {@link ContainerClient#getAppendBlobClient(String)}. This class does not hold + * any state about a particular blob, but is instead a convenient way of sending appropriate + * requests to the resource on the service. + * + *

+ * This client contains operations on a blob. Operations on a container are available on {@link ContainerClient}, + * and operations on the service are available on {@link StorageClient}. + * + *

+ * Please refer to the Azure Docs + * for more information. + */ +public final class AppendBlobClient extends BlobClient { + + AppendBlobAsyncClient appendBlobAsyncClient; + + /** + * Indicates the maximum number of bytes that can be sent in a call to appendBlock. + */ + public static final int MAX_APPEND_BLOCK_BYTES = 4 * Constants.MB; + + /** + * Indicates the maximum number of blocks allowed in an append blob. + */ + public static final int MAX_BLOCKS = 50000; + + /** + * Package-private constructor for use by {@link AppendBlobClientBuilder}. + * @param azureBlobStorage the API client for blob storage API + */ + AppendBlobClient(AzureBlobStorageImpl azureBlobStorage) { + super(azureBlobStorage); + this.appendBlobAsyncClient = new AppendBlobAsyncClient(azureBlobStorage); + } + + /** + * Static method for getting a new builder for this class. + * + * @return + * A new {@link AppendBlobClientBuilder} instance. + */ + public static AppendBlobClientBuilder appendBlobClientBuilder() { + return new AppendBlobClientBuilder(); + } + + /** + * Creates a 0-length append blob. Call appendBlock to append data to an append blob. + * + * @return + * The information of the created appended blob. + */ + public AppendBlobCreateHeaders create() { + return this.create(null, null, null, null, null); + } + + /** + * Creates a 0-length append blob. Call appendBlock to append data to an append blob. + * + * @param headers + * {@link BlobHTTPHeaders} + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * The information of the created appended blob. + */ + public AppendBlobCreateHeaders create(BlobHTTPHeaders headers, Metadata metadata, + BlobAccessConditions accessConditions, Duration timeout, Context context) { + Mono response = appendBlobAsyncClient.create(headers, metadata, accessConditions, context); + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Commits a new block of data to the end of the existing append blob. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + * + * @param data + * The data to write to the blob. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flux must produce the same data each time it is subscribed to. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * emitted by the {@code Flux}. + * + * @return + * The information of the append blob operation. + */ + public AppendBlobAppendBlockHeaders appendBlock(Flux data, long length) { + return this.appendBlock(data, length, null, null, null); + } + + /** + * Commits a new block of data to the end of the existing append blob. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + * + * @param data + * The data to write to the blob. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flux must produce the same data each time it is subscribed to. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * emitted by the {@code Flux}. + * @param appendBlobAccessConditions + * {@link AppendBlobAccessConditions} + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * The information of the append blob operation. + */ + public AppendBlobAppendBlockHeaders appendBlock(Flux data, long length, + AppendBlobAccessConditions appendBlobAccessConditions, Duration timeout, Context context) { + Mono response = appendBlobAsyncClient.appendBlock(data, length, appendBlobAccessConditions, context); + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Commits a new block of data from another blob to the end of this append blob. + * + * @param sourceURL + * The url to the blob that will be the source of the copy. A source blob in the same storage account can + * be authenticated via Shared Key. However, if the source is a blob in another account, the source blob + * must either be public or must be authenticated via a shared access signature. If the source blob is + * public, no authentication is required to perform the operation. + * @param sourceRange + * The source {@link BlobRange} to copy. + * + * @return + * The information of the append blob operation. + */ + public AppendBlobAppendBlockFromUrlHeaders appendBlockFromUrl(URL sourceURL, BlobRange sourceRange) { + return this.appendBlockFromUrl(sourceURL, sourceRange, null, null, + null, null, null); + } + + /** + * Commits a new block of data from another blob to the end of this append blob. + * + * @param sourceURL + * The url to the blob that will be the source of the copy. A source blob in the same storage account can + * be authenticated via Shared Key. However, if the source is a blob in another account, the source blob + * must either be public or must be authenticated via a shared access signature. If the source blob is + * public, no authentication is required to perform the operation. + * @param sourceRange + * {@link BlobRange} + * @param sourceContentMD5 + * An MD5 hash of the block content from the source blob. If specified, the service will calculate the MD5 + * of the received data and fail the request if it does not match the provided MD5. + * @param destAccessConditions + * {@link AppendBlobAccessConditions} + * @param sourceAccessConditions + * {@link SourceModifiedAccessConditions} + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * The information of the append blob operation. + */ + public AppendBlobAppendBlockFromUrlHeaders appendBlockFromUrl(URL sourceURL, BlobRange sourceRange, + byte[] sourceContentMD5, AppendBlobAccessConditions destAccessConditions, + SourceModifiedAccessConditions sourceAccessConditions, Duration timeout, Context context) { + Mono response = appendBlobAsyncClient.appendBlockFromUrl(sourceURL, sourceRange, sourceContentMD5, destAccessConditions, sourceAccessConditions, context); + return timeout == null + ? response.block() + : response.block(timeout); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/AppendBlobClientBuilder.java b/storage/client/src/main/java/com/azure/storage/blob/AppendBlobClientBuilder.java new file mode 100644 index 0000000000000..9e511263f96c0 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/AppendBlobClientBuilder.java @@ -0,0 +1,220 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.configuration.Configuration; +import com.azure.core.http.HttpClient; +import com.azure.core.http.HttpPipeline; +import com.azure.core.http.policy.AddDatePolicy; +import com.azure.core.http.policy.HttpLogDetailLevel; +import com.azure.core.http.policy.HttpLoggingPolicy; +import com.azure.core.http.policy.HttpPipelinePolicy; +import com.azure.core.http.policy.RequestIdPolicy; +import com.azure.core.http.policy.RetryPolicy; +import com.azure.core.http.policy.UserAgentPolicy; +import com.azure.core.implementation.util.ImplUtils; +import com.azure.storage.blob.implementation.AzureBlobStorageBuilder; +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; + +import java.net.MalformedURLException; +import java.net.URL; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * Fluent AppendBlobClientBuilder for instantiating a {@link AppendBlobClient} or {@link AppendBlobAsyncClient}. + * + *

+ * An instance of this builder may only be created from static method {@link AppendBlobClient#appendBlobClientBuilder()}. + * The following information must be provided on this builder: + * + *

+ * + *

+ * Once all the configurations are set on this builder, call {@code .buildClient()} to create a + * {@link AppendBlobClient} or {@code .buildAsyncClient()} to create a {@link AppendBlobAsyncClient}. + */ +public final class AppendBlobClientBuilder { + private static final String ACCOUNT_NAME = "AccountName".toLowerCase(); + private static final String ACCOUNT_KEY = "AccountKey".toLowerCase(); + + private final List policies; + + private URL endpoint; + private ICredentials credentials = new AnonymousCredentials(); + private HttpClient httpClient; + private HttpLogDetailLevel logLevel; + private RetryPolicy retryPolicy; + private Configuration configuration; + + public AppendBlobClientBuilder() { + retryPolicy = new RetryPolicy(); + logLevel = HttpLogDetailLevel.NONE; + policies = new ArrayList<>(); + } + + /** + * Constructs an instance of AppendBlobAsyncClient based on the configurations stored in the appendBlobClientBuilder. + * @return a new client instance + */ + private AzureBlobStorageImpl buildImpl() { + Objects.requireNonNull(endpoint); + + // Closest to API goes first, closest to wire goes last. + final List policies = new ArrayList<>(); + + policies.add(new UserAgentPolicy(BlobConfiguration.NAME, BlobConfiguration.VERSION)); + policies.add(new RequestIdPolicy()); + policies.add(new AddDatePolicy()); + policies.add(credentials); // This needs to be a different credential type. + + policies.add(retryPolicy); + + policies.addAll(this.policies); + policies.add(new HttpLoggingPolicy(logLevel)); + + HttpPipeline pipeline = HttpPipeline.builder() + .policies(policies.toArray(new HttpPipelinePolicy[0])) + .httpClient(httpClient) + .build(); + + return new AzureBlobStorageBuilder() + .url(endpoint.toString()) + .pipeline(pipeline) + .build(); + } + + /** + * @return a {@link AppendBlobClient} created from the configurations in this builder. + */ + public AppendBlobClient buildClient() { + return new AppendBlobClient(buildImpl()); + } + + /** + * @return a {@link AppendBlobAsyncClient} created from the configurations in this builder. + */ + public AppendBlobAsyncClient buildAsyncClient() { + return new AppendBlobAsyncClient(buildImpl()); + } + + /** + * Sets the service endpoint, additionally parses it for information (SAS token, container name) + * @param endpoint URL of the service + * @return the updated AppendBlobClientBuilder object + */ + public AppendBlobClientBuilder endpoint(String endpoint) { + Objects.requireNonNull(endpoint); + try { + this.endpoint = new URL(endpoint); + } catch (MalformedURLException ex) { + throw new IllegalArgumentException("The Azure Storage Queue endpoint url is malformed."); + } + + return this; + } + + /** + * Sets the credentials used to authorize requests sent to the service + * @param credentials authorization credentials + * @return the updated AppendBlobClientBuilder object + */ + public AppendBlobClientBuilder credentials(SharedKeyCredentials credentials) { + this.credentials = credentials; + return this; + } + + /** + * Sets the credentials used to authorize requests sent to the service + * @param credentials authorization credentials + * @return the updated AppendBlobClientBuilder object + */ + public AppendBlobClientBuilder credentials(TokenCredentials credentials) { + this.credentials = credentials; + return this; + } + + /** + * Clears the credentials used to authorize requests sent to the service + * @return the updated AppendBlobClientBuilder object + */ + public AppendBlobClientBuilder anonymousCredentials() { + this.credentials = new AnonymousCredentials(); + return this; + } + + /** + * Sets the connection string for the service, parses it for authentication information (account name, account key) + * @param connectionString connection string from access keys section + * @return the updated AppendBlobClientBuilder object + */ + public AppendBlobClientBuilder connectionString(String connectionString) { + Objects.requireNonNull(connectionString); + + Map connectionKVPs = new HashMap<>(); + for (String s : connectionString.split(";")) { + String[] kvp = s.split("=", 2); + connectionKVPs.put(kvp[0].toLowerCase(), kvp[1]); + } + + String accountName = connectionKVPs.get(ACCOUNT_NAME); + String accountKey = connectionKVPs.get(ACCOUNT_KEY); + + if (ImplUtils.isNullOrEmpty(accountName) || ImplUtils.isNullOrEmpty(accountKey)) { + throw new IllegalArgumentException("Connection string must contain 'AccountName' and 'AccountKey'."); + } + + // Use accountName and accountKey to get the SAS token using the credential class. + credentials = new SharedKeyCredentials(accountName, accountKey); + + return this; + } + + /** + * Sets the http client used to send service requests + * @param httpClient http client to send requests + * @return the updated AppendBlobClientBuilder object + */ + public AppendBlobClientBuilder httpClient(HttpClient httpClient) { + this.httpClient = httpClient; + return this; + } + + /** + * Adds a pipeline policy to apply on each request sent + * @param pipelinePolicy a pipeline policy + * @return the updated AppendBlobClientBuilder object + */ + public AppendBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { + this.policies.add(pipelinePolicy); + return this; + } + + /** + * Sets the logging level for service requests + * @param logLevel logging level + * @return the updated AppendBlobClientBuilder object + */ + public AppendBlobClientBuilder httpLogDetailLevel(HttpLogDetailLevel logLevel) { + this.logLevel = logLevel; + return this; + } + + /** + * Sets the configuration object used to retrieve environment configuration values used to buildClient the client with + * when they are not set in the appendBlobClientBuilder, defaults to Configuration.NONE + * @param configuration configuration store + * @return the updated AppendBlobClientBuilder object + */ + public AppendBlobClientBuilder configuration(Configuration configuration) { + this.configuration = configuration; + return this; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/AppendBlobRawClient.java b/storage/client/src/main/java/com/azure/storage/blob/AppendBlobRawClient.java new file mode 100644 index 0000000000000..22abc039755b6 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/AppendBlobRawClient.java @@ -0,0 +1,220 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.http.HttpPipeline; +import com.azure.core.util.Context; +import com.azure.storage.blob.implementation.AzureBlobStorageBuilder; +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; +import com.azure.storage.blob.models.*; +import io.netty.buffer.ByteBuf; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.net.URL; +import java.nio.ByteBuffer; +import java.time.Duration; + + +/** + * Represents a URL to an append blob. It may be obtained by direct construction or via the create method on a + * {@link ContainerAsyncClient} object. This class does not hold any state about a particular append blob but is instead a + * convenient way of sending off appropriate requests to the resource on the service. Please refer to the + * Azure Docs + */ +final class AppendBlobRawClient extends BlobRawClient { + + AppendBlobAsyncRawClient appendBlobAsyncRawClient; + + /** + * Indicates the maximum number of bytes that can be sent in a call to appendBlock. + */ + public static final int MAX_APPEND_BLOCK_BYTES = 4 * Constants.MB; + + /** + * Indicates the maximum number of blocks allowed in an append blob. + */ + public static final int MAX_BLOCKS = 50000; + + /** + * Creates a {@code AppendBlobAsyncRawClient} object pointing to the account specified by the URL and using the provided + * pipeline to make HTTP requests. + * {@link StorageURL#createPipeline(ICredentials, PipelineOptions)} for more information. + */ + AppendBlobRawClient(AzureBlobStorageImpl azureBlobStorage) { + super(azureBlobStorage); + this.appendBlobAsyncRawClient = new AppendBlobAsyncRawClient(azureBlobStorage); + } + + + /** + * Creates a 0-length append blob. Call AppendBlock to append data to an append blob. For more information, see + * the Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=append_blob "Sample code for AppendBlobAsyncRawClient.create")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public AppendBlobsCreateResponse create() { + return this.create(null, null, null, null, null); + } + + /** + * Creates a 0-length append blob. Call AppendBlock to append data to an append blob. For more information, see + * the Azure Docs. + * + * @param headers + * {@link BlobHTTPHeaders} + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=append_blob "Sample code for AppendBlobAsyncRawClient.create")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public AppendBlobsCreateResponse create(BlobHTTPHeaders headers, Metadata metadata, + BlobAccessConditions accessConditions, Duration timeout, Context context) { + Mono response = appendBlobAsyncRawClient.create(headers, metadata, accessConditions, context); + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Commits a new block of data to the end of the existing append blob. For more information, see the + * Azure Docs. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + * + * @param data + * The data to write to the blob. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * emitted by the {@code Flux}. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=append_blob "Sample code for AppendBlobAsyncRawClient.appendBlock")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public AppendBlobsAppendBlockResponse appendBlock(Flux data, long length) { + return this.appendBlock(data, length, null, null, null); + } + + /** + * Commits a new block of data to the end of the existing append blob. For more information, see the + * Azure Docs. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + * + * @param data + * The data to write to the blob. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * emitted by the {@code Flux}. + * @param appendBlobAccessConditions + * {@link AppendBlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=append_blob "Sample code for AppendBlobAsyncRawClient.appendBlock")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public AppendBlobsAppendBlockResponse appendBlock(Flux data, long length, + AppendBlobAccessConditions appendBlobAccessConditions, Duration timeout, Context context) { + Mono response = appendBlobAsyncRawClient.appendBlock(data, length, appendBlobAccessConditions, context); + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Commits a new block of data from another blob to the end of this append blob. For more information, see the + * Azure Docs. + *

+ * + * @param sourceURL + * The url to the blob that will be the source of the copy. A source blob in the same storage account can + * be authenticated via Shared Key. However, if the source is a blob in another account, the source blob + * must either be public or must be authenticated via a shared access signature. If the source blob is + * public, no authentication is required to perform the operation. + * @param sourceRange + * The source {@link BlobRange} to copy. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=append_from_url "Sample code for AppendBlobAsyncRawClient.appendBlockFromUrl")] + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public AppendBlobsAppendBlockFromUrlResponse appendBlockFromUrl(URL sourceURL, BlobRange sourceRange) { + return this.appendBlockFromUrl(sourceURL, sourceRange, null, null, + null, null, null); + } + + /** + * Commits a new block of data from another blob to the end of this append blob. For more information, see the + * Azure Docs. + *

+ * + * @param sourceURL + * The url to the blob that will be the source of the copy. A source blob in the same storage account can + * be authenticated via Shared Key. However, if the source is a blob in another account, the source blob + * must either be public or must be authenticated via a shared access signature. If the source blob is + * public, no authentication is required to perform the operation. + * @param sourceRange + * {@link BlobRange} + * @param sourceContentMD5 + * An MD5 hash of the block content from the source blob. If specified, the service will calculate the MD5 + * of the received data and fail the request if it does not match the provided MD5. + * @param destAccessConditions + * {@link AppendBlobAccessConditions} + * @param sourceAccessConditions + * {@link SourceModifiedAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=append_from_url "Sample code for AppendBlobAsyncRawClient.appendBlockFromUrl")] + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public AppendBlobsAppendBlockFromUrlResponse appendBlockFromUrl(URL sourceURL, BlobRange sourceRange, + byte[] sourceContentMD5, AppendBlobAccessConditions destAccessConditions, + SourceModifiedAccessConditions sourceAccessConditions, Duration timeout, Context context) { + Mono response = appendBlobAsyncRawClient.appendBlockFromUrl(sourceURL, sourceRange, sourceContentMD5, destAccessConditions, sourceAccessConditions, context); + return timeout == null + ? response.block() + : response.block(timeout); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/BlobAccessConditions.java b/storage/client/src/main/java/com/azure/storage/blob/BlobAccessConditions.java new file mode 100644 index 0000000000000..ec115a7513fc3 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/BlobAccessConditions.java @@ -0,0 +1,64 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.storage.blob.models.LeaseAccessConditions; +import com.azure.storage.blob.models.ModifiedAccessConditions; + +/** + * This class contains values which will restrict the successful operation of a variety of requests to the conditions + * present. These conditions are entirely optional. The entire object or any of its properties may be set to null when + * passed to a method to indicate that those conditions are not desired. Please refer to the type of each field for more + * information on those particular access conditions. + */ +public final class BlobAccessConditions { + + private ModifiedAccessConditions modifiedAccessConditions; + + private LeaseAccessConditions leaseAccessConditions; + + /** + * Creates an instance which has fields set to non-null, empty values. + */ + public BlobAccessConditions() { + modifiedAccessConditions = new ModifiedAccessConditions(); + leaseAccessConditions = new LeaseAccessConditions(); + } + + /** + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used to + * construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + */ + public ModifiedAccessConditions modifiedAccessConditions() { + return modifiedAccessConditions; + } + + /** + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used to + * construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + */ + public BlobAccessConditions withModifiedAccessConditions(ModifiedAccessConditions modifiedAccessConditions) { + this.modifiedAccessConditions = modifiedAccessConditions; + return this; + } + + /** + * By setting lease access conditions, requests will fail if the provided lease does not match the active lease on + * the blob. + */ + public LeaseAccessConditions leaseAccessConditions() { + return leaseAccessConditions; + } + + /** + * By setting lease access conditions, requests will fail if the provided lease does not match the active lease on + * the blob. + */ + public BlobAccessConditions withLeaseAccessConditions(LeaseAccessConditions leaseAccessConditions) { + this.leaseAccessConditions = leaseAccessConditions; + return this; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/BlobAsyncClient.java b/storage/client/src/main/java/com/azure/storage/blob/BlobAsyncClient.java new file mode 100644 index 0000000000000..964caa7d90ad4 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/BlobAsyncClient.java @@ -0,0 +1,818 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.http.rest.ResponseBase; +import com.azure.core.util.Context; +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; +import com.azure.storage.blob.models.AccessTier; +import com.azure.storage.blob.models.BlobHTTPHeaders; +import com.azure.storage.blob.models.BlobStartCopyFromURLHeaders; +import com.azure.storage.blob.models.DeleteSnapshotsOptionType; +import com.azure.storage.blob.models.LeaseAccessConditions; +import com.azure.storage.blob.models.ModifiedAccessConditions; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.netty.ByteBufFlux; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.io.UncheckedIOException; +import java.net.URL; +import java.nio.ByteBuffer; + +/** + * Client to a blob of any type: block, append, or page. It may only be instantiated through a {@link BlobClientBuilder} or via + * the method {@link ContainerAsyncClient#getBlobAsyncClient(String)}. This class does not hold any state about a particular + * blob, but is instead a convenient way of sending appropriate requests to the resource on the service. + * + *

+ * This client offers the ability to download blobs. Note that uploading data is specific to each type of blob. Please + * refer to the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient} for upload options. This + * client can be converted into one of these clients easily through the methods {@link #asBlockBlobAsyncClient}, + * {@link #asPageBlobAsyncClient}, and {@link #asAppendBlobAsyncClient()}. + * + *

+ * This client contains operations on a blob. Operations on a container are available on {@link ContainerAsyncClient}, + * and operations on the service are available on {@link StorageAsyncClient}. + * + *

+ * Please refer to the Azure Docs + * for more information. + * + *

+ * Note this client is an async client that returns reactive responses from Spring Reactor Core + * project (https://projectreactor.io/). Calling the methods in this client will NOT + * start the actual network operation, until {@code .subscribe()} is called on the reactive response. + * You can simply convert one of these responses to a {@link java.util.concurrent.CompletableFuture} + * object through {@link Mono#toFuture()}. + */ +public class BlobAsyncClient { + + protected BlobAsyncRawClient blobAsyncRawClient; + + /** + * Package-private constructor for use by {@link BlobClientBuilder}. + * @param azureBlobStorage the API client for blob storage API + */ + BlobAsyncClient(AzureBlobStorageImpl azureBlobStorage) { + blobAsyncRawClient = new BlobAsyncRawClient(azureBlobStorage); + } + + /** + * Static method for getting a new builder for this class. + * + * @return + * A new {@link BlobClientBuilder} instance. + */ + public static BlobClientBuilder blobClientBuilder() { + return new BlobClientBuilder(); + } + + /** + * Creates a new {@link BlockBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs + * that are known to be block blobs. + * + * @return + * A {@link BlockBlobAsyncClient} to this resource. + */ + public BlockBlobAsyncClient asBlockBlobAsyncClient() { + return new BlockBlobAsyncClient(this.blobAsyncRawClient.azureBlobStorage); + } + + /** + * Creates a new {@link AppendBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs + * that are known to be append blobs. + * + * @return + * A {@link AppendBlobAsyncClient} to this resource. + */ + public AppendBlobAsyncClient asAppendBlobAsyncClient() { + return new AppendBlobAsyncClient(this.blobAsyncRawClient.azureBlobStorage); + } + + /** + * Creates a new {@link PageBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs + * that are known to be page blobs. + * + * @return + * A {@link PageBlobAsyncClient} to this resource. + */ + public PageBlobAsyncClient asPageBlobAsyncClient() { + return new PageBlobAsyncClient(this.blobAsyncRawClient.azureBlobStorage); + } + + + /** + * Copies the data at the source URL to a blob. For more information, see the Azure Docs + * + * @param sourceURL + * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. + * + * @return + * A reactive response containing the copy ID for the long running operation. + */ + public Mono startCopyFromURL(URL sourceURL) { + return this.startCopyFromURL(sourceURL, null, null, null, null); + } + + /** + * Copies the data at the source URL to a blob. For more information, see the Azure Docs + * + * @param sourceURL + * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. + * @param metadata + * {@link Metadata} + * @param sourceModifiedAccessConditions + * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the + * modification of data. ETag and LastModifiedTime are used to construct conditions related to when the blob + * was changed relative to the given request. The request will fail if the specified condition is not + * satisfied. + * @param destAccessConditions + * {@link BlobAccessConditions} against the destination. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response containing the copy ID for the long running operation. + */ + public Mono startCopyFromURL(URL sourceURL, Metadata metadata, + ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions, + Context context) { + return blobAsyncRawClient + .startCopyFromURL(sourceURL, metadata, sourceModifiedAccessConditions, destAccessConditions, context) + .map(response -> response.deserializedHeaders().copyId()); + } + + /** + * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. + * + * @param copyId + * The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link + * BlobStartCopyFromURLHeaders} object. + * + * @return + * A reactive response signalling completion. + */ + public Mono abortCopyFromURL(String copyId) { + return this.abortCopyFromURL(copyId, null, null); + } + + /** + * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. + * + * @param copyId + * The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link + * BlobStartCopyFromURLHeaders} object. + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response signalling completion. + */ + public Mono abortCopyFromURL(String copyId, LeaseAccessConditions leaseAccessConditions, Context context) { + return blobAsyncRawClient + .abortCopyFromURL(copyId, leaseAccessConditions, context) + .then(); + } + + /** + * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. + * + * @param copySource + * The source URL to copy from. + * + * @return + * A reactive response containing the copy ID for the long running operation. + */ + public Mono copyFromURL(URL copySource) { + return this.copyFromURL(copySource, null, null, null, null); + } + + /** + * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. + * + * @param copySource + * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. + * @param metadata + * {@link Metadata} + * @param sourceModifiedAccessConditions + * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the + * modification of data. ETag and LastModifiedTime are used to construct conditions related to when the blob + * was changed relative to the given request. The request will fail if the specified condition is not + * satisfied. + * @param destAccessConditions + * {@link BlobAccessConditions} against the destination. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response containing the copy ID for the long running operation. + */ + public Mono copyFromURL(URL copySource, Metadata metadata, + ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions, + Context context) { + return blobAsyncRawClient + .syncCopyFromURL(copySource, metadata, sourceModifiedAccessConditions, destAccessConditions, context) + .map(response -> response.deserializedHeaders().copyId()); + } + + /** + * Reads the entire blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. + * + * @return + * A reactive response containing the blob data. + */ + public Flux download() { + return this.download(null, null, false, null, null); + } + + /** + * Reads a range of bytes from a blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. + * + * @param range + * {@link BlobRange} + * @param accessConditions + * {@link BlobAccessConditions} + * @param rangeGetContentMD5 + * Whether the contentMD5 for the specified blob range should be returned. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response containing the blob data. + */ + public Flux download(BlobRange range, BlobAccessConditions accessConditions, + boolean rangeGetContentMD5, ReliableDownloadOptions options, Context context) { + return blobAsyncRawClient + .download(range, accessConditions, rangeGetContentMD5, context) + .flatMapMany(response -> ByteBufFlux.fromInbound(response.body(options)).asByteBuffer()); + } + + /** + * Downloads the entire blob into a file specified by the path. The file will be created if it doesn't exist. + * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. + * + * @param filePath + * A non-null {@link OutputStream} instance where the downloaded data will be written. + */ + public Mono downloadToFile(String filePath) { + return this.downloadToFile(filePath, null, null, false, null, null); + } + + /** + * Downloads a range of bytes blob into a file specified by the path. The file will be created if it doesn't exist. + * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. + * + * @param filePath + * A non-null {@link OutputStream} instance where the downloaded data will be written. + * @param range + * {@link BlobRange} + * @param accessConditions + * {@link BlobAccessConditions} + * @param rangeGetContentMD5 + * Whether the contentMD5 for the specified blob range should be returned. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + */ + public Mono downloadToFile(String filePath, BlobRange range, BlobAccessConditions accessConditions, + boolean rangeGetContentMD5, ReliableDownloadOptions options, Context context) { + //todo make this method smart + return Mono.using( + () -> new FileOutputStream(new File(filePath)), + fstream -> this.download(range, accessConditions, rangeGetContentMD5, options, context) + .doOnNext(byteBuffer -> { + try { + fstream.write(byteBuffer.array()); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }) + .then(), + fstream -> { + try { + fstream.close(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + ); + } + + /** + * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. + * + * @return + * A reactive response signalling completion. + */ + public Mono delete() { + return this.delete(null, null, null); + } + + /** + * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. + * + * @param deleteBlobSnapshotOptions + * Specifies the behavior for deleting the snapshots on this blob. {@code Include} will delete the base blob + * and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being deleted, you must + * pass null. + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response signalling completion. + */ + public Mono delete(DeleteSnapshotsOptionType deleteBlobSnapshotOptions, + BlobAccessConditions accessConditions, Context context) { + return blobAsyncRawClient + .delete(deleteBlobSnapshotOptions, accessConditions, context) + .then(); + } + + /** + * Returns the blob's metadata and properties. + * + * @return + * A reactive response containing the blob properties and metadata. + */ + public Mono getProperties() { + return this.getProperties(null, null); + } + + /** + * Returns the blob's metadata and properties. + * + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response containing the blob properties and metadata. + */ + public Mono getProperties(BlobAccessConditions accessConditions, Context context) { + return blobAsyncRawClient + .getProperties(accessConditions, context) + .map(ResponseBase::deserializedHeaders) + .map(BlobProperties::new); + } + + /** + * Changes a blob's HTTP header properties. if only one HTTP header is updated, the + * others will all be erased. In order to preserve existing values, they must be + * passed alongside the header being changed. For more information, see the + * Azure Docs. + * + * @param headers + * {@link BlobHTTPHeaders} + * + * @return + * A reactive response signalling completion. + */ + public Mono setHTTPHeaders(BlobHTTPHeaders headers) { + return this.setHTTPHeaders(headers, null, null); + } + + /** + * Changes a blob's HTTP header properties. if only one HTTP header is updated, the + * others will all be erased. In order to preserve existing values, they must be + * passed alongside the header being changed. For more information, see the + * Azure Docs. + * + * @param headers + * {@link BlobHTTPHeaders} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response signalling completion. + */ + public Mono setHTTPHeaders(BlobHTTPHeaders headers, BlobAccessConditions accessConditions, Context context) { + return blobAsyncRawClient + .setHTTPHeaders(headers, accessConditions, context) + .then(); + } + + /** + * Changes a blob's metadata. The specified metadata in this method will replace existing + * metadata. If old values must be preserved, they must be downloaded and included in the + * call to this method. For more information, see the Azure Docs. + * + * @param metadata + * {@link Metadata} + * + * @return + * A reactive response signalling completion. + */ + public Mono setMetadata(Metadata metadata) { + return this.setMetadata(metadata, null, null); + } + + /** + * Changes a blob's metadata. The specified metadata in this method will replace existing + * metadata. If old values must be preserved, they must be downloaded and included in the + * call to this method. For more information, see the Azure Docs. + * + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response signalling completion. + */ + public Mono setMetadata(Metadata metadata, BlobAccessConditions accessConditions, Context context) { + return blobAsyncRawClient + .setMetadata(metadata, accessConditions, context) + .then(); + } + + /** + * Creates a read-only snapshot of a blob. + * + * @return + * A reactive response containing the ID of the new snapshot. + */ + public Mono createSnapshot() { + return this.createSnapshot(null, null, null); + } + + /** + * Creates a read-only snapshot of a blob. + * + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response containing the ID of the new snapshot. + */ + public Mono createSnapshot(Metadata metadata, BlobAccessConditions accessConditions, Context context) { + return blobAsyncRawClient + .createSnapshot(metadata, accessConditions, context) + .map(response -> response.deserializedHeaders().snapshot()); + } + + /** + * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in + * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of + * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. + * + * @param tier + * The new tier for the blob. + * + * @return + * A reactive response signalling completion. + */ + public Mono setTier(AccessTier tier) { + return this.setTier(tier, null, null); + } + + /** + * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in + * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of + * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. + * + * @param tier + * The new tier for the blob. + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response signalling completion. + */ + public Mono setTier(AccessTier tier, LeaseAccessConditions leaseAccessConditions, Context context) { + return blobAsyncRawClient + .setTier(tier, leaseAccessConditions, context) + .then(); + } + + /** + * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. + * + * @return + * A reactive response signalling completion. + */ + public Mono undelete() { + return this.undelete(null); + } + + /** + * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. + * + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return + * A reactive response signalling completion. + */ + public Mono undelete(Context context) { + return blobAsyncRawClient + .undelete(context) + .then(); + } + + /** + * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 + * seconds, or infinite (-1). + * + * @param proposedId + * A {@code String} in any valid GUID format. May be null. + * @param duration + * The duration of the lease, in seconds, or negative one (-1) for a lease that + * never expires. A non-infinite lease can be between 15 and 60 seconds. + * + * @return + * A reactive response containing the lease ID. + */ + public Mono acquireLease(String proposedId, int duration) { + return this.acquireLease(proposedId, duration, null, null); + } + + /** + * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 + * seconds, or infinite (-1). + * + * @param proposedID + * A {@code String} in any valid GUID format. May be null. + * @param duration + * The duration of the lease, in seconds, or negative one (-1) for a lease that + * never expires. A non-infinite lease can be between 15 and 60 seconds. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response containing the lease ID. + */ + public Mono acquireLease(String proposedID, int duration, ModifiedAccessConditions modifiedAccessConditions, + Context context) { + return blobAsyncRawClient + .acquireLease(proposedID, duration, modifiedAccessConditions, context) + .map(response -> response.deserializedHeaders().leaseId()); + } + + /** + * Renews the blob's previously-acquired lease. + * + * @param leaseID + * The leaseId of the active lease on the blob. + * + * @return + * A reactive response containing the renewed lease ID. + */ + public Mono renewLease(String leaseID) { + return this.renewLease(leaseID, null, null); + } + + /** + * Renews the blob's previously-acquired lease. + * + * @param leaseID + * The leaseId of the active lease on the blob. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response containing the renewed lease ID. + */ + public Mono renewLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions, Context context) { + return blobAsyncRawClient + .renewLease(leaseID, modifiedAccessConditions, context) + .map(response -> response.deserializedHeaders().leaseId()); + } + + /** + * Releases the blob's previously-acquired lease. + * + * @param leaseID + * The leaseId of the active lease on the blob. + * + * @return + * A reactive response signalling completion. + */ + public Mono releaseLease(String leaseID) { + return this.releaseLease(leaseID, null, null); + } + + /** + * Releases the blob's previously-acquired lease. + * + * @param leaseID + * The leaseId of the active lease on the blob. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response signalling completion. + */ + public Mono releaseLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions, Context context) { + return blobAsyncRawClient + .releaseLease(leaseID, modifiedAccessConditions, context) + .then(); + } + + /** + * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant + * to break a fixed-duration lease when it expires or an infinite lease immediately. + * + * @return + * A reactive response containing the remaining time in the broken lease in seconds. + */ + public Mono breakLease() { + return this.breakLease(null, null, null); + } + + /** + * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant + * to break a fixed-duration lease when it expires or an infinite lease immediately. + * + * @param breakPeriodInSeconds + * An optional {@code Integer} representing the proposed duration of seconds that the lease should continue + * before it is broken, between 0 and 60 seconds. This break period is only used if it is shorter than the + * time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be + * available before the break period has expired, but the lease may be held for longer than the break + * period. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response containing the remaining time in the broken lease in seconds. + */ + public Mono breakLease(Integer breakPeriodInSeconds, ModifiedAccessConditions modifiedAccessConditions, + Context context) { + return blobAsyncRawClient + .breakLease(breakPeriodInSeconds, modifiedAccessConditions, context) + .map(response -> response.deserializedHeaders().leaseTime()); + } + + /** + * ChangeLease changes the blob's lease ID. + * + * @param leaseId + * The leaseId of the active lease on the blob. + * @param proposedID + * A {@code String} in any valid GUID format. + * + * @return + * A reactive response containing the new lease ID. + */ + public Mono changeLease(String leaseId, String proposedID) { + return this.changeLease(leaseId, proposedID, null, null); + } + + /** + * ChangeLease changes the blob's lease ID. For more information, see the Azure Docs. + * + * @param leaseId + * The leaseId of the active lease on the blob. + * @param proposedID + * A {@code String} in any valid GUID format. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return A reactive response containing the new lease ID. + */ + public Mono changeLease(String leaseId, String proposedID, ModifiedAccessConditions modifiedAccessConditions, + Context context) { + return blobAsyncRawClient + .changeLease(leaseId, proposedID, modifiedAccessConditions, context) + .map(response -> response.deserializedHeaders().leaseId()); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the Azure Docs. + * + * @return a reactor response containing the sku name and account kind. + */ + public Mono getAccountInfo() { + return this.getAccountInfo(null); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the Azure Docs. + * + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return a reactor response containing the sku name and account kind. + */ + // TODO determine this return type + public Mono getAccountInfo(Context context) { + return blobAsyncRawClient + .getAccountInfo(context) + .map(ResponseBase::deserializedHeaders) + .map(StorageAccountInfo::new); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/BlobAsyncRawClient.java b/storage/client/src/main/java/com/azure/storage/blob/BlobAsyncRawClient.java new file mode 100644 index 0000000000000..12360ccf0de5b --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/BlobAsyncRawClient.java @@ -0,0 +1,897 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.util.Context; +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; +import com.azure.storage.blob.models.*; +import reactor.core.publisher.Mono; + +import java.net.URL; + +import static com.azure.storage.blob.Utility.postProcessResponse; + +/** + * Represents a URL to a blob of any type: block, append, or page. It may be obtained by direct construction or via the + * create method on a {@link ContainerAsyncClient} object. This class does not hold any state about a particular blob but is + * instead a convenient way of sending off appropriate requests to the resource on the service. Please refer to the + * Azure Docs for more information. + */ +class BlobAsyncRawClient { + + protected AzureBlobStorageImpl azureBlobStorage; + + /** + * Creates a {@code BlobAsyncRawClient} object pointing to the account specified by the URL and using the provided pipeline to + * make HTTP requests.. + */ + BlobAsyncRawClient(AzureBlobStorageImpl azureBlobStorage) { + this.azureBlobStorage = azureBlobStorage; + } + + /** + * Copies the data at the source URL to a blob. For more information, see the Azure Docs + * + * @param sourceURL + * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=start_copy "Sample code for BlobAsyncRawClient.startCopyFromURL")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=start_copy_helper "Helper for start_copy sample.")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono startCopyFromURL(URL sourceURL) { + return this.startCopyFromURL(sourceURL, null, null, null, null); + } + + /** + * Copies the data at the source URL to a blob. For more information, see the Azure Docs + * + * @param sourceURL + * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. + * @param metadata + * {@link Metadata} + * @param sourceModifiedAccessConditions + * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the + * modification of data. ETag and LastModifiedTime are used to construct conditions related to when the blob + * was changed relative to the given request. The request will fail if the specified condition is not + * satisfied. + * @param destAccessConditions + * {@link BlobAccessConditions} against the destination. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=start_copy "Sample code for BlobAsyncRawClient.startCopyFromURL")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=start_copy_helper "Helper for start_copy sample.")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono startCopyFromURL(URL sourceURL, Metadata metadata, + ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions, + Context context) { + metadata = metadata == null ? new Metadata() : metadata; + sourceModifiedAccessConditions = sourceModifiedAccessConditions == null + ? new ModifiedAccessConditions() : sourceModifiedAccessConditions; + destAccessConditions = destAccessConditions == null ? new BlobAccessConditions() : destAccessConditions; + context = context == null ? Context.NONE : context; + + // We want to hide the SourceAccessConditions type from the user for consistency's sake, so we convert here. + SourceModifiedAccessConditions sourceConditions = new SourceModifiedAccessConditions() + .sourceIfModifiedSince(sourceModifiedAccessConditions.ifModifiedSince()) + .sourceIfUnmodifiedSince(sourceModifiedAccessConditions.ifUnmodifiedSince()) + .sourceIfMatch(sourceModifiedAccessConditions.ifMatch()) + .sourceIfNoneMatch(sourceModifiedAccessConditions.ifNoneMatch()); + + return postProcessResponse(this.azureBlobStorage.blobs().startCopyFromURLWithRestResponseAsync( + null, null, sourceURL, null, metadata, null, sourceConditions, + destAccessConditions.modifiedAccessConditions(), destAccessConditions.leaseAccessConditions(), context)); + } + + /** + * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. For + * more information, see the Azure Docs. + * + * @param copyId + * The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link + * BlobStartCopyFromURLHeaders} object. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=abort_copy "Sample code for BlobAsyncRawClient.abortCopyFromURL")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono abortCopyFromURL(String copyId) { + return this.abortCopyFromURL(copyId, null, null); + } + + /** + * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. For + * more information, see the Azure Docs. + * + * @param copyId + * The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link + * BlobStartCopyFromURLHeaders} object. + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=abort_copy "Sample code for BlobAsyncRawClient.abortCopyFromURL")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono abortCopyFromURL(String copyId, + LeaseAccessConditions leaseAccessConditions, Context context) { + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.blobs().abortCopyFromURLWithRestResponseAsync( + null, null, copyId, null, null, leaseAccessConditions, context)); + } + + /** + * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. + * For more information, see the Azure Docs + * + * @param copySource + * The source URL to copy from. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=sync_copy "Sample code for BlobAsyncRawClient.copyFromURL")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono syncCopyFromURL(URL copySource) { + return this.syncCopyFromURL(copySource, null, null, null, null); + } + + /** + * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. + * For more information, see the Azure Docs + * + * @param copySource + * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. + * @param metadata + * {@link Metadata} + * @param sourceModifiedAccessConditions + * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the + * modification of data. ETag and LastModifiedTime are used to construct conditions related to when the blob + * was changed relative to the given request. The request will fail if the specified condition is not + * satisfied. + * @param destAccessConditions + * {@link BlobAccessConditions} against the destination. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=sync_copy "Sample code for BlobAsyncRawClient.copyFromURL")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono syncCopyFromURL(URL copySource, Metadata metadata, + ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions, + Context context) { + metadata = metadata == null ? new Metadata() : metadata; + sourceModifiedAccessConditions = sourceModifiedAccessConditions == null + ? new ModifiedAccessConditions() : sourceModifiedAccessConditions; + destAccessConditions = destAccessConditions == null ? new BlobAccessConditions() : destAccessConditions; + context = context == null ? Context.NONE : context; + + // We want to hide the SourceAccessConditions type from the user for consistency's sake, so we convert here. + SourceModifiedAccessConditions sourceConditions = new SourceModifiedAccessConditions() + .sourceIfModifiedSince(sourceModifiedAccessConditions.ifModifiedSince()) + .sourceIfUnmodifiedSince(sourceModifiedAccessConditions.ifUnmodifiedSince()) + .sourceIfMatch(sourceModifiedAccessConditions.ifMatch()) + .sourceIfNoneMatch(sourceModifiedAccessConditions.ifNoneMatch()); + + return postProcessResponse(this.azureBlobStorage.blobs().copyFromURLWithRestResponseAsync( + null, null, copySource, null, metadata, null, sourceConditions, + destAccessConditions.modifiedAccessConditions(), destAccessConditions.leaseAccessConditions(), context)); + } + + /** + * Reads a range of bytes from a blob. The response also includes the blob's properties and metadata. For more + * information, see the Azure Docs. + *

+ * Note that the response body has reliable download functionality built in, meaning that a failed download stream + * will be automatically retried. This behavior may be configured with {@link ReliableDownloadOptions}. + * + * @return Emits the successful response. + * @apiNote ## Sample Code \n [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=upload_download + * "Sample code for BlobAsyncRawClient.download")] \n For more samples, please see the [Samples + * file](%https://github.com/Azure/azure-storage-java/blob/New-Storage-SDK-V10-Preview/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono download() { + return this.download(null, null, false, null); + } + + /** + * Reads a range of bytes from a blob. The response also includes the blob's properties and metadata. For more + * information, see the Azure Docs. + *

+ * Note that the response body has reliable download functionality built in, meaning that a failed download stream + * will be automatically retried. This behavior may be configured with {@link ReliableDownloadOptions}. + * + * @param range + * {@link BlobRange} + * @param accessConditions + * {@link BlobAccessConditions} + * @param rangeGetContentMD5 + * Whether the contentMD5 for the specified blob range should be returned. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=upload_download "Sample code for BlobAsyncRawClient.download")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono download(BlobRange range, BlobAccessConditions accessConditions, + boolean rangeGetContentMD5, Context context) { + Boolean getMD5 = rangeGetContentMD5 ? rangeGetContentMD5 : null; + range = range == null ? new BlobRange(0) : range; + accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions; + HTTPGetterInfo info = new HTTPGetterInfo() + .withOffset(range.offset()) + .withCount(range.count()) + .withETag(accessConditions.modifiedAccessConditions().ifMatch()); + + // TODO: range is BlobRange but expected as String + // TODO: figure out correct response + return postProcessResponse(this.azureBlobStorage.blobs().downloadWithRestResponseAsync( + null, null, null, null, null, range.toHeaderValue(), getMD5, + null, null, null, null, + accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), context)) + // Convert the autorest response to a DownloadAsyncResponse, which enable reliable download. + .map(response -> { + // If there wasn't an etag originally specified, lock on the one returned. + info.withETag(response.deserializedHeaders().eTag()); + return new DownloadAsyncResponse(response, info, + // In the event of a stream failure, make a new request to pick up where we left off. + newInfo -> + this.download(new BlobRange(newInfo.offset(), newInfo.count()), + new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().ifMatch(info.eTag())), false, + context == null ? Context.NONE : context)); + }); + } + + /** + * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. For more + * information, see the Azure Docs. + * + * @return Emits the successful response. + * @apiNote ## Sample Code \n [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_delete + * "Sample code for BlobAsyncRawClient.delete")] \n For more samples, please see the [Samples + * file](%https://github.com/Azure/azure-storage-java/blob/New-Storage-SDK-V10-Preview/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono delete() { + return this.delete(null, null, null); + } + + /** + * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. For more + * information, see the Azure Docs. + * + * @param deleteBlobSnapshotOptions + * Specifies the behavior for deleting the snapshots on this blob. {@code Include} will delete the base blob + * and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being deleted, you must + * pass null. + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_delete "Sample code for BlobAsyncRawClient.delete")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono delete(DeleteSnapshotsOptionType deleteBlobSnapshotOptions, + BlobAccessConditions accessConditions, Context context) { + accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions; + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.blobs().deleteWithRestResponseAsync( + null, null, null, null, null, deleteBlobSnapshotOptions, + null, accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), + context)); + } + + /** + * Returns the blob's metadata and properties. For more information, see the Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=properties_metadata "Sample code for BlobAsyncRawClient.getProperties")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono getProperties() { + return this.getProperties(null, null); + } + + /** + * Returns the blob's metadata and properties. For more information, see the Azure Docs. + * + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=properties_metadata "Sample code for BlobAsyncRawClient.getProperties")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono getProperties(BlobAccessConditions accessConditions, Context context) { + accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions; + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.blobs().getPropertiesWithRestResponseAsync( + null, null, null, null, null, null, + null, null, null, accessConditions.leaseAccessConditions(), + accessConditions.modifiedAccessConditions(), context)); + } + + /** + * Changes a blob's HTTP header properties. For more information, see the Azure + * Docs. + * + * @param headers + * {@link BlobHTTPHeaders} + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=properties_metadata "Sample code for BlobAsyncRawClient.setHTTPHeaders")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono setHTTPHeaders(BlobHTTPHeaders headers) { + return this.setHTTPHeaders(headers, null, null); + } + + /** + * Changes a blob's HTTP header properties. For more information, see the Azure Docs. + * + * @param headers + * {@link BlobHTTPHeaders} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=properties_metadata "Sample code for BlobAsyncRawClient.setHTTPHeaders")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono setHTTPHeaders(BlobHTTPHeaders headers, + BlobAccessConditions accessConditions, Context context) { + accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions; + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.blobs().setHTTPHeadersWithRestResponseAsync( + null, null, null, null, headers, + accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), context)); + } + + /** + * Changes a blob's metadata. For more information, see the Azure Docs. + * + * @param metadata + * {@link Metadata} + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=properties_metadata "Sample code for BlobAsyncRawClient.setMetadata")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono setMetadata(Metadata metadata) { + return this.setMetadata(metadata, null, null); + } + + /** + * Changes a blob's metadata. For more information, see the Azure Docs. + * + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=properties_metadata "Sample code for BlobAsyncRawClient.setMetadata")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono setMetadata(Metadata metadata, BlobAccessConditions accessConditions, + Context context) { + metadata = metadata == null ? new Metadata() : metadata; + accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions; + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.blobs().setMetadataWithRestResponseAsync( + null, null, null, metadata, null, null, + null, null, accessConditions.leaseAccessConditions(), + accessConditions.modifiedAccessConditions(), context)); + } + + /** + * Creates a read-only snapshot of a blob. For more information, see the Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=snapshot "Sample code for BlobAsyncRawClient.createSnapshot")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono createSnapshot() { + return this.createSnapshot(null, null, null); + } + + /** + * Creates a read-only snapshot of a blob. For more information, see the Azure Docs. + * + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=snapshot "Sample code for BlobAsyncRawClient.createSnapshot")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono createSnapshot(Metadata metadata, BlobAccessConditions accessConditions, + Context context) { + metadata = metadata == null ? new Metadata() : metadata; + accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions; + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.blobs().createSnapshotWithRestResponseAsync( + null, null, null, metadata, null, null, + null, null, accessConditions.modifiedAccessConditions(), + accessConditions.leaseAccessConditions(), context)); + } + + /** + * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in + * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of + * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. + *

+ * For detailed information about block blob level tiering see the Azure Docs. + * + * @param tier + * The new tier for the blob. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=tier "Sample code for BlobAsyncRawClient.setTier")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono setTier(AccessTier tier) { + return this.setTier(tier, null, null); + } + + /** + * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in + * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of + * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. + *

+ * For detailed information about block blob level tiering see the Azure Docs. + * + * @param tier + * The new tier for the blob. + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=tier "Sample code for BlobAsyncRawClient.setTier")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono setTier(AccessTier tier, LeaseAccessConditions leaseAccessConditions, + Context context) { + Utility.assertNotNull("tier", tier); + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.blobs().setTierWithRestResponseAsync( + null, null, tier, null, null, leaseAccessConditions, context)); + } + + /** + * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. + * For more information, see the Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=undelete "Sample code for BlobAsyncRawClient.undelete")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono undelete() { + return this.undelete(null); + } + + /** + * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. + * For more information, see the Azure Docs. + * + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=undelete "Sample code for BlobAsyncRawClient.undelete")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono undelete(Context context) { + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.blobs().undeleteWithRestResponseAsync(null, + null, context)); + } + + /** + * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 + * seconds, or infinite (-1). For more information, see the Azure Docs. + * + * @param proposedId + * A {@code String} in any valid GUID format. May be null. + * @param duration + * The duration of the lease, in seconds, or negative one (-1) for a lease that + * never expires. A non-infinite lease can be between 15 and 60 seconds. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobAsyncRawClient.acquireLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono acquireLease(String proposedId, int duration) { + return this.acquireLease(proposedId, duration, null, null); + } + + /** + * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 + * seconds, or infinite (-1). For more information, see the Azure Docs. + * + * @param proposedID + * A {@code String} in any valid GUID format. May be null. + * @param duration + * The duration of the lease, in seconds, or negative one (-1) for a lease that + * never expires. A non-infinite lease can be between 15 and 60 seconds. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobAsyncRawClient.acquireLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono acquireLease(String proposedID, int duration, + ModifiedAccessConditions modifiedAccessConditions, Context context) { + if (!(duration == -1 || (duration >= 15 && duration <= 60))) { + // Throwing is preferred to Mono.error because this will error out immediately instead of waiting until + // subscription. + throw new IllegalArgumentException("Duration must be -1 or between 15 and 60."); + } + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.blobs().acquireLeaseWithRestResponseAsync( + null, null, null, duration, proposedID, null, + modifiedAccessConditions, context)); + } + + /** + * Renews the blob's previously-acquired lease. For more information, see the Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the blob. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobAsyncRawClient.renewLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono renewLease(String leaseID) { + return this.renewLease(leaseID, null, null); + } + + /** + * Renews the blob's previously-acquired lease. For more information, see the Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the blob. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobAsyncRawClient.renewLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono renewLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions, + Context context) { + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.blobs().renewLeaseWithRestResponseAsync(null, + null, leaseID, null, null, modifiedAccessConditions, context)); + } + + /** + * Releases the blob's previously-acquired lease. For more information, see the Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the blob. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobAsyncRawClient.releaseLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono releaseLease(String leaseID) { + return this.releaseLease(leaseID, null, null); + } + + /** + * Releases the blob's previously-acquired lease. For more information, see the Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the blob. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobAsyncRawClient.releaseLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono releaseLease(String leaseID, + ModifiedAccessConditions modifiedAccessConditions, Context context) { + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.blobs().releaseLeaseWithRestResponseAsync(null, + null, leaseID, null, null, modifiedAccessConditions, context)); + } + + /** + * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant + * to break a fixed-duration lease when it expires or an infinite lease immediately. For more information, see the + * Azure Docs. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobAsyncRawClient.breakLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/New-Storage-SDK-V10-Preview/src/test/java/com/microsoft/azure/storage/Samples.java) + * + * @return + * Emits the successful response. + */ + public Mono breakLease() { + return this.breakLease(null, null, null); + } + + /** + * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant + * to break a fixed-duration lease when it expires or an infinite lease immediately. For more information, see the + * Azure Docs. + * + * @param breakPeriodInSeconds + * An optional {@code Integer} representing the proposed duration of seconds that the lease should continue + * before it is broken, between 0 and 60 seconds. This break period is only used if it is shorter than the + * time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be + * available before the break period has expired, but the lease may be held for longer than the break + * period. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobAsyncRawClient.breakLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono breakLease(Integer breakPeriodInSeconds, + ModifiedAccessConditions modifiedAccessConditions, Context context) { + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.blobs().breakLeaseWithRestResponseAsync(null, + null, null, breakPeriodInSeconds, null, modifiedAccessConditions, context)); + } + + /** + * ChangeLease changes the blob's lease ID. For more information, see the Azure Docs. + * + * @param leaseId + * The leaseId of the active lease on the blob. + * @param proposedID + * A {@code String} in any valid GUID format. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobAsyncRawClient.changeLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono changeLease(String leaseId, String proposedID) { + return this.changeLease(leaseId, proposedID, null, null); + } + + /** + * ChangeLease changes the blob's lease ID. For more information, see the Azure Docs. + * + * @param leaseId + * The leaseId of the active lease on the blob. + * @param proposedID + * A {@code String} in any valid GUID format. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobAsyncRawClient.changeLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono changeLease(String leaseId, String proposedID, + ModifiedAccessConditions modifiedAccessConditions, Context context) { + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.blobs().changeLeaseWithRestResponseAsync(null, + null, leaseId, proposedID, null, null, modifiedAccessConditions, context)); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=account_info "Sample code for BlobAsyncRawClient.getAccountInfo")] \n + * For more samples, please see the [Samples file](https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono getAccountInfo() { + return this.getAccountInfo(null); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the Azure Docs. + * + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=account_info "Sample code for BlobAsyncRawClient.getAccountInfo")] \n + * For more samples, please see the [Samples file](https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono getAccountInfo(Context context) { + context = context == null ? Context.NONE : context; + + return postProcessResponse( + this.azureBlobStorage.blobs().getAccountInfoWithRestResponseAsync(null, null, context)); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/BlobClient.java b/storage/client/src/main/java/com/azure/storage/blob/BlobClient.java new file mode 100644 index 0000000000000..b1b49e1f32cda --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/BlobClient.java @@ -0,0 +1,756 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; +import com.azure.storage.blob.models.AccessTier; +import com.azure.storage.blob.models.BlobHTTPHeaders; +import com.azure.storage.blob.models.BlobStartCopyFromURLHeaders; +import com.azure.storage.blob.models.DeleteSnapshotsOptionType; +import com.azure.storage.blob.models.LeaseAccessConditions; +import com.azure.storage.blob.models.ModifiedAccessConditions; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.io.IOException; +import java.io.OutputStream; +import java.io.UncheckedIOException; +import java.net.URL; +import java.nio.ByteBuffer; +import java.time.Duration; + +/** + * Client to a blob of any type: block, append, or page. It may only be instantiated through a {@link BlobClientBuilder} or via + * the method {@link ContainerClient#getBlobClient(String)}. This class does not hold any state about a particular + * blob, but is instead a convenient way of sending appropriate requests to the resource on the service. + * + *

+ * This client offers the ability to download blobs. Note that uploading data is specific to each type of blob. Please + * refer to the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient} for upload options. This + * client can be converted into one of these clients easily through the methods {@link #asBlockBlobClient}, {@link #asPageBlobClient}, + * and {@link #asAppendBlobClient}. + * + *

+ * This client contains operations on a blob. Operations on a container are available on {@link ContainerClient}, + * and operations on the service are available on {@link StorageClient}. + * + *

+ * Please refer to the Azure Docs + * for more information. + */ +public class BlobClient { + + private BlobAsyncClient blobAsyncClient; + + /** + * Package-private constructor for use by {@link BlobClientBuilder}. + * @param azureBlobStorage the API client for blob storage API + */ + BlobClient(AzureBlobStorageImpl azureBlobStorage) { + this.blobAsyncClient = new BlobAsyncClient(azureBlobStorage); + } + + /** + * Static method for getting a new builder for this class. + * + * @return + * A new {@link BlobClientBuilder} instance. + */ + public static BlobClientBuilder blobClientBuilder() { + return new BlobClientBuilder(); + } + + /** + * Creates a new {@link BlockBlobClient} to this resource, maintaining configurations. Only do this for blobs + * that are known to be block blobs. + * + * @return + * A {@link BlockBlobClient} to this resource. + */ + public BlockBlobClient asBlockBlobClient() { + return new BlockBlobClient(this.blobAsyncClient.blobAsyncRawClient.azureBlobStorage); + } + + /** + * Creates a new {@link AppendBlobClient} to this resource, maintaining configurations. Only do this for blobs + * that are known to be append blobs. + * + * @return + * A {@link AppendBlobClient} to this resource. + */ + public AppendBlobClient asAppendBlobClient() { + return new AppendBlobClient(this.blobAsyncClient.blobAsyncRawClient.azureBlobStorage); + } + + /** + * Creates a new {@link PageBlobClient} to this resource, maintaining configurations. Only do this for blobs + * that are known to be page blobs. + * + * @return + * A {@link PageBlobClient} to this resource. + */ + public PageBlobClient asPageBlobClient() { + return new PageBlobClient(this.blobAsyncClient.blobAsyncRawClient.azureBlobStorage); + } + + /** + * Copies the data at the source URL to a blob. For more information, see the Azure Docs + * + * @param sourceURL + * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. + * + * @return + * The copy ID for the long running operation. + */ + public String startCopyFromURL(URL sourceURL) { + return this.startCopyFromURL(sourceURL, null, null, null, null); + } + + /** + * Copies the data at the source URL to a blob. For more information, see the Azure Docs + * + * @param sourceURL + * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. + * @param metadata + * {@link Metadata} + * @param sourceModifiedAccessConditions + * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the + * modification of data. ETag and LastModifiedTime are used to construct conditions related to when the blob + * was changed relative to the given request. The request will fail if the specified condition is not + * satisfied. + * @param destAccessConditions + * {@link BlobAccessConditions} against the destination. + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * + * @return + * The copy ID for the long running operation. + */ + public String startCopyFromURL(URL sourceURL, Metadata metadata, + ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions, + Duration timeout) { + Mono response = blobAsyncClient + .startCopyFromURL(sourceURL, metadata, sourceModifiedAccessConditions, destAccessConditions, null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. + * + * @param copyId + * The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link + * BlobStartCopyFromURLHeaders} object. + */ + public void abortCopyFromURL(String copyId) { + this.abortCopyFromURL(copyId, null, null); + } + + /** + * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. + * + * @param copyId + * The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link + * BlobStartCopyFromURLHeaders} object. + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + */ + public void abortCopyFromURL(String copyId, LeaseAccessConditions leaseAccessConditions, Duration timeout) { + Mono response = blobAsyncClient + .abortCopyFromURL(copyId, leaseAccessConditions, null /*context*/); + + if (timeout == null) { + response.block(); + } else { + response.block(timeout); + } + } + + /** + * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. + * + * @param copySource + * The source URL to copy from. + * + * @return + * The copy ID for the long running operation. + */ + public String copyFromURL(URL copySource) { + return this.copyFromURL(copySource, null, null, null, null); + } + + /** + * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. + * + * @param copySource + * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. + * @param metadata + * {@link Metadata} + * @param sourceModifiedAccessConditions + * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the + * modification of data. ETag and LastModifiedTime are used to construct conditions related to when the blob + * was changed relative to the given request. The request will fail if the specified condition is not + * satisfied. + * @param destAccessConditions + * {@link BlobAccessConditions} against the destination. + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * + * @return + * The copy ID for the long running operation. + */ + public String copyFromURL(URL copySource, Metadata metadata, + ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions, + Duration timeout) { + Mono response = blobAsyncClient + .copyFromURL(copySource, metadata, sourceModifiedAccessConditions, destAccessConditions, null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient}, + * {@link PageBlobClient}, or {@link AppendBlobClient}. + * + * @param stream + * A non-null {@link OutputStream} instance where the downloaded data will be written. + */ + public void download(OutputStream stream) throws IOException { + this.download(stream, null, null, null, false, null); + } + + /** + * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link BlockBlobClient}, + * {@link PageBlobClient}, or {@link AppendBlobClient}. + * + * @param stream + * A non-null {@link OutputStream} instance where the downloaded data will be written. + * @param range + * {@link BlobRange} + * @param accessConditions + * {@link BlobAccessConditions} + * @param rangeGetContentMD5 + * Whether the contentMD5 for the specified blob range should be returned. + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + */ + public void download(OutputStream stream, ReliableDownloadOptions options, BlobRange range, + BlobAccessConditions accessConditions, boolean rangeGetContentMD5, Duration timeout) throws IOException { + Flux data = blobAsyncClient + .download(range, accessConditions, rangeGetContentMD5, options, null /*context*/); + + data = timeout == null + ? data + : data.timeout(timeout); //TODO this isn't doing what we want + + for (ByteBuffer buffer : data.toIterable()) { + stream.write(buffer.array()); + } + } + + /** + * Downloads the entire blob into a file specified by the path. The file will be created if it doesn't exist. + * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. + * + * @param filePath + * A non-null {@link OutputStream} instance where the downloaded data will be written. + */ + public void downloadToFile(String filePath) throws IOException { + this.downloadToFile(filePath, null, null, null, false, null); + } + + /** + * Downloads a range of bytes blob into a file specified by the path. The file will be created if it doesn't exist. + * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. + * + * @param filePath + * A non-null {@link OutputStream} instance where the downloaded data will be written. + * @param range + * {@link BlobRange} + * @param accessConditions + * {@link BlobAccessConditions} + * @param rangeGetContentMD5 + * Whether the contentMD5 for the specified blob range should be returned. + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + */ + public void downloadToFile(String filePath, ReliableDownloadOptions options, BlobRange range, + BlobAccessConditions accessConditions, boolean rangeGetContentMD5, Duration timeout) throws IOException { + Mono download = blobAsyncClient.downloadToFile(filePath, range, accessConditions, rangeGetContentMD5, options, null); + + try { + if (timeout == null) { + download.block(); + } else { + download.block(timeout); //TODO this isn't doing what we want + } + } catch (UncheckedIOException e) { + throw e.getCause(); + } + } + + /** + * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. + */ + public void delete() { + this.delete(null, null, null); + } + + /** + * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. + * + * @param deleteBlobSnapshotOptions + * Specifies the behavior for deleting the snapshots on this blob. {@code Include} will delete the base blob + * and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being deleted, you must + * pass null. + * @param accessConditions + * {@link BlobAccessConditions} + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * + * @return + * A reactive response signalling completion. + */ + public void delete(DeleteSnapshotsOptionType deleteBlobSnapshotOptions, + BlobAccessConditions accessConditions, Duration timeout) { + Mono response = blobAsyncClient + .delete(deleteBlobSnapshotOptions, accessConditions, null /*context*/); + + if (timeout == null) { + response.block(); + } else { + response.block(timeout); + } + } + + /** + * Returns the blob's metadata and properties. + * + * @return + * The blob properties and metadata. + */ + public BlobProperties getProperties() { + return this.getProperties(null, null); + } + + /** + * Returns the blob's metadata and properties. + * + * @param accessConditions + * {@link BlobAccessConditions} + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * + * @return + * The blob properties and metadata. + */ + public BlobProperties getProperties(BlobAccessConditions accessConditions, Duration timeout) { + Mono response = blobAsyncClient + .getProperties(accessConditions, null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Changes a blob's HTTP header properties. if only one HTTP header is updated, the + * others will all be erased. In order to preserve existing values, they must be + * passed alongside the header being changed. For more information, see the + * Azure Docs. + * + * @param headers + * {@link BlobHTTPHeaders} + */ + public void setHTTPHeaders(BlobHTTPHeaders headers) { + this.setHTTPHeaders(headers, null, null); + } + + /** + * Changes a blob's HTTP header properties. if only one HTTP header is updated, the + * others will all be erased. In order to preserve existing values, they must be + * passed alongside the header being changed. For more information, see the + * Azure Docs. + * + * @param headers + * {@link BlobHTTPHeaders} + * @param accessConditions + * {@link BlobAccessConditions} + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + */ + public void setHTTPHeaders(BlobHTTPHeaders headers, BlobAccessConditions accessConditions, + Duration timeout) { + Mono response = blobAsyncClient + .setHTTPHeaders(headers, accessConditions, null /*context*/); + + if (timeout == null) { + response.block(); + } else { + response.block(timeout); + } + } + + /** + * Changes a blob's metadata. The specified metadata in this method will replace existing + * metadata. If old values must be preserved, they must be downloaded and included in the + * call to this method. For more information, see the Azure Docs. + * + * @param metadata + * {@link Metadata} + */ + public void setMetadata(Metadata metadata) { + this.setMetadata(metadata, null, null); + } + + /** + * Changes a blob's metadata. The specified metadata in this method will replace existing + * metadata. If old values must be preserved, they must be downloaded and included in the + * call to this method. For more information, see the Azure Docs. + * + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + */ + public void setMetadata(Metadata metadata, BlobAccessConditions accessConditions, Duration timeout) { + Mono response = blobAsyncClient + .setMetadata(metadata, accessConditions, null /*context*/); + + if (timeout == null) { + response.block(); + } else { + response.block(timeout); + } + } + + /** + * Creates a read-only snapshot of a blob. + * + * @return + * The ID of the new snapshot. + */ + public String createSnapshot() { + return this.createSnapshot(null, null, null); + } + + /** + * Creates a read-only snapshot of a blob. + * + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * + * @return + * The ID of the new snapshot. + */ + public String createSnapshot(Metadata metadata, BlobAccessConditions accessConditions, Duration timeout) { + Mono response = blobAsyncClient + .createSnapshot(metadata, accessConditions, null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in + * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of + * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. + * + * @param tier + * The new tier for the blob. + */ + public void setTier(AccessTier tier) { + this.setTier(tier, null, null); + } + + /** + * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in + * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of + * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. + * + * @param tier + * The new tier for the blob. + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + */ + public void setTier(AccessTier tier, LeaseAccessConditions leaseAccessConditions, Duration timeout) { + Mono response = blobAsyncClient + .setTier(tier, leaseAccessConditions, null /*context*/); + + if (timeout == null) { + response.block(); + } else { + response.block(timeout); + } + } + + /** + * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. + */ + public void undelete() { + this.undelete(null); + } + + /** + * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. + * + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + */ + public void undelete(Duration timeout) { + Mono response = blobAsyncClient + .undelete(null /*context*/); + + if (timeout == null) { + response.block(); + } else { + response.block(timeout); + } + } + + /** + * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 + * seconds, or infinite (-1). + * + * @param proposedId + * A {@code String} in any valid GUID format. May be null. + * @param duration + * The duration of the lease, in seconds, or negative one (-1) for a lease that + * never expires. A non-infinite lease can be between 15 and 60 seconds. + * + * @return + * The lease ID. + */ + public String acquireLease(String proposedId, int duration) { + return this.acquireLease(proposedId, duration, null, null); + } + + /** + * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 + * seconds, or infinite (-1). + * + * @param proposedID + * A {@code String} in any valid GUID format. May be null. + * @param duration + * The duration of the lease, in seconds, or negative one (-1) for a lease that + * never expires. A non-infinite lease can be between 15 and 60 seconds. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * + * @return + * The lease ID. + */ + public String acquireLease(String proposedID, int duration, + ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { + Mono response = blobAsyncClient + .acquireLease(proposedID, duration, modifiedAccessConditions, null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Renews the blob's previously-acquired lease. + * + * @param leaseID + * The leaseId of the active lease on the blob. + * + * @return + * The renewed lease ID. + */ + public String renewLease(String leaseID) { + return this.renewLease(leaseID, null, null); + } + + /** + * Renews the blob's previously-acquired lease. + * + * @param leaseID + * The leaseId of the active lease on the blob. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * + * @return + * The renewed lease ID. + */ + public String renewLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions, + Duration timeout) { + Mono response = blobAsyncClient + .renewLease(leaseID, modifiedAccessConditions, null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Releases the blob's previously-acquired lease. + * + * @param leaseID + * The leaseId of the active lease on the blob. + */ + public void releaseLease(String leaseID) { + this.releaseLease(leaseID, null, null); + } + + /** + * Releases the blob's previously-acquired lease. + * + * @param leaseID + * The leaseId of the active lease on the blob. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + */ + public void releaseLease(String leaseID, + ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { + Mono response = blobAsyncClient + .releaseLease(leaseID, modifiedAccessConditions, null /*context*/); + + if (timeout == null) { + response.block(); + } else { + response.block(timeout); + } + } + + /** + * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant + * to break a fixed-duration lease when it expires or an infinite lease immediately. + * + * @return + * The remaining time in the broken lease in seconds. + */ + public int breakLease() { + return this.breakLease(null, null, null); + } + + /** + * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant + * to break a fixed-duration lease when it expires or an infinite lease immediately. + * + * @param breakPeriodInSeconds + * An optional {@code Integer} representing the proposed duration of seconds that the lease should continue + * before it is broken, between 0 and 60 seconds. This break period is only used if it is shorter than the + * time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be + * available before the break period has expired, but the lease may be held for longer than the break + * period. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * + * @return + * The remaining time in the broken lease in seconds. + */ + public int breakLease(Integer breakPeriodInSeconds, + ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { + Mono response = blobAsyncClient + .breakLease(breakPeriodInSeconds, modifiedAccessConditions, null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * ChangeLease changes the blob's lease ID. + * + * @param leaseId + * The leaseId of the active lease on the blob. + * @param proposedID + * A {@code String} in any valid GUID format. + * + * @return + * The new lease ID. + */ + public String changeLease(String leaseId, String proposedID) { + return this.changeLease(leaseId, proposedID, null, null); + } + + /** + * ChangeLease changes the blob's lease ID. For more information, see the Azure Docs. + * + * @param leaseId + * The leaseId of the active lease on the blob. + * @param proposedID + * A {@code String} in any valid GUID format. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * + * @return The new lease ID. + */ + public String changeLease(String leaseId, String proposedID, + ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { + Mono response = blobAsyncClient + .changeLease(leaseId, proposedID, modifiedAccessConditions, null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the Azure Docs. + * + * @return The sku name and account kind. + */ + public StorageAccountInfo getAccountInfo() { + return this.getAccountInfo(null); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the Azure Docs. + * + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * + * @return The sku name and account kind. + */ + public StorageAccountInfo getAccountInfo(Duration timeout) { + Mono response = blobAsyncClient + .getAccountInfo(null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/BlobClientBuilder.java b/storage/client/src/main/java/com/azure/storage/blob/BlobClientBuilder.java new file mode 100644 index 0000000000000..9fe00cbd8fa55 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/BlobClientBuilder.java @@ -0,0 +1,220 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.configuration.Configuration; +import com.azure.core.http.HttpClient; +import com.azure.core.http.HttpPipeline; +import com.azure.core.http.policy.AddDatePolicy; +import com.azure.core.http.policy.HttpLogDetailLevel; +import com.azure.core.http.policy.HttpLoggingPolicy; +import com.azure.core.http.policy.HttpPipelinePolicy; +import com.azure.core.http.policy.RequestIdPolicy; +import com.azure.core.http.policy.RetryPolicy; +import com.azure.core.http.policy.UserAgentPolicy; +import com.azure.core.implementation.util.ImplUtils; +import com.azure.storage.blob.implementation.AzureBlobStorageBuilder; +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; + +import java.net.MalformedURLException; +import java.net.URL; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * Fluent BlobClientBuilder for instantiating a {@link BlobClient} or {@link BlobAsyncClient}. + * + *

+ * An instance of this builder may only be created from static method {@link BlobClient#blobClientBuilder()}. + * The following information must be provided on this builder: + * + *

+ * + *

+ * Once all the configurations are set on this builder, call {@code .buildClient()} to create a + * {@link BlobClient} or {@code .buildAsyncClient()} to create a {@link BlobAsyncClient}. + */ +public final class BlobClientBuilder { + private static final String ACCOUNT_NAME = "AccountName".toLowerCase(); + private static final String ACCOUNT_KEY = "AccountKey".toLowerCase(); + + private final List policies; + + private URL endpoint; + private ICredentials credentials = new AnonymousCredentials(); + private HttpClient httpClient; + private HttpLogDetailLevel logLevel; + private RetryPolicy retryPolicy; + private Configuration configuration; + + public BlobClientBuilder() { + retryPolicy = new RetryPolicy(); + logLevel = HttpLogDetailLevel.NONE; + policies = new ArrayList<>(); + } + + /** + * Constructs an instance of BlobAsyncClient based on the configurations stored in the appendBlobClientBuilder. + * @return a new client instance + */ + private AzureBlobStorageImpl buildImpl() { + Objects.requireNonNull(endpoint); + + // Closest to API goes first, closest to wire goes last. + final List policies = new ArrayList<>(); + + policies.add(new UserAgentPolicy(BlobConfiguration.NAME, BlobConfiguration.VERSION)); + policies.add(new RequestIdPolicy()); + policies.add(new AddDatePolicy()); + policies.add(credentials); // This needs to be a different credential type. + + policies.add(retryPolicy); + + policies.addAll(this.policies); + policies.add(new HttpLoggingPolicy(logLevel)); + + HttpPipeline pipeline = HttpPipeline.builder() + .policies(policies.toArray(new HttpPipelinePolicy[0])) + .httpClient(httpClient) + .build(); + + return new AzureBlobStorageBuilder() + .url(endpoint.toString()) + .pipeline(pipeline) + .build(); + } + + /** + * @return a {@link BlobClient} created from the configurations in this builder. + */ + public BlobClient buildClient() { + return new BlobClient(buildImpl()); + } + + /** + * @return a {@link BlobAsyncClient} created from the configurations in this builder. + */ + public BlobAsyncClient buildAsyncClient() { + return new BlobAsyncClient(buildImpl()); + } + + /** + * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) + * @param endpoint URL of the service + * @return the updated BlobClientBuilder object + */ + public BlobClientBuilder endpoint(String endpoint) { + Objects.requireNonNull(endpoint); + try { + this.endpoint = new URL(endpoint); + } catch (MalformedURLException ex) { + throw new IllegalArgumentException("The Azure Storage Queue endpoint url is malformed."); + } + + return this; + } + + /** + * Sets the credentials used to authorize requests sent to the service + * @param credentials authorization credentials + * @return the updated BlobClientBuilder object + */ + public BlobClientBuilder credentials(SharedKeyCredentials credentials) { + this.credentials = credentials; + return this; + } + + /** + * Sets the credentials used to authorize requests sent to the service + * @param credentials authorization credentials + * @return the updated BlobClientBuilder object + */ + public BlobClientBuilder credentials(TokenCredentials credentials) { + this.credentials = credentials; + return this; + } + + /** + * Clears the credentials used to authorize requests sent to the service + * @return the updated BlobClientBuilder object + */ + public BlobClientBuilder anonymousCredentials() { + this.credentials = new AnonymousCredentials(); + return this; + } + + /** + * Sets the connection string for the service, parses it for authentication information (account name, account key) + * @param connectionString connection string from access keys section + * @return the updated BlobClientBuilder object + */ + public BlobClientBuilder connectionString(String connectionString) { + Objects.requireNonNull(connectionString); + + Map connectionKVPs = new HashMap<>(); + for (String s : connectionString.split(";")) { + String[] kvp = s.split("=", 2); + connectionKVPs.put(kvp[0].toLowerCase(), kvp[1]); + } + + String accountName = connectionKVPs.get(ACCOUNT_NAME); + String accountKey = connectionKVPs.get(ACCOUNT_KEY); + + if (ImplUtils.isNullOrEmpty(accountName) || ImplUtils.isNullOrEmpty(accountKey)) { + throw new IllegalArgumentException("Connection string must contain 'AccountName' and 'AccountKey'."); + } + + // Use accountName and accountKey to get the SAS token using the credential class. + credentials = new SharedKeyCredentials(accountName, accountKey); + + return this; + } + + /** + * Sets the http client used to send service requests + * @param httpClient http client to send requests + * @return the updated BlobClientBuilder object + */ + public BlobClientBuilder httpClient(HttpClient httpClient) { + this.httpClient = httpClient; + return this; + } + + /** + * Adds a pipeline policy to apply on each request sent + * @param pipelinePolicy a pipeline policy + * @return the updated BlobClientBuilder object + */ + public BlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { + this.policies.add(pipelinePolicy); + return this; + } + + /** + * Sets the logging level for service requests + * @param logLevel logging level + * @return the updated BlobClientBuilder object + */ + public BlobClientBuilder httpLogDetailLevel(HttpLogDetailLevel logLevel) { + this.logLevel = logLevel; + return this; + } + + /** + * Sets the configuration object used to retrieve environment configuration values used to buildClient the client with + * when they are not set in the appendBlobClientBuilder, defaults to Configuration.NONE + * @param configuration configuration store + * @return the updated BlobClientBuilder object + */ + public BlobClientBuilder configuration(Configuration configuration) { + this.configuration = configuration; + return this; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/BlobConfiguration.java b/storage/client/src/main/java/com/azure/storage/blob/BlobConfiguration.java new file mode 100644 index 0000000000000..ae1e422a24c0a --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/BlobConfiguration.java @@ -0,0 +1,8 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +package com.azure.storage.blob; + +class BlobConfiguration { + static final String NAME = "storage-blob"; + static final String VERSION = "1.0.0-SNAPSHOT"; +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/BlobListDetails.java b/storage/client/src/main/java/com/azure/storage/blob/BlobListDetails.java new file mode 100644 index 0000000000000..445bb567f7823 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/BlobListDetails.java @@ -0,0 +1,133 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.storage.blob.models.ListBlobsIncludeItem; + +import java.util.ArrayList; + +/** + * This type allows users to specify additional information the service should return with each blob when listing blobs + * in a container (via a {@link ContainerURL} object). This type is immutable to ensure thread-safety of requests, so + * changing the details for a different listing operation requires construction of a new object. Null may be passed if + * none of the options are desirable. + */ +public final class BlobListDetails { + + private boolean copy; + + private boolean metadata; + + private boolean snapshots; + + private boolean uncommittedBlobs; + + private boolean deletedBlobs; + + public BlobListDetails() { + } + + /** + * Whether blob metadata related to any current or previous Copy Blob operation should be included in the + * response. + */ + public boolean copy() { + return copy; + } + + /** + * Whether blob metadata related to any current or previous Copy Blob operation should be included in the + * response. + */ + public BlobListDetails withCopy(boolean copy) { + this.copy = copy; + return this; + } + + /** + * Whether blob metadata should be returned. + */ + public boolean metadata() { + return metadata; + } + + /** + * Whether blob metadata should be returned. + */ + public BlobListDetails withMetadata(boolean metadata) { + this.metadata = metadata; + return this; + } + + /** + * Whether snapshots should be returned. Snapshots are listed from oldest to newest. + */ + public boolean snapshots() { + return snapshots; + } + + /** + * Whether snapshots should be returned. Snapshots are listed from oldest to newest. + */ + public BlobListDetails withSnapshots(boolean snapshots) { + this.snapshots = snapshots; + return this; + } + + /** + * Whether blobs for which blocks have been uploaded, but which have not been committed using Put Block List, + * should be included in the response. + */ + public boolean uncommittedBlobs() { + return uncommittedBlobs; + } + + /** + * Whether blobs for which blocks have been uploaded, but which have not been committed using Put Block List, + * should be included in the response. + */ + public BlobListDetails withUncommittedBlobs(boolean uncommittedBlobs) { + this.uncommittedBlobs = uncommittedBlobs; + return this; + } + + /** + * Whether blobs which have been soft deleted should be returned. + */ + public boolean deletedBlobs() { + return deletedBlobs; + } + + /** + * Whether blobs which have been soft deleted should be returned. + */ + public BlobListDetails withDeletedBlobs(boolean deletedBlobs) { + this.deletedBlobs = deletedBlobs; + return this; + } + + /* + This is used internally to convert the details structure into a list to pass to the protocol layer. The customer + should never have need for this. + */ + ArrayList toList() { + ArrayList details = new ArrayList(); + if (this.copy) { + details.add(ListBlobsIncludeItem.COPY); + } + if (this.deletedBlobs) { + details.add(ListBlobsIncludeItem.DELETED); + } + if (this.metadata) { + details.add(ListBlobsIncludeItem.METADATA); + } + if (this.snapshots) { + details.add(ListBlobsIncludeItem.SNAPSHOTS); + } + if (this.uncommittedBlobs) { + details.add(ListBlobsIncludeItem.UNCOMMITTEDBLOBS); + } + return details; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/BlobProperties.java b/storage/client/src/main/java/com/azure/storage/blob/BlobProperties.java new file mode 100644 index 0000000000000..ad5026cd63f35 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/BlobProperties.java @@ -0,0 +1,70 @@ +package com.azure.storage.blob; + +import com.azure.storage.blob.models.BlobGetPropertiesHeaders; +import com.azure.storage.blob.models.BlobType; + +public class BlobProperties { + + private final BlobType blobType; + + private final Metadata metadata; + + private final long blobSize; + + private final byte[] contentMD5; + + private final String contentEncoding; + + private final String contentDisposition; + + private final String contentLanguage; + + private final String cacheControl; + + //todo decide datetime representation for last modified time + + + BlobProperties(BlobGetPropertiesHeaders generatedHeaders) { + this.blobType = generatedHeaders.blobType(); + this.metadata = new Metadata(generatedHeaders.metadata()); + this.blobSize = generatedHeaders.contentLength(); + this.contentMD5 = generatedHeaders.contentMD5(); + this.contentEncoding = generatedHeaders.contentEncoding(); + this.contentDisposition = generatedHeaders.contentDisposition(); + this.contentLanguage = generatedHeaders.contentLanguage(); + this.cacheControl = generatedHeaders.cacheControl(); + } + + + public BlobType blobType() { + return blobType; + } + + public Metadata metadata() { + return metadata; + } + + public long blobSize() { + return blobSize; + } + + public byte[] contentMD5() { + return contentMD5; + } + + public String contentEncoding() { + return contentEncoding; + } + + public String contentDisposition() { + return contentDisposition; + } + + public String contentLanguage() { + return contentLanguage; + } + + public String cacheControl() { + return cacheControl; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/BlobRange.java b/storage/client/src/main/java/com/azure/storage/blob/BlobRange.java new file mode 100644 index 0000000000000..054f9f1a1b782 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/BlobRange.java @@ -0,0 +1,91 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import java.util.Locale; + +/** + * This is a representation of a range of bytes on a blob, typically used during a download operation. This type is + * immutable to ensure thread-safety of requests, so changing the values for a different operation requires construction + * of a new object. Passing null as a BlobRange value will default to the entire range of the blob. + */ +public final class BlobRange { + + private long offset; + private Long count; + + /** + * Specifies the download operation to start from the offset position (zero-based) and download the + * rest of the entire blob to the end. + * + * @param offset + * the zero-based position to start downloading + */ + public BlobRange(long offset) { + if (offset < 0) { + throw new IllegalArgumentException("BlobRange offset must be greater than or equal to 0."); + } + this.offset = offset; + } + + /** + * Specifies the download operation to start from the offset position (zero-based) and download the + * count number of bytes. + * + * @param offset + * the zero-based position to start downloading + * @param count + * the number of bytes to download + */ + public BlobRange(long offset, long count) { + this(offset); + if (count < 0) { + throw new IllegalArgumentException( + "BlobRange count must be greater than or equal to 0 if specified."); + } + this.count = count; + } + + /** + * The start of the range. Must be greater than or equal to 0. + */ + public long offset() { + return offset; + } + + /** + * How many bytes to include in the range. Must be greater than or equal to 0 if specified. + */ + public Long count() { + return count; + } + + /** + * @return A {@code String} compliant with the format of the Azure Storage x-ms-range and Range headers. + */ + @Override + public String toString() { + if (this.count != null) { + long rangeEnd = this.offset + this.count - 1; + return String.format( + Locale.ROOT, Constants.HeaderConstants.RANGE_HEADER_FORMAT, this.offset, rangeEnd); + } + + return String.format( + Locale.ROOT, Constants.HeaderConstants.BEGIN_RANGE_HEADER_FORMAT, this.offset); + } + + /* + In the case where the customer passes a null BlobRange, constructing the default of "0-" will fail on an empty blob. + By returning null as the header value, we elect not to set the header, which has the same effect, namely downloading + the whole blob, but it will not fail in the empty case. + */ + String toHeaderValue() { + // The default values of a BlobRange + if (this.offset == 0 && this.count == null) { + return null; + } + return this.toString(); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/BlobRawClient.java b/storage/client/src/main/java/com/azure/storage/blob/BlobRawClient.java new file mode 100644 index 0000000000000..58f48d3454b1f --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/BlobRawClient.java @@ -0,0 +1,768 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; +import com.azure.storage.blob.models.AccessTier; +import com.azure.storage.blob.models.BlobHTTPHeaders; +import com.azure.storage.blob.models.BlobStartCopyFromURLHeaders; +import com.azure.storage.blob.models.BlobsAbortCopyFromURLResponse; +import com.azure.storage.blob.models.BlobsAcquireLeaseResponse; +import com.azure.storage.blob.models.BlobsBreakLeaseResponse; +import com.azure.storage.blob.models.BlobsChangeLeaseResponse; +import com.azure.storage.blob.models.BlobsCopyFromURLResponse; +import com.azure.storage.blob.models.BlobsCreateSnapshotResponse; +import com.azure.storage.blob.models.BlobsDeleteResponse; +import com.azure.storage.blob.models.BlobsGetAccountInfoResponse; +import com.azure.storage.blob.models.BlobsGetPropertiesResponse; +import com.azure.storage.blob.models.BlobsReleaseLeaseResponse; +import com.azure.storage.blob.models.BlobsRenewLeaseResponse; +import com.azure.storage.blob.models.BlobsSetHTTPHeadersResponse; +import com.azure.storage.blob.models.BlobsSetMetadataResponse; +import com.azure.storage.blob.models.BlobsSetTierResponse; +import com.azure.storage.blob.models.BlobsStartCopyFromURLResponse; +import com.azure.storage.blob.models.BlobsUndeleteResponse; +import com.azure.storage.blob.models.DeleteSnapshotsOptionType; +import com.azure.storage.blob.models.LeaseAccessConditions; +import com.azure.storage.blob.models.ModifiedAccessConditions; +import reactor.core.publisher.Mono; + +import java.io.IOException; +import java.io.OutputStream; +import java.net.URL; +import java.time.Duration; + +/** + * Represents a URL to a blob of any type: block, append, or page. It may be obtained by direct construction or via the + * create method on a {@link ContainerAsyncClient} object. This class does not hold any state about a particular blob but is + * instead a convenient way of sending off appropriate requests to the resource on the service. Please refer to the + * Azure Docs for more information. + */ +class BlobRawClient { + + private BlobAsyncRawClient blobAsyncRawClient; + + /** + * Creates a {@code BlobAsyncRawClient} object pointing to the account specified by the URL and using the provided pipeline to + * make HTTP requests. + */ + BlobRawClient(AzureBlobStorageImpl azureBlobStorage) { + this.blobAsyncRawClient = new BlobAsyncRawClient(azureBlobStorage); + } + + /** + * Copies the data at the source URL to a blob. For more information, see the Azure Docs + * + * @param sourceURL + * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=start_copy "Sample code for BlobAsyncRawClient.startCopyFromURL")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=start_copy_helper "Helper for start_copy sample.")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsStartCopyFromURLResponse startCopyFromURL(URL sourceURL) { + return this.startCopyFromURL(sourceURL, null, null, null, null); + } + + /** + * Copies the data at the source URL to a blob. For more information, see the Azure Docs + * + * @param sourceURL + * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. + * @param metadata + * {@link Metadata} + * @param sourceModifiedAccessConditions + * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the + * modification of data. ETag and LastModifiedTime are used to construct conditions related to when the blob + * was changed relative to the given request. The request will fail if the specified condition is not + * satisfied. + * @param destAccessConditions + * {@link BlobAccessConditions} against the destination. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=start_copy "Sample code for BlobAsyncRawClient.startCopyFromURL")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=start_copy_helper "Helper for start_copy sample.")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsStartCopyFromURLResponse startCopyFromURL(URL sourceURL, Metadata metadata, + ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions, + Duration timeout) { + Mono response = blobAsyncRawClient + .startCopyFromURL(sourceURL, metadata, sourceModifiedAccessConditions, destAccessConditions, null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. For + * more information, see the Azure Docs. + * + * @param copyId + * The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link + * BlobStartCopyFromURLHeaders} object. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=abort_copy "Sample code for BlobAsyncRawClient.abortCopyFromURL")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsAbortCopyFromURLResponse abortCopyFromURL(String copyId) { + return this.abortCopyFromURL(copyId, null, null); + } + + /** + * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. For + * more information, see the Azure Docs. + * + * @param copyId + * The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link + * BlobStartCopyFromURLHeaders} object. + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=abort_copy "Sample code for BlobAsyncRawClient.abortCopyFromURL")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsAbortCopyFromURLResponse abortCopyFromURL(String copyId, LeaseAccessConditions leaseAccessConditions, + Duration timeout) { + Mono response = blobAsyncRawClient + .abortCopyFromURL(copyId, leaseAccessConditions, null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. + * For more information, see the Azure Docs + * + * @param copySource + * The source URL to copy from. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=sync_copy "Sample code for BlobAsyncRawClient.copyFromURL")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsCopyFromURLResponse syncCopyFromURL(URL copySource) { + return this.syncCopyFromURL(copySource, null, null, null, null); + } + + /** + * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. + * For more information, see the Azure Docs + * + * @param copySource + * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. + * @param metadata + * {@link Metadata} + * @param sourceModifiedAccessConditions + * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the + * modification of data. ETag and LastModifiedTime are used to construct conditions related to when the blob + * was changed relative to the given request. The request will fail if the specified condition is not + * satisfied. + * @param destAccessConditions + * {@link BlobAccessConditions} against the destination. + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=sync_copy "Sample code for BlobAsyncRawClient.copyFromURL")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsCopyFromURLResponse syncCopyFromURL(URL copySource, Metadata metadata, + ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions, + Duration timeout) { + Mono response = blobAsyncRawClient + .syncCopyFromURL(copySource, metadata, sourceModifiedAccessConditions, destAccessConditions, null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Reads a range of bytes from a blob. The response also includes the blob's properties and metadata. For more + * information, see the Azure Docs. + *

+ * Note that the response body has reliable download functionality built in, meaning that a failed download stream + * will be automatically retried. This behavior may be configured with {@link ReliableDownloadOptions}. + * + * @return Emits the successful response. + * @apiNote ## Sample Code \n [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=upload_download + * "Sample code for BlobAsyncRawClient.download")] \n For more samples, please see the [Samples + * file](%https://github.com/Azure/azure-storage-java/blob/New-Storage-SDK-V10-Preview/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public void download(OutputStream stream) throws IOException { + this.download(stream, null, null, null, false, null); + } + + /** + * Reads a range of bytes from a blob. The response also includes the blob's properties and metadata. For more + * information, see the Azure Docs. + *

+ * Note that the response body has reliable download functionality built in, meaning that a failed download stream + * will be automatically retried. This behavior may be configured with {@link ReliableDownloadOptions}. + * + * @param range + * {@link BlobRange} + * @param accessConditions + * {@link BlobAccessConditions} + * @param rangeGetContentMD5 + * Whether the contentMD5 for the specified blob range should be returned. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=upload_download "Sample code for BlobAsyncRawClient.download")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public void download(OutputStream stream, ReliableDownloadOptions options, BlobRange range, + BlobAccessConditions accessConditions, boolean rangeGetContentMD5, Duration timeout) throws IOException { + Mono response = blobAsyncRawClient + .download(range, accessConditions, rangeGetContentMD5, null /*context*/); + + DownloadResponse download = new DownloadResponse(timeout == null + ? response.block() + : response.block(timeout)); + + download.body(stream, options); + } + + /** + * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. For more + * information, see the Azure Docs. + * + * @return Emits the successful response. + * @apiNote ## Sample Code \n [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_delete + * "Sample code for BlobAsyncRawClient.delete")] \n For more samples, please see the [Samples + * file](%https://github.com/Azure/azure-storage-java/blob/New-Storage-SDK-V10-Preview/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsDeleteResponse delete() { + return this.delete(null, null, null); + } + + /** + * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. For more + * information, see the Azure Docs. + * + * @param deleteBlobSnapshotOptions + * Specifies the behavior for deleting the snapshots on this blob. {@code Include} will delete the base blob + * and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being deleted, you must + * pass null. + * @param accessConditions + * {@link BlobAccessConditions} + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_delete "Sample code for BlobAsyncRawClient.delete")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsDeleteResponse delete(DeleteSnapshotsOptionType deleteBlobSnapshotOptions, + BlobAccessConditions accessConditions, Duration timeout) { + Mono response = blobAsyncRawClient + .delete(deleteBlobSnapshotOptions, accessConditions, null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Returns the blob's metadata and properties. For more information, see the Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=properties_metadata "Sample code for BlobAsyncRawClient.getProperties")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsGetPropertiesResponse getProperties() { + return this.getProperties(null, null); + } + + /** + * Returns the blob's metadata and properties. For more information, see the Azure Docs. + * + * @param accessConditions + * {@link BlobAccessConditions} + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=properties_metadata "Sample code for BlobAsyncRawClient.getProperties")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsGetPropertiesResponse getProperties(BlobAccessConditions accessConditions, Duration timeout) { + Mono response = blobAsyncRawClient + .getProperties(accessConditions, null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Changes a blob's HTTP header properties. For more information, see the Azure + * Docs. + * + * @param headers + * {@link BlobHTTPHeaders} + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=properties_metadata "Sample code for BlobAsyncRawClient.setHTTPHeaders")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsSetHTTPHeadersResponse setHTTPHeaders(BlobHTTPHeaders headers) { + return this.setHTTPHeaders(headers, null, null); + } + + /** + * Changes a blob's HTTP header properties. For more information, see the Azure Docs. + * + * @param headers + * {@link BlobHTTPHeaders} + * @param accessConditions + * {@link BlobAccessConditions} + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=properties_metadata "Sample code for BlobAsyncRawClient.setHTTPHeaders")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsSetHTTPHeadersResponse setHTTPHeaders(BlobHTTPHeaders headers, BlobAccessConditions accessConditions, + Duration timeout) { + Mono response = blobAsyncRawClient + .setHTTPHeaders(headers, accessConditions, null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Changes a blob's metadata. For more information, see the Azure Docs. + * + * @param metadata + * {@link Metadata} + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=properties_metadata "Sample code for BlobAsyncRawClient.setMetadata")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsSetMetadataResponse setMetadata(Metadata metadata) { + return this.setMetadata(metadata, null, null); + } + + /** + * Changes a blob's metadata. For more information, see the Azure Docs. + * + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=properties_metadata "Sample code for BlobAsyncRawClient.setMetadata")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsSetMetadataResponse setMetadata(Metadata metadata, BlobAccessConditions accessConditions, + Duration timeout) { + Mono response = blobAsyncRawClient + .setMetadata(metadata, accessConditions, null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Creates a read-only snapshot of a blob. For more information, see the Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=snapshot "Sample code for BlobAsyncRawClient.createSnapshot")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsCreateSnapshotResponse createSnapshot() { + return this.createSnapshot(null, null, null); + } + + /** + * Creates a read-only snapshot of a blob. For more information, see the Azure Docs. + * + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=snapshot "Sample code for BlobAsyncRawClient.createSnapshot")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsCreateSnapshotResponse createSnapshot(Metadata metadata, BlobAccessConditions accessConditions, + Duration timeout) { + Mono response = blobAsyncRawClient + .createSnapshot(metadata, accessConditions, null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in + * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of + * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. + *

+ * For detailed information about block blob level tiering see the Azure Docs. + * + * @param tier + * The new tier for the blob. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=tier "Sample code for BlobAsyncRawClient.setTier")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsSetTierResponse setTier(AccessTier tier) { + return this.setTier(tier, null, null); + } + + /** + * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in + * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of + * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. + *

+ * For detailed information about block blob level tiering see the Azure Docs. + * + * @param tier + * The new tier for the blob. + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=tier "Sample code for BlobAsyncRawClient.setTier")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsSetTierResponse setTier(AccessTier tier, LeaseAccessConditions leaseAccessConditions, Duration timeout) { + Mono response = blobAsyncRawClient + .setTier(tier, leaseAccessConditions, null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. + * For more information, see the Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=undelete "Sample code for BlobAsyncRawClient.undelete")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsUndeleteResponse undelete() { + return this.undelete(null); + } + + /** + * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. + * For more information, see the Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=undelete "Sample code for BlobAsyncRawClient.undelete")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsUndeleteResponse undelete(Duration timeout) { + Mono response = blobAsyncRawClient + .undelete(null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 + * seconds, or infinite (-1). For more information, see the Azure Docs. + * + * @param proposedId + * A {@code String} in any valid GUID format. May be null. + * @param duration + * The duration of the lease, in seconds, or negative one (-1) for a lease that + * never expires. A non-infinite lease can be between 15 and 60 seconds. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobAsyncRawClient.acquireLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsAcquireLeaseResponse acquireLease(String proposedId, int duration) { + return this.acquireLease(proposedId, duration, null, null); + } + + /** + * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 + * seconds, or infinite (-1). For more information, see the Azure Docs. + * + * @param proposedID + * A {@code String} in any valid GUID format. May be null. + * @param duration + * The duration of the lease, in seconds, or negative one (-1) for a lease that + * never expires. A non-infinite lease can be between 15 and 60 seconds. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobAsyncRawClient.acquireLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsAcquireLeaseResponse acquireLease(String proposedID, int duration, + ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { + Mono response = blobAsyncRawClient + .acquireLease(proposedID, duration, modifiedAccessConditions, null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Renews the blob's previously-acquired lease. For more information, see the Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the blob. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobAsyncRawClient.renewLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsRenewLeaseResponse renewLease(String leaseID) { + return this.renewLease(leaseID, null, null); + } + + /** + * Renews the blob's previously-acquired lease. For more information, see the Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the blob. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobAsyncRawClient.renewLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsRenewLeaseResponse renewLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions, + Duration timeout) { + Mono response = blobAsyncRawClient + .renewLease(leaseID, modifiedAccessConditions, null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Releases the blob's previously-acquired lease. For more information, see the Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the blob. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobAsyncRawClient.releaseLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsReleaseLeaseResponse releaseLease(String leaseID) { + return this.releaseLease(leaseID, null, null); + } + + /** + * Releases the blob's previously-acquired lease. For more information, see the Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the blob. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobAsyncRawClient.releaseLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsReleaseLeaseResponse releaseLease(String leaseID, + ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { + Mono response = blobAsyncRawClient + .releaseLease(leaseID, modifiedAccessConditions, null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant + * to break a fixed-duration lease when it expires or an infinite lease immediately. For more information, see the + * Azure Docs. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobAsyncRawClient.breakLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/New-Storage-SDK-V10-Preview/src/test/java/com/microsoft/azure/storage/Samples.java) + * + * @return + * Emits the successful response. + */ + public BlobsBreakLeaseResponse breakLease() { + return this.breakLease(null, null, null); + } + + /** + * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant + * to break a fixed-duration lease when it expires or an infinite lease immediately. For more information, see the + * Azure Docs. + * + * @param breakPeriodInSeconds + * An optional {@code Integer} representing the proposed duration of seconds that the lease should continue + * before it is broken, between 0 and 60 seconds. This break period is only used if it is shorter than the + * time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be + * available before the break period has expired, but the lease may be held for longer than the break + * period. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobAsyncRawClient.breakLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsBreakLeaseResponse breakLease(Integer breakPeriodInSeconds, + ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { + Mono response = blobAsyncRawClient + .breakLease(breakPeriodInSeconds, modifiedAccessConditions, null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * ChangeLease changes the blob's lease ID. For more information, see the Azure Docs. + * + * @param leaseId + * The leaseId of the active lease on the blob. + * @param proposedID + * A {@code String} in any valid GUID format. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobAsyncRawClient.changeLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsChangeLeaseResponse changeLease(String leaseId, String proposedID) { + return this.changeLease(leaseId, proposedID, null, null); + } + + /** + * ChangeLease changes the blob's lease ID. For more information, see the Azure Docs. + * + * @param leaseId + * The leaseId of the active lease on the blob. + * @param proposedID + * A {@code String} in any valid GUID format. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobAsyncRawClient.changeLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsChangeLeaseResponse changeLease(String leaseId, String proposedID, + ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { + Mono response = blobAsyncRawClient + .changeLease(leaseId, proposedID, modifiedAccessConditions, null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=account_info "Sample code for BlobAsyncRawClient.getAccountInfo")] \n + * For more samples, please see the [Samples file](https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsGetAccountInfoResponse getAccountInfo() { + return this.getAccountInfo(null); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=account_info "Sample code for BlobAsyncRawClient.getAccountInfo")] \n + * For more samples, please see the [Samples file](https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlobsGetAccountInfoResponse getAccountInfo(Duration timeout) { + Mono response = blobAsyncRawClient + .getAccountInfo(null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/BlobSASPermission.java b/storage/client/src/main/java/com/azure/storage/blob/BlobSASPermission.java new file mode 100644 index 0000000000000..3a7c747b2fc77 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/BlobSASPermission.java @@ -0,0 +1,181 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import java.util.Locale; + +/** + * This is a helper class to construct a string representing the permissions granted by a ServiceSAS to a blob. Setting + * a value to true means that any SAS which uses these permissions will grant permissions for that operation. Once all + * the values are set, this should be serialized with toString and set as the permissions field on a + * {@link ServiceSASSignatureValues} object. It is possible to construct the permissions string without this class, but + * the order of the permissions is particular and this class guarantees correctness. + */ +final class BlobSASPermission { + + private boolean read; + + private boolean add; + + private boolean create; + + private boolean write; + + private boolean delete; + + /** + * Initializes a {@code BlobSASPermission} object with all fields set to false. + */ + public BlobSASPermission() { + } + + /** + * Creates a {@code BlobSASPermission} from the specified permissions string. This method will throw an + * {@code IllegalArgumentException} if it encounters a character that does not correspond to a valid permission. + * + * @param permString + * A {@code String} which represents the {@code BlobSASPermission}. + * + * @return A {@code BlobSASPermission} generated from the given {@code String}. + */ + public static BlobSASPermission parse(String permString) { + BlobSASPermission permissions = new BlobSASPermission(); + + for (int i = 0; i < permString.length(); i++) { + char c = permString.charAt(i); + switch (c) { + case 'r': + permissions.read = true; + break; + case 'a': + permissions.add = true; + break; + case 'c': + permissions.create = true; + break; + case 'w': + permissions.write = true; + break; + case 'd': + permissions.delete = true; + break; + default: + throw new IllegalArgumentException( + String.format(Locale.ROOT, SR.ENUM_COULD_NOT_BE_PARSED_INVALID_VALUE, "Permissions", permString, c)); + } + } + return permissions; + } + + /** + * Specifies Read access granted. + */ + public boolean read() { + return read; + } + + /** + * Specifies Read access granted. + */ + public BlobSASPermission withRead(boolean read) { + this.read = read; + return this; + } + + /** + * Specifies Add access granted. + */ + public boolean add() { + return add; + } + + /** + * Specifies Add access granted. + */ + public BlobSASPermission withAdd(boolean add) { + this.add = add; + return this; + } + + /** + * Specifies Create access granted. + */ + public boolean create() { + return create; + } + + /** + * Specifies Create access granted. + */ + public BlobSASPermission withCreate(boolean create) { + this.create = create; + return this; + } + + /** + * Specifies Write access granted. + */ + public boolean write() { + return write; + } + + /** + * Specifies Write access granted. + */ + public BlobSASPermission withWrite(boolean write) { + this.write = write; + return this; + } + + /** + * Specifies Delete access granted. + */ + public boolean delete() { + return delete; + } + + /** + * Specifies Delete access granted. + */ + public BlobSASPermission withDelete(boolean delete) { + this.delete = delete; + return this; + } + + /** + * Converts the given permissions to a {@code String}. Using this method will guarantee the permissions are in an + * order accepted by the service. + * + * @return A {@code String} which represents the {@code BlobSASPermission}. + */ + @Override + public String toString() { + // The order of the characters should be as specified here to ensure correctness: + // https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas + + final StringBuilder builder = new StringBuilder(); + + if (this.read) { + builder.append('r'); + } + + if (this.add) { + builder.append('a'); + } + + if (this.create) { + builder.append('c'); + } + + if (this.write) { + builder.append('w'); + } + + if (this.delete) { + builder.append('d'); + } + + return builder.toString(); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/BlobURLParts.java b/storage/client/src/main/java/com/azure/storage/blob/BlobURLParts.java new file mode 100644 index 0000000000000..f4bf83810b743 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/BlobURLParts.java @@ -0,0 +1,197 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.implementation.http.UrlBuilder; + +import java.net.MalformedURLException; +import java.net.URL; +import java.util.HashMap; +import java.util.Map; + +/** + * A BlobURLParts object represents the components that make up an Azure Storage Container/Blob URL. You may parse an + * existing URL into its parts with the {@link URLParser} class. You may construct a URL from parts by calling toURL(). + * It is also possible to use the empty constructor to buildClient a blobURL from scratch. + * NOTE: Changing any SAS-related field requires computing a new SAS signature. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=url_parts "Sample code for BlobURLParts")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ +final class BlobURLParts { + + private String scheme; + + private String host; + + private String containerName; + + private String blobName; + + private String snapshot; + + private SASQueryParameters sasQueryParameters; + + private Map unparsedParameters; + + /** + * Initializes a BlobURLParts object with all fields set to null, except unparsedParameters, which is an empty map. + * This may be useful for constructing a URL to a blob storage resource from scratch when the constituent parts are + * already known. + */ + public BlobURLParts() { + unparsedParameters = new HashMap<>(); + } + + /** + * The scheme. Ex: "https://". + */ + public String scheme() { + return scheme; + } + + /** + * The scheme. Ex: "https://". + */ + public BlobURLParts withScheme(String scheme) { + this.scheme = scheme; + return this; + } + + /** + * The host. Ex: "account.blob.core.windows.net". + */ + public String host() { + return host; + } + + /** + * The host. Ex: "account.blob.core.windows.net". + */ + public BlobURLParts withHost(String host) { + this.host = host; + return this; + } + + /** + * The container name or {@code null} if a {@link StorageAsyncRawClient} was parsed. + */ + public String containerName() { + return containerName; + } + + /** + * The container name or {@code null} if a {@link StorageAsyncRawClient} was parsed. + */ + public BlobURLParts withContainerName(String containerName) { + this.containerName = containerName; + return this; + } + + /** + * The blob name or {@code null} if a {@link StorageAsyncRawClient} or {@link ContainerAsyncClient} was parsed. + */ + public String blobName() { + return blobName; + } + + /** + * The blob name or {@code null} if a {@link StorageAsyncRawClient} or {@link ContainerAsyncClient} was parsed. + */ + public BlobURLParts withBlobName(String blobName) { + this.blobName = blobName; + return this; + } + + /** + * The snapshot time or {@code null} if anything except a URL to a snapshot was parsed. + */ + public String snapshot() { + return snapshot; + } + + /** + * The snapshot time or {@code null} if anything except a URL to a snapshot was parsed. + */ + public BlobURLParts withSnapshot(String snapshot) { + this.snapshot = snapshot; + return this; + } + + /** + * A {@link SASQueryParameters} representing the SAS query parameters or {@code null} if there were no such + * parameters. + */ + public SASQueryParameters sasQueryParameters() { + return sasQueryParameters; + } + + /** + * A {@link SASQueryParameters} representing the SAS query parameters or {@code null} if there were no such + * parameters. + */ + public BlobURLParts withSasQueryParameters(SASQueryParameters sasQueryParameters) { + this.sasQueryParameters = sasQueryParameters; + return this; + } + + /** + * The query parameter key value pairs aside from SAS parameters and snapshot time or {@code null} if there were + * no such parameters. + */ + public Map unparsedParameters() { + return unparsedParameters; + } + + /** + * The query parameter key value pairs aside from SAS parameters and snapshot time or {@code null} if there were + * no such parameters. + */ + public BlobURLParts withUnparsedParameters(Map unparsedParameters) { + this.unparsedParameters = unparsedParameters; + return this; + } + + /** + * Converts the blob URL parts to a {@link URL}. + * + * @return A {@code java.net.URL} to the blob resource composed of all the elements in the object. + * + * @throws MalformedURLException + * The fields present on the BlobURLParts object were insufficient to construct a valid URL or were + * ill-formatted. + */ + public URL toURL() throws MalformedURLException { + UrlBuilder url = new UrlBuilder().withScheme(this.scheme).withHost(this.host); + + StringBuilder path = new StringBuilder(); + if (this.containerName != null) { + path.append(this.containerName); + if (this.blobName != null) { + path.append('/'); + path.append(this.blobName); + } + } + url.withPath(path.toString()); + + if (this.snapshot != null) { + url.setQueryParameter(Constants.SNAPSHOT_QUERY_PARAMETER, this.snapshot); + } + if (this.sasQueryParameters != null) { + String encodedSAS = this.sasQueryParameters.encode(); + if (encodedSAS.length() != 0) { + url.withQuery(encodedSAS); + } + } + + for (Map.Entry entry : this.unparsedParameters.entrySet()) { + // The commas are intentionally encoded. + url.setQueryParameter(entry.getKey(), + Utility.safeURLEncode(String.join(",", entry.getValue()))); + } + + return url.toURL(); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/BlockBlobAsyncClient.java b/storage/client/src/main/java/com/azure/storage/blob/BlockBlobAsyncClient.java new file mode 100644 index 0000000000000..617a8be812299 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/BlockBlobAsyncClient.java @@ -0,0 +1,395 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.http.rest.ResponseBase; +import com.azure.core.util.Context; +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; +import com.azure.storage.blob.models.BlobHTTPHeaders; +import com.azure.storage.blob.models.BlockItem; +import com.azure.storage.blob.models.BlockListType; +import com.azure.storage.blob.models.LeaseAccessConditions; +import com.azure.storage.blob.models.SourceModifiedAccessConditions; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.netty.ByteBufFlux; + +import java.io.File; +import java.net.URL; +import java.nio.ByteBuffer; +import java.nio.file.Paths; +import java.util.List; + +/** + * Client to a block blob. It may only be instantiated through a {@link BlockBlobClientBuilder}, via + * the method {@link BlobAsyncClient#asBlockBlobAsyncClient()}, or via the method + * {@link ContainerAsyncClient#getBlockBlobAsyncClient(String)}. This class does not hold + * any state about a particular blob, but is instead a convenient way of sending appropriate + * requests to the resource on the service. + * + *

+ * This client contains operations on a blob. Operations on a container are available on {@link ContainerAsyncClient}, + * and operations on the service are available on {@link StorageAsyncClient}. + * + *

+ * Please refer + * to the Azure Docs + * for more information. + * + *

+ * Note this client is an async client that returns reactive responses from Spring Reactor Core + * project (https://projectreactor.io/). Calling the methods in this client will NOT + * start the actual network operation, until {@code .subscribe()} is called on the reactive response. + * You can simply convert one of these responses to a {@link java.util.concurrent.CompletableFuture} + * object through {@link Mono#toFuture()}. + */ +public final class BlockBlobAsyncClient extends BlobAsyncClient { + + private BlockBlobAsyncRawClient blockBlobAsyncRawClient; + + /** + * Indicates the maximum number of bytes that can be sent in a call to upload. + */ + public static final int MAX_UPLOAD_BLOB_BYTES = 256 * Constants.MB; + + /** + * Indicates the maximum number of bytes that can be sent in a call to stageBlock. + */ + public static final int MAX_STAGE_BLOCK_BYTES = 100 * Constants.MB; + + /** + * Indicates the maximum number of blocks allowed in a block blob. + */ + public static final int MAX_BLOCKS = 50000; + + /** + * Package-private constructor for use by {@link BlockBlobClientBuilder}. + * @param azureBlobStorage the API client for blob storage API + */ + BlockBlobAsyncClient(AzureBlobStorageImpl azureBlobStorage) { + super(azureBlobStorage); + blockBlobAsyncRawClient = new BlockBlobAsyncRawClient(azureBlobStorage); + } + + /** + * Static method for getting a new builder for this class. + * + * @return + * A new {@link BlockBlobClientBuilder} instance. + */ + public static BlockBlobClientBuilder builder() { + return new BlockBlobClientBuilder(); + } + + /** + * Creates a new block blob, or updates the content of an existing block blob. + * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not + * supported with PutBlob; the content of the existing blob is overwritten with the new content. To + * perform a partial update of a block blob's, use PutBlock and PutBlockList. + * For more information, see the + * Azure Docs. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + *

+ * + * @param data + * The data to write to the blob. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flux must produce the same data each time it is subscribed to. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * emitted by the {@code Flux}. + * + * @return + * A reactive response containing the information of the uploaded block blob. + */ + public Mono upload(Flux data, long length) { + return this.upload(data, length, null, null, null, null); + } + + /** + * Creates a new block blob, or updates the content of an existing block blob. + * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not + * supported with PutBlob; the content of the existing blob is overwritten with the new content. To + * perform a partial update of a block blob's, use PutBlock and PutBlockList. + * For more information, see the + * Azure Docs. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + *

+ * + * @param data + * The data to write to the blob. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flux must produce the same data each time it is subscribed to. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * emitted by the {@code Flux}. + * @param headers + * {@link BlobHTTPHeaders} + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response containing the information of the uploaded block blob. + */ + public Mono upload(Flux data, long length, BlobHTTPHeaders headers, + Metadata metadata, BlobAccessConditions accessConditions, Context context) { + return blockBlobAsyncRawClient + .upload(data.map(Unpooled::wrappedBuffer), length, headers, metadata, accessConditions, context) + .then(); + } + + public Mono uploadFromFile(String filePath) { + return this.uploadFromFile(filePath, null, null, null, null); + } + + public Mono uploadFromFile(String filePath, BlobHTTPHeaders headers, Metadata metadata, + BlobAccessConditions accessConditions, Context context) { + //TODO make this method smart + return this.blockBlobAsyncRawClient + .upload(ByteBufFlux.fromPath(Paths.get(filePath)), new File(filePath).length()) + .then(); + } + + /** + * Uploads the specified block to the block blob's "staging area" to be later committed by a call to + * commitBlockList. For more information, see the + * Azure Docs. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + * + * @param base64BlockID + * A Base64 encoded {@code String} that specifies the ID for this block. Note that all block ids for a given + * blob must be the same length. + * @param data + * The data to write to the block. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flux must produce the same data each time it is subscribed to. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * emitted by the {@code Flux}. + * + * @return + * A reactive response signalling completion. + */ + public Mono stageBlock(String base64BlockID, Flux data, + long length) { + return this.stageBlock(base64BlockID, data, length, null, null); + } + + /** + * Uploads the specified block to the block blob's "staging area" to be later committed by a call to + * commitBlockList. For more information, see the + * Azure Docs. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + * + * @param base64BlockID + * A Base64 encoded {@code String} that specifies the ID for this block. Note that all block ids for a given + * blob must be the same length. + * @param data + * The data to write to the block. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flux must produce the same data each time it is subscribed to. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * emitted by the {@code Flux}. + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response signalling completion. + */ + public Mono stageBlock(String base64BlockID, Flux data, long length, + LeaseAccessConditions leaseAccessConditions, Context context) { + return blockBlobAsyncRawClient + .stageBlock(base64BlockID, data, length, leaseAccessConditions, context) + .then(); + } + + /** + * Creates a new block to be committed as part of a blob where the contents are read from a URL. For more + * information, see the Azure Docs. + * + * @param base64BlockID + * A Base64 encoded {@code String} that specifies the ID for this block. Note that all block ids for a given + * blob must be the same length. + * @param sourceURL + * The url to the blob that will be the source of the copy. A source blob in the same storage account can be + * authenticated via Shared Key. However, if the source is a blob in another account, the source blob must + * either be public or must be authenticated via a shared access signature. If the source blob is public, no + * authentication is required to perform the operation. + * @param sourceRange + * {@link BlobRange} + * + * @return + * A reactive response signalling completion. + */ + public Mono stageBlockFromURL(String base64BlockID, URL sourceURL, + BlobRange sourceRange) { + return this.stageBlockFromURL(base64BlockID, sourceURL, sourceRange, null, + null, null, null); + } + + /** + * Creates a new block to be committed as part of a blob where the contents are read from a URL. For more + * information, see the Azure Docs. + * + * @param base64BlockID + * A Base64 encoded {@code String} that specifies the ID for this block. Note that all block ids for a given + * blob must be the same length. + * @param sourceURL + * The url to the blob that will be the source of the copy. A source blob in the same storage account can + * be authenticated via Shared Key. However, if the source is a blob in another account, the source blob + * must either be public or must be authenticated via a shared access signature. If the source blob is + * public, no authentication is required to perform the operation. + * @param sourceRange + * {@link BlobRange} + * @param sourceContentMD5 + * An MD5 hash of the block content from the source blob. If specified, the service will calculate the MD5 + * of the received data and fail the request if it does not match the provided MD5. + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param sourceModifiedAccessConditions + * {@link SourceModifiedAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response signalling completion. + */ + public Mono stageBlockFromURL(String base64BlockID, URL sourceURL, + BlobRange sourceRange, byte[] sourceContentMD5, LeaseAccessConditions leaseAccessConditions, + SourceModifiedAccessConditions sourceModifiedAccessConditions, Context context) { + return blockBlobAsyncRawClient + .stageBlockFromURL(base64BlockID, sourceURL, sourceRange, sourceContentMD5, leaseAccessConditions, sourceModifiedAccessConditions, context) + .then(); + } + + /** + * Returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter. + * For more information, see the + * Azure Docs. + * + * @param listType + * Specifies which type of blocks to return. + * + * @return + * A reactive response containing the list of blocks. + */ + public Flux listBlocks(BlockListType listType) { + return this.listBlocks(listType, null, null); + } + + /** + * + * Returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter. + * For more information, see the + * Azure Docs. + * + * @param listType + * Specifies which type of blocks to return. + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response containing the list of blocks. + */ + public Flux listBlocks(BlockListType listType, + LeaseAccessConditions leaseAccessConditions, Context context) { + return blockBlobAsyncRawClient + .listBlocks(listType, leaseAccessConditions, context) + .map(ResponseBase::value) + .flatMapMany(bl -> { + Flux committed = Flux.fromIterable(bl.committedBlocks()) + .map(block -> new BlockItem(block, true)); + Flux uncommitted = Flux.fromIterable(bl.uncommittedBlocks()) + .map(block -> new BlockItem(block, false)); + return Flux.concat(committed, uncommitted); + }); + } + + /** + * Writes a blob by specifying the list of block IDs that are to make up the blob. + * In order to be written as part of a blob, a block must have been successfully written + * to the server in a prior stageBlock operation. You can call commitBlockList to update a blob + * by uploading only those blocks that have changed, then committing the new and existing + * blocks together. Any blocks not specified in the block list and permanently deleted. + * For more information, see the + * Azure Docs. + * + * @param base64BlockIDs + * A list of base64 encode {@code String}s that specifies the block IDs to be committed. + * + * @return + * A reactive response containing the information of the block blob. + */ + public Mono commitBlockList(List base64BlockIDs) { + return this.commitBlockList(base64BlockIDs, null, null, null, null); + } + + /** + * Writes a blob by specifying the list of block IDs that are to make up the blob. + * In order to be written as part of a blob, a block must have been successfully written + * to the server in a prior stageBlock operation. You can call commitBlockList to update a blob + * by uploading only those blocks that have changed, then committing the new and existing + * blocks together. Any blocks not specified in the block list and permanently deleted. + * For more information, see the + * Azure Docs. + * + * @param base64BlockIDs + * A list of base64 encode {@code String}s that specifies the block IDs to be committed. + * @param headers + * {@link BlobHTTPHeaders} + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response containing the information of the block blob. + */ + public Mono commitBlockList(List base64BlockIDs, + BlobHTTPHeaders headers, Metadata metadata, BlobAccessConditions accessConditions, Context context) { + return blockBlobAsyncRawClient + .commitBlockList(base64BlockIDs, headers, metadata, accessConditions, context) + .then(); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/BlockBlobAsyncRawClient.java b/storage/client/src/main/java/com/azure/storage/blob/BlockBlobAsyncRawClient.java new file mode 100644 index 0000000000000..7c789842c05e5 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/BlockBlobAsyncRawClient.java @@ -0,0 +1,394 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.http.HttpPipeline; +import com.azure.core.util.Context; +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; +import com.azure.storage.blob.models.*; +import io.netty.buffer.ByteBuf; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.net.URL; +import java.util.List; + +import static com.azure.storage.blob.Utility.postProcessResponse; + +/** + * Represents a URL to a block blob. It may be obtained by direct construction or via the create method on a + * {@link ContainerAsyncClient} object. This class does not hold any state about a particular blob but is instead a convenient + * way of sending off appropriate requests to the resource on the service. Please refer to the + * Azure Docs + * for more information on block blobs. + */ +final class BlockBlobAsyncRawClient extends BlobAsyncRawClient { + + /** + * Indicates the maximum number of bytes that can be sent in a call to upload. + */ + public static final int MAX_UPLOAD_BLOB_BYTES = 256 * Constants.MB; + + /** + * Indicates the maximum number of bytes that can be sent in a call to stageBlock. + */ + public static final int MAX_STAGE_BLOCK_BYTES = 100 * Constants.MB; + + /** + * Indicates the maximum number of blocks allowed in a block blob. + */ + public static final int MAX_BLOCKS = 50000; + + /** + * Creates a {@code BlockBlobAsyncRawClient} object pointing to the account specified by the URL and using the provided + */ + public BlockBlobAsyncRawClient(AzureBlobStorageImpl azureBlobStorage) { + super(azureBlobStorage); + } + + + /** + * Creates a new block blob, or updates the content of an existing block blob. + * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not + * supported with PutBlob; the content of the existing blob is overwritten with the new content. To + * perform a partial update of a block blob's, use PutBlock and PutBlockList. + * For more information, see the + * Azure Docs. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + *

+ * For more efficient bulk-upload scenarios, please refer to the {@link TransferManager} for convenience methods. + * + * @param data + * The data to write to the blob. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * emitted by the {@code Flux}. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=upload_download "Sample code for BlockBlobAsyncRawClient.upload")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono upload(Flux data, long length) { + return this.upload(data, length, null, null, null, null); + } + + /** + * Creates a new block blob, or updates the content of an existing block blob. + * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not + * supported with PutBlob; the content of the existing blob is overwritten with the new content. To + * perform a partial update of a block blob's, use PutBlock and PutBlockList. + * For more information, see the + * Azure Docs. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + *

+ * For more efficient bulk-upload scenarios, please refer to the {@link TransferManager} for convenience methods. + * + * @param data + * The data to write to the blob. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * emitted by the {@code Flux}. + * @param headers + * {@link BlobHTTPHeaders} + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=upload_download "Sample code for BlockBlobAsyncRawClient.upload")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono upload(Flux data, long length, BlobHTTPHeaders headers, + Metadata metadata, BlobAccessConditions accessConditions, Context context) { + metadata = metadata == null ? new Metadata() : metadata; + accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions; + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.blockBlobs().uploadWithRestResponseAsync(null, + null, data, length, null, metadata, null, null, + null, null, headers, accessConditions.leaseAccessConditions(), + accessConditions.modifiedAccessConditions(), context)); + } + + /** + * Uploads the specified block to the block blob's "staging area" to be later committed by a call to + * commitBlockList. For more information, see the + * Azure Docs. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + * + * @param base64BlockID + * A Base64 encoded {@code String} that specifies the ID for this block. Note that all block ids for a given + * blob must be the same length. + * @param data + * The data to write to the block. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * emitted by the {@code Flux}. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blocks "Sample code for BlockBlobAsyncRawClient.stageBlock")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono stageBlock(String base64BlockID, Flux data, + long length) { + return this.stageBlock(base64BlockID, data, length, null, null); + } + + /** + * Uploads the specified block to the block blob's "staging area" to be later committed by a call to + * commitBlockList. For more information, see the + * Azure Docs. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + * + * @param base64BlockID + * A Base64 encoded {@code String} that specifies the ID for this block. Note that all block ids for a given + * blob must be the same length. + * @param data + * The data to write to the block. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * emitted by the {@code Flux}. + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blocks "Sample code for BlockBlobAsyncRawClient.stageBlock")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono stageBlock(String base64BlockID, Flux data, long length, + LeaseAccessConditions leaseAccessConditions, Context context) { + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.blockBlobs().stageBlockWithRestResponseAsync(null, + null, base64BlockID, length, data, null, null, null, + null, null, null, leaseAccessConditions, context)); + } + + /** + * Creates a new block to be committed as part of a blob where the contents are read from a URL. For more + * information, see the Azure Docs. + * + * @param base64BlockID + * A Base64 encoded {@code String} that specifies the ID for this block. Note that all block ids for a given + * blob must be the same length. + * @param sourceURL + * The url to the blob that will be the source of the copy. A source blob in the same storage account can be + * authenticated via Shared Key. However, if the source is a blob in another account, the source blob must + * either be public or must be authenticated via a shared access signature. If the source blob is public, no + * authentication is required to perform the operation. + * @param sourceRange + * {@link BlobRange} + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=block_from_url "Sample code for BlockBlobAsyncRawClient.stageBlockFromURL")] + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono stageBlockFromURL(String base64BlockID, URL sourceURL, + BlobRange sourceRange) { + return this.stageBlockFromURL(base64BlockID, sourceURL, sourceRange, null, + null, null, null); + } + + /** + * Creates a new block to be committed as part of a blob where the contents are read from a URL. For more + * information, see the Azure Docs. + * + * @param base64BlockID + * A Base64 encoded {@code String} that specifies the ID for this block. Note that all block ids for a given + * blob must be the same length. + * @param sourceURL + * The url to the blob that will be the source of the copy. A source blob in the same storage account can + * be authenticated via Shared Key. However, if the source is a blob in another account, the source blob + * must either be public or must be authenticated via a shared access signature. If the source blob is + * public, no authentication is required to perform the operation. + * @param sourceRange + * {@link BlobRange} + * @param sourceContentMD5 + * An MD5 hash of the block content from the source blob. If specified, the service will calculate the MD5 + * of the received data and fail the request if it does not match the provided MD5. + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param sourceModifiedAccessConditions + * {@link SourceModifiedAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=block_from_url "Sample code for BlockBlobAsyncRawClient.stageBlockFromURL")] + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono stageBlockFromURL(String base64BlockID, URL sourceURL, + BlobRange sourceRange, byte[] sourceContentMD5, LeaseAccessConditions leaseAccessConditions, + SourceModifiedAccessConditions sourceModifiedAccessConditions, Context context) { + sourceRange = sourceRange == null ? new BlobRange(0) : sourceRange; + context = context == null ? Context.NONE : context; + + return postProcessResponse( + this.azureBlobStorage.blockBlobs().stageBlockFromURLWithRestResponseAsync(null, null, + base64BlockID, 0, sourceURL, sourceRange.toHeaderValue(), sourceContentMD5, null, + null, null, null, null, + leaseAccessConditions, sourceModifiedAccessConditions, context)); + } + + /** + * Returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter. + * For more information, see the + * Azure Docs. + * + * @param listType + * Specifies which type of blocks to return. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blocks "Sample code for BlockBlobAsyncRawClient.listBlocks")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono listBlocks(BlockListType listType) { + return this.listBlocks(listType, null, null); + } + + /** + * Returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter. + * For more information, see the + * Azure Docs. + * + * @param listType + * Specifies which type of blocks to return. + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blocks "Sample code for BlockBlobAsyncRawClient.listBlocks")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono listBlocks(BlockListType listType, + LeaseAccessConditions leaseAccessConditions, Context context) { + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.blockBlobs().getBlockListWithRestResponseAsync( + null, null, listType, null, null, null, null, + leaseAccessConditions, context)); + } + + /** + * Writes a blob by specifying the list of block IDs that are to make up the blob. + * In order to be written as part of a blob, a block must have been successfully written + * to the server in a prior stageBlock operation. You can call commitBlockList to update a blob + * by uploading only those blocks that have changed, then committing the new and existing + * blocks together. Any blocks not specified in the block list and permanently deleted. + * For more information, see the + * Azure Docs. + *

+ * For more efficient bulk-upload scenarios, please refer to the {@link TransferManager} for convenience methods. + * + * @param base64BlockIDs + * A list of base64 encode {@code String}s that specifies the block IDs to be committed. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blocks "Sample code for BlockBlobAsyncRawClient.commitBlockList")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono commitBlockList(List base64BlockIDs) { + return this.commitBlockList(base64BlockIDs, null, null, null, null); + } + + /** + * Writes a blob by specifying the list of block IDs that are to make up the blob. + * In order to be written as part of a blob, a block must have been successfully written + * to the server in a prior stageBlock operation. You can call commitBlockList to update a blob + * by uploading only those blocks that have changed, then committing the new and existing + * blocks together. Any blocks not specified in the block list and permanently deleted. + * For more information, see the + * Azure Docs. + *

+ * For more efficient bulk-upload scenarios, please refer to the {@link TransferManager} for convenience methods. + * + * @param base64BlockIDs + * A list of base64 encode {@code String}s that specifies the block IDs to be committed. + * @param headers + * {@link BlobHTTPHeaders} + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blocks "Sample code for BlockBlobAsyncRawClient.commitBlockList")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono commitBlockList(List base64BlockIDs, + BlobHTTPHeaders headers, Metadata metadata, BlobAccessConditions accessConditions, Context context) { + metadata = metadata == null ? new Metadata() : metadata; + accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions; + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.blockBlobs().commitBlockListWithRestResponseAsync( + null, null, new BlockLookupList().latest(base64BlockIDs), null, metadata, + null, null, null, null, headers, + accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), context)); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/BlockBlobClient.java b/storage/client/src/main/java/com/azure/storage/blob/BlockBlobClient.java new file mode 100644 index 0000000000000..9d2c6088deb73 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/BlockBlobClient.java @@ -0,0 +1,398 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.util.Context; +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; +import com.azure.storage.blob.models.BlobHTTPHeaders; +import com.azure.storage.blob.models.BlockItem; +import com.azure.storage.blob.models.BlockListType; +import com.azure.storage.blob.models.LeaseAccessConditions; +import com.azure.storage.blob.models.SourceModifiedAccessConditions; +import io.netty.buffer.Unpooled; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.io.IOException; +import java.io.InputStream; +import java.io.UncheckedIOException; +import java.net.URL; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.List; + +/** + * Client to a block blob. It may only be instantiated through a {@link BlockBlobClientBuilder}, via + * the method {@link BlobClient#asBlockBlobClient()}, or via the method + * {@link ContainerClient#getBlockBlobClient(String)}. This class does not hold + * any state about a particular blob, but is instead a convenient way of sending appropriate + * requests to the resource on the service. + * + *

+ * This client contains operations on a blob. Operations on a container are available on {@link ContainerClient}, + * and operations on the service are available on {@link StorageClient}. + * + *

+ * Please refer to the Azure Docs + * for more information. + */ +public final class BlockBlobClient extends BlobClient { + + private BlockBlobAsyncClient blockBlobAsyncClient; + /** + * Indicates the maximum number of bytes that can be sent in a call to upload. + */ + public static final int MAX_UPLOAD_BLOB_BYTES = 256 * Constants.MB; + + /** + * Indicates the maximum number of bytes that can be sent in a call to stageBlock. + */ + public static final int MAX_STAGE_BLOCK_BYTES = 100 * Constants.MB; + + /** + * Indicates the maximum number of blocks allowed in a block blob. + */ + public static final int MAX_BLOCKS = 50000; + + /** + * Package-private constructor for use by {@link BlockBlobClientBuilder}. + * @param azureBlobStorage the API client for blob storage API + */ + BlockBlobClient(AzureBlobStorageImpl azureBlobStorage) { + super(azureBlobStorage); + this.blockBlobAsyncClient = new BlockBlobAsyncClient(azureBlobStorage); + } + + /** + * Static method for getting a new builder for this class. + * + * @return + * A new {@link BlockBlobClientBuilder} instance. + */ + public static BlockBlobClientBuilder blockBlobClientBuilder() { + return new BlockBlobClientBuilder(); + } + + /** + * Creates a new block blob, or updates the content of an existing block blob. + * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not + * supported with PutBlob; the content of the existing blob is overwritten with the new content. To + * perform a partial update of a block blob's, use PutBlock and PutBlockList. + * For more information, see the + * Azure Docs. + * + * @param data + * The data to write to the blob. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * provided in the {@link InputStream}. + * + * @return + * The information of the uploaded block blob. + */ + public void upload(InputStream data, long length) throws IOException { + this.upload(data, length, null, null, null, null, null); + } + + /** + * Creates a new block blob, or updates the content of an existing block blob. + * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not + * supported with PutBlob; the content of the existing blob is overwritten with the new content. To + * perform a partial update of a block blob's, use PutBlock and PutBlockList. + * For more information, see the + * Azure Docs. + * + * @param data + * The data to write to the blob. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * provided in the {@link InputStream}. + * @param headers + * {@link BlobHTTPHeaders} + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * The information of the uploaded block blob. + */ + public void upload(InputStream data, long length, BlobHTTPHeaders headers, + Metadata metadata, BlobAccessConditions accessConditions, Duration timeout, Context context) throws IOException { + + // buffer strategy for UX study only + byte[] bufferedData = new byte[(int)length]; + data.read(bufferedData); + + Mono upload = blockBlobAsyncClient + .upload(Flux.just(ByteBuffer.wrap(bufferedData)), length, headers, metadata, accessConditions, context); + + try { + if (timeout == null) { + upload.block(); + } else { + upload.block(timeout); + } + } + catch (UncheckedIOException e) { + throw e.getCause(); + } + } + + public void uploadFromFile(String filePath) throws IOException { + this.uploadFromFile(filePath, null, null, null, null); + } + + public void uploadFromFile(String filePath, BlobHTTPHeaders headers, Metadata metadata, + BlobAccessConditions accessConditions, Duration timeout) throws IOException { + Mono upload = this.blockBlobAsyncClient.uploadFromFile(filePath, headers, metadata, accessConditions, null); + + try { + if (timeout == null) { + upload.block(); + } else { + upload.block(timeout); + } + } + catch (UncheckedIOException e) { + throw e.getCause(); + } + } + + /** + * Uploads the specified block to the block blob's "staging area" to be later committed by a call to + * commitBlockList. For more information, see the + * Azure Docs. + * + * @param base64BlockID + * A Base64 encoded {@code String} that specifies the ID for this block. Note that all block ids for a given + * blob must be the same length. + * @param data + * The data to write to the block. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * provided in the {@link InputStream}. + */ + public void stageBlock(String base64BlockID, InputStream data, long length) throws IOException { + this.stageBlock(base64BlockID, data, length, null, null, null); + } + + /** + * Uploads the specified block to the block blob's "staging area" to be later committed by a call to + * commitBlockList. For more information, see the + * Azure Docs. + * + * @param base64BlockID + * A Base64 encoded {@code String} that specifies the ID for this block. Note that all block ids for a given + * blob must be the same length. + * @param data + * The data to write to the block. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * provided in the {@link InputStream}. + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + */ + public void stageBlock(String base64BlockID, InputStream data, long length, + LeaseAccessConditions leaseAccessConditions, Duration timeout, Context context) throws IOException { + + // buffer strategy for UX study only + byte[] bufferedData = new byte[(int)length]; + data.read(bufferedData); + + Mono response = blockBlobAsyncClient.stageBlock(base64BlockID, + Flux.just(Unpooled.wrappedBuffer(bufferedData)), length, leaseAccessConditions, context); + if (timeout == null) { + response.block(); + } else { + response.block(timeout); + } + } + + /** + * Creates a new block to be committed as part of a blob where the contents are read from a URL. For more + * information, see the Azure Docs. + * + * @param base64BlockID + * A Base64 encoded {@code String} that specifies the ID for this block. Note that all block ids for a given + * blob must be the same length. + * @param sourceURL + * The url to the blob that will be the source of the copy. A source blob in the same storage account can be + * authenticated via Shared Key. However, if the source is a blob in another account, the source blob must + * either be public or must be authenticated via a shared access signature. If the source blob is public, no + * authentication is required to perform the operation. + * @param sourceRange + * {@link BlobRange} + */ + public void stageBlockFromURL(String base64BlockID, URL sourceURL, + BlobRange sourceRange) { + this.stageBlockFromURL(base64BlockID, sourceURL, sourceRange, null, + null, null, null, null); + } + + /** + * Creates a new block to be committed as part of a blob where the contents are read from a URL. For more + * information, see the Azure Docs. + * + * @param base64BlockID + * A Base64 encoded {@code String} that specifies the ID for this block. Note that all block ids for a given + * blob must be the same length. + * @param sourceURL + * The url to the blob that will be the source of the copy. A source blob in the same storage account can + * be authenticated via Shared Key. However, if the source is a blob in another account, the source blob + * must either be public or must be authenticated via a shared access signature. If the source blob is + * public, no authentication is required to perform the operation. + * @param sourceRange + * {@link BlobRange} + * @param sourceContentMD5 + * An MD5 hash of the block content from the source blob. If specified, the service will calculate the MD5 + * of the received data and fail the request if it does not match the provided MD5. + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param sourceModifiedAccessConditions + * {@link SourceModifiedAccessConditions} + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + */ + public void stageBlockFromURL(String base64BlockID, URL sourceURL, + BlobRange sourceRange, byte[] sourceContentMD5, LeaseAccessConditions leaseAccessConditions, + SourceModifiedAccessConditions sourceModifiedAccessConditions, Duration timeout, Context context) { + Mono response = blockBlobAsyncClient.stageBlockFromURL(base64BlockID, sourceURL, sourceRange, sourceContentMD5, leaseAccessConditions, sourceModifiedAccessConditions, context); + if (timeout == null) { + response.block(); + } else { + response.block(timeout); + } + } + + /** + * Returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter. + * For more information, see the + * Azure Docs. + * + * @param listType + * Specifies which type of blocks to return. + * + * @return + * The list of blocks. + */ + public Iterable listBlocks(BlockListType listType) { + return this.listBlocks(listType, null, null, null); + } + + /** + * + * Returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter. + * For more information, see the + * Azure Docs. + * + * @param listType + * Specifies which type of blocks to return. + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * The list of blocks. + */ + public Iterable listBlocks(BlockListType listType, + LeaseAccessConditions leaseAccessConditions, Duration timeout, Context context) { + Flux response = blockBlobAsyncClient.listBlocks(listType, leaseAccessConditions, context); + + return timeout == null? + response.toIterable(): + response.timeout(timeout).toIterable(); + } + + /** + * Writes a blob by specifying the list of block IDs that are to make up the blob. + * In order to be written as part of a blob, a block must have been successfully written + * to the server in a prior stageBlock operation. You can call commitBlockList to update a blob + * by uploading only those blocks that have changed, then committing the new and existing + * blocks together. Any blocks not specified in the block list and permanently deleted. + * For more information, see the + * Azure Docs. + * + * @param base64BlockIDs + * A list of base64 encode {@code String}s that specifies the block IDs to be committed. + * + * @return + * The information of the block blob. + */ + public void commitBlockList(List base64BlockIDs) { + this.commitBlockList(base64BlockIDs, null, null, null, null, null); + } + + /** + * Writes a blob by specifying the list of block IDs that are to make up the blob. + * In order to be written as part of a blob, a block must have been successfully written + * to the server in a prior stageBlock operation. You can call commitBlockList to update a blob + * by uploading only those blocks that have changed, then committing the new and existing + * blocks together. Any blocks not specified in the block list and permanently deleted. + * For more information, see the + * Azure Docs. + * + * @param base64BlockIDs + * A list of base64 encode {@code String}s that specifies the block IDs to be committed. + * @param headers + * {@link BlobHTTPHeaders} + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * The information of the block blob. + */ + public void commitBlockList(List base64BlockIDs, + BlobHTTPHeaders headers, Metadata metadata, BlobAccessConditions accessConditions, Duration timeout, Context context) { + Mono response = blockBlobAsyncClient.commitBlockList(base64BlockIDs, headers, metadata, accessConditions, context); + + if (timeout == null) { + response.block(); + } else { + response.block(timeout); + } + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/BlockBlobClientBuilder.java b/storage/client/src/main/java/com/azure/storage/blob/BlockBlobClientBuilder.java new file mode 100644 index 0000000000000..5e9798253d854 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/BlockBlobClientBuilder.java @@ -0,0 +1,220 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.configuration.Configuration; +import com.azure.core.http.HttpClient; +import com.azure.core.http.HttpPipeline; +import com.azure.core.http.policy.AddDatePolicy; +import com.azure.core.http.policy.HttpLogDetailLevel; +import com.azure.core.http.policy.HttpLoggingPolicy; +import com.azure.core.http.policy.HttpPipelinePolicy; +import com.azure.core.http.policy.RequestIdPolicy; +import com.azure.core.http.policy.RetryPolicy; +import com.azure.core.http.policy.UserAgentPolicy; +import com.azure.core.implementation.util.ImplUtils; +import com.azure.storage.blob.implementation.AzureBlobStorageBuilder; +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; + +import java.net.MalformedURLException; +import java.net.URL; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * Fluent BlockBlobClientBuilder for instantiating a {@link BlockBlobClient} or {@link BlockBlobAsyncClient}. + * + *

+ * An instance of this builder may only be created from static method {@link BlockBlobClient#blockBlobClientBuilder()}. + * The following information must be provided on this builder: + * + *

+ * + *

+ * Once all the configurations are set on this builder, call {@code .buildClient()} to create a + * {@link BlockBlobClient} or {@code .buildAsyncClient()} to create a {@link BlockBlobAsyncClient}. + */ +public final class BlockBlobClientBuilder { + private static final String ACCOUNT_NAME = "AccountName".toLowerCase(); + private static final String ACCOUNT_KEY = "AccountKey".toLowerCase(); + + private final List policies; + + private URL endpoint; + private ICredentials credentials = new AnonymousCredentials(); + private HttpClient httpClient; + private HttpLogDetailLevel logLevel; + private RetryPolicy retryPolicy; + private Configuration configuration; + + public BlockBlobClientBuilder() { + retryPolicy = new RetryPolicy(); + logLevel = HttpLogDetailLevel.NONE; + policies = new ArrayList<>(); + } + + /** + * Constructs an instance of BlockBlobAsyncClient based on the configurations stored in the appendBlobClientBuilder. + * @return a new client instance + */ + private AzureBlobStorageImpl buildImpl() { + Objects.requireNonNull(endpoint); + + // Closest to API goes first, closest to wire goes last. + final List policies = new ArrayList<>(); + + policies.add(new UserAgentPolicy(BlobConfiguration.NAME, BlobConfiguration.VERSION)); + policies.add(new RequestIdPolicy()); + policies.add(new AddDatePolicy()); + policies.add(credentials); // This needs to be a different credential type. + + policies.add(retryPolicy); + + policies.addAll(this.policies); + policies.add(new HttpLoggingPolicy(logLevel)); + + HttpPipeline pipeline = HttpPipeline.builder() + .policies(policies.toArray(new HttpPipelinePolicy[0])) + .httpClient(httpClient) + .build(); + + return new AzureBlobStorageBuilder() + .url(endpoint.toString()) + .pipeline(pipeline) + .build(); + } + + /** + * @return a {@link BlockBlobClient} created from the configurations in this builder. + */ + public BlockBlobClient buildClient() { + return new BlockBlobClient(buildImpl()); + } + + /** + * @return a {@link BlockBlobAsyncClient} created from the configurations in this builder. + */ + public BlockBlobAsyncClient buildAsyncClient() { + return new BlockBlobAsyncClient(buildImpl()); + } + + /** + * Sets the service endpoint, additionally parses it for information (SAS token, container name) + * @param endpoint URL of the service + * @return the updated BlockBlobClientBuilder object + */ + public BlockBlobClientBuilder endpoint(String endpoint) { + Objects.requireNonNull(endpoint); + try { + this.endpoint = new URL(endpoint); + } catch (MalformedURLException ex) { + throw new IllegalArgumentException("The Azure Storage Queue endpoint url is malformed."); + } + + return this; + } + + /** + * Sets the credentials used to authorize requests sent to the service + * @param credentials authorization credentials + * @return the updated BlockBlobClientBuilder object + */ + public BlockBlobClientBuilder credentials(SharedKeyCredentials credentials) { + this.credentials = credentials; + return this; + } + + /** + * Sets the credentials used to authorize requests sent to the service + * @param credentials authorization credentials + * @return the updated BlockBlobClientBuilder object + */ + public BlockBlobClientBuilder credentials(TokenCredentials credentials) { + this.credentials = credentials; + return this; + } + + /** + * Clears the credentials used to authorize requests sent to the service + * @return the updated BlockBlobClientBuilder object + */ + public BlockBlobClientBuilder anonymousCredentials() { + this.credentials = new AnonymousCredentials(); + return this; + } + + /** + * Sets the connection string for the service, parses it for authentication information (account name, account key) + * @param connectionString connection string from access keys section + * @return the updated BlockBlobClientBuilder object + */ + public BlockBlobClientBuilder connectionString(String connectionString) { + Objects.requireNonNull(connectionString); + + Map connectionKVPs = new HashMap<>(); + for (String s : connectionString.split(";")) { + String[] kvp = s.split("=", 2); + connectionKVPs.put(kvp[0].toLowerCase(), kvp[1]); + } + + String accountName = connectionKVPs.get(ACCOUNT_NAME); + String accountKey = connectionKVPs.get(ACCOUNT_KEY); + + if (ImplUtils.isNullOrEmpty(accountName) || ImplUtils.isNullOrEmpty(accountKey)) { + throw new IllegalArgumentException("Connection string must contain 'AccountName' and 'AccountKey'."); + } + + // Use accountName and accountKey to get the SAS token using the credential class. + credentials = new SharedKeyCredentials(accountName, accountKey); + + return this; + } + + /** + * Sets the http client used to send service requests + * @param httpClient http client to send requests + * @return the updated BlockBlobClientBuilder object + */ + public BlockBlobClientBuilder httpClient(HttpClient httpClient) { + this.httpClient = httpClient; + return this; + } + + /** + * Adds a pipeline policy to apply on each request sent + * @param pipelinePolicy a pipeline policy + * @return the updated BlockBlobClientBuilder object + */ + public BlockBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { + this.policies.add(pipelinePolicy); + return this; + } + + /** + * Sets the logging level for service requests + * @param logLevel logging level + * @return the updated BlockBlobClientBuilder object + */ + public BlockBlobClientBuilder httpLogDetailLevel(HttpLogDetailLevel logLevel) { + this.logLevel = logLevel; + return this; + } + + /** + * Sets the configuration object used to retrieve environment configuration values used to buildClient the client with + * when they are not set in the appendBlobClientBuilder, defaults to Configuration.NONE + * @param configuration configuration store + * @return the updated BlockBlobClientBuilder object + */ + public BlockBlobClientBuilder configuration(Configuration configuration) { + this.configuration = configuration; + return this; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/BlockBlobRawClient.java b/storage/client/src/main/java/com/azure/storage/blob/BlockBlobRawClient.java new file mode 100644 index 0000000000000..07260ffa53f63 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/BlockBlobRawClient.java @@ -0,0 +1,379 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.http.HttpPipeline; +import com.azure.core.util.Context; +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; +import com.azure.storage.blob.models.*; +import io.netty.buffer.ByteBuf; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.net.URL; +import java.time.Duration; +import java.util.List; + +/** + * Represents a URL to a block blob. It may be obtained by direct construction or via the create method on a + * {@link ContainerAsyncClient} object. This class does not hold any state about a particular blob but is instead a convenient + * way of sending off appropriate requests to the resource on the service. Please refer to the + * Azure Docs + * for more information on block blobs. + */ +final class BlockBlobRawClient extends BlobAsyncRawClient { + + private BlockBlobAsyncRawClient blockBlobAsyncRawClient; + /** + * Indicates the maximum number of bytes that can be sent in a call to upload. + */ + public static final int MAX_UPLOAD_BLOB_BYTES = 256 * Constants.MB; + + /** + * Indicates the maximum number of bytes that can be sent in a call to stageBlock. + */ + public static final int MAX_STAGE_BLOCK_BYTES = 100 * Constants.MB; + + /** + * Indicates the maximum number of blocks allowed in a block blob. + */ + public static final int MAX_BLOCKS = 50000; + + /** + * Creates a {@code BlockBlobAsyncRawClient} object pointing to the account specified by the URL and using the provided + */ + BlockBlobRawClient(AzureBlobStorageImpl azureBlobStorage) { + super(azureBlobStorage); + this.blockBlobAsyncRawClient = new BlockBlobAsyncRawClient(azureBlobStorage); + } + + /** + * Creates a new block blob, or updates the content of an existing block blob. + * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not + * supported with PutBlob; the content of the existing blob is overwritten with the new content. To + * perform a partial update of a block blob's, use PutBlock and PutBlockList. + * For more information, see the + * Azure Docs. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + *

+ * For more efficient bulk-upload scenarios, please refer to the {@link TransferManager} for convenience methods. + * + * @param data + * The data to write to the blob. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * emitted by the {@code Flux}. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=upload_download "Sample code for BlockBlobAsyncRawClient.upload")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlockBlobsUploadResponse upload(Flux data, long length) { + return this.upload(data, length, null, null, null, null, null); + } + + /** + * Creates a new block blob, or updates the content of an existing block blob. + * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not + * supported with PutBlob; the content of the existing blob is overwritten with the new content. To + * perform a partial update of a block blob's, use PutBlock and PutBlockList. + * For more information, see the + * Azure Docs. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + *

+ * For more efficient bulk-upload scenarios, please refer to the {@link TransferManager} for convenience methods. + * + * @param data + * The data to write to the blob. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * emitted by the {@code Flux}. + * @param headers + * {@link BlobHTTPHeaders} + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=upload_download "Sample code for BlockBlobAsyncRawClient.upload")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlockBlobsUploadResponse upload(Flux data, long length, BlobHTTPHeaders headers, + Metadata metadata, BlobAccessConditions accessConditions, Duration timeout, Context context) { + Mono response = blockBlobAsyncRawClient.upload(data, length, headers, metadata, accessConditions, context); + return timeout == null? + response.block(): + response.block(timeout); + } + + /** + * Uploads the specified block to the block blob's "staging area" to be later committed by a call to + * commitBlockList. For more information, see the + * Azure Docs. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + * + * @param base64BlockID + * A Base64 encoded {@code String} that specifies the ID for this block. Note that all block ids for a given + * blob must be the same length. + * @param data + * The data to write to the block. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * emitted by the {@code Flux}. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blocks "Sample code for BlockBlobAsyncRawClient.stageBlock")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlockBlobsStageBlockResponse stageBlock(String base64BlockID, Flux data, long length) { + return this.stageBlock(base64BlockID, data, length, null, null, null); + } + + /** + * Uploads the specified block to the block blob's "staging area" to be later committed by a call to + * commitBlockList. For more information, see the + * Azure Docs. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + * + * @param base64BlockID + * A Base64 encoded {@code String} that specifies the ID for this block. Note that all block ids for a given + * blob must be the same length. + * @param data + * The data to write to the block. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * emitted by the {@code Flux}. + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blocks "Sample code for BlockBlobAsyncRawClient.stageBlock")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlockBlobsStageBlockResponse stageBlock(String base64BlockID, Flux data, long length, + LeaseAccessConditions leaseAccessConditions, Duration timeout, Context context) { + Mono response = blockBlobAsyncRawClient.stageBlock(base64BlockID, data, length, leaseAccessConditions, context); + return timeout == null? + response.block(): + response.block(timeout); + } + + /** + * Creates a new block to be committed as part of a blob where the contents are read from a URL. For more + * information, see the Azure Docs. + * + * @param base64BlockID + * A Base64 encoded {@code String} that specifies the ID for this block. Note that all block ids for a given + * blob must be the same length. + * @param sourceURL + * The url to the blob that will be the source of the copy. A source blob in the same storage account can be + * authenticated via Shared Key. However, if the source is a blob in another account, the source blob must + * either be public or must be authenticated via a shared access signature. If the source blob is public, no + * authentication is required to perform the operation. + * @param sourceRange + * {@link BlobRange} + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=block_from_url "Sample code for BlockBlobAsyncRawClient.stageBlockFromURL")] + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlockBlobsStageBlockFromURLResponse stageBlockFromURL(String base64BlockID, URL sourceURL, + BlobRange sourceRange) { + return this.stageBlockFromURL(base64BlockID, sourceURL, sourceRange, null, + null, null, null, null); + } + + /** + * Creates a new block to be committed as part of a blob where the contents are read from a URL. For more + * information, see the Azure Docs. + * + * @param base64BlockID + * A Base64 encoded {@code String} that specifies the ID for this block. Note that all block ids for a given + * blob must be the same length. + * @param sourceURL + * The url to the blob that will be the source of the copy. A source blob in the same storage account can + * be authenticated via Shared Key. However, if the source is a blob in another account, the source blob + * must either be public or must be authenticated via a shared access signature. If the source blob is + * public, no authentication is required to perform the operation. + * @param sourceRange + * {@link BlobRange} + * @param sourceContentMD5 + * An MD5 hash of the block content from the source blob. If specified, the service will calculate the MD5 + * of the received data and fail the request if it does not match the provided MD5. + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param sourceModifiedAccessConditions + * {@link SourceModifiedAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=block_from_url "Sample code for BlockBlobAsyncRawClient.stageBlockFromURL")] + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlockBlobsStageBlockFromURLResponse stageBlockFromURL(String base64BlockID, URL sourceURL, + BlobRange sourceRange, byte[] sourceContentMD5, LeaseAccessConditions leaseAccessConditions, + SourceModifiedAccessConditions sourceModifiedAccessConditions, Duration timeout, Context context) { + Mono response = blockBlobAsyncRawClient.stageBlockFromURL(base64BlockID, sourceURL, sourceRange, sourceContentMD5, leaseAccessConditions, sourceModifiedAccessConditions, context); + return timeout == null? + response.block(): + response.block(timeout); + } + + /** + * Returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter. + * For more information, see the + * Azure Docs. + * + * @param listType + * Specifies which type of blocks to return. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blocks "Sample code for BlockBlobAsyncRawClient.listBlocks")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlockBlobsGetBlockListResponse getBlockList(BlockListType listType) { + return this.getBlockList(listType, null, null, null); + } + + /** + * Returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter. + * For more information, see the + * Azure Docs. + * + * @param listType + * Specifies which type of blocks to return. + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blocks "Sample code for BlockBlobAsyncRawClient.listBlocks")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlockBlobsGetBlockListResponse getBlockList(BlockListType listType, + LeaseAccessConditions leaseAccessConditions, Duration timeout, Context context) { + Mono response = blockBlobAsyncRawClient.listBlocks(listType, leaseAccessConditions, context); + return timeout == null? + response.block(): + response.block(timeout); + } + + /** + * Writes a blob by specifying the list of block IDs that are to make up the blob. + * In order to be written as part of a blob, a block must have been successfully written + * to the server in a prior stageBlock operation. You can call commitBlockList to update a blob + * by uploading only those blocks that have changed, then committing the new and existing + * blocks together. Any blocks not specified in the block list and permanently deleted. + * For more information, see the + * Azure Docs. + *

+ * For more efficient bulk-upload scenarios, please refer to the {@link TransferManager} for convenience methods. + * + * @param base64BlockIDs + * A list of base64 encode {@code String}s that specifies the block IDs to be committed. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blocks "Sample code for BlockBlobAsyncRawClient.commitBlockList")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlockBlobsCommitBlockListResponse commitBlockList(List base64BlockIDs) { + return this.commitBlockList(base64BlockIDs, null, null, null, null, null); + } + + /** + * Writes a blob by specifying the list of block IDs that are to make up the blob. + * In order to be written as part of a blob, a block must have been successfully written + * to the server in a prior stageBlock operation. You can call commitBlockList to update a blob + * by uploading only those blocks that have changed, then committing the new and existing + * blocks together. Any blocks not specified in the block list and permanently deleted. + * For more information, see the + * Azure Docs. + *

+ * For more efficient bulk-upload scenarios, please refer to the {@link TransferManager} for convenience methods. + * + * @param base64BlockIDs + * A list of base64 encode {@code String}s that specifies the block IDs to be committed. + * @param headers + * {@link BlobHTTPHeaders} + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blocks "Sample code for BlockBlobAsyncRawClient.commitBlockList")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public BlockBlobsCommitBlockListResponse commitBlockList(List base64BlockIDs, + BlobHTTPHeaders headers, Metadata metadata, BlobAccessConditions accessConditions, Duration timeout, Context context) { + Mono response = blockBlobAsyncRawClient.commitBlockList(base64BlockIDs, headers, metadata, accessConditions, context); + return timeout == null? + response.block(): + response.block(timeout); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/CommonRestResponse.java b/storage/client/src/main/java/com/azure/storage/blob/CommonRestResponse.java new file mode 100644 index 0000000000000..38a68dbd41b6e --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/CommonRestResponse.java @@ -0,0 +1,110 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.http.rest.Response; +import com.azure.storage.blob.models.BlockBlobsCommitBlockListResponse; +import com.azure.storage.blob.models.BlockBlobsUploadResponse; + +import java.time.OffsetDateTime; + +/** + * A generic wrapper for any type of blob REST API response. Used and returned by methods in the {@link TransferManager} + * class. The methods there return this type because they represent composite operations which may conclude with any of + * several possible REST calls depending on the data provided. + */ +final class CommonRestResponse { + + private BlockBlobsUploadResponse uploadBlobResponse; + + private BlockBlobsCommitBlockListResponse commitBlockListResponse; + + private CommonRestResponse() { + uploadBlobResponse = null; + commitBlockListResponse = null; + } + + static CommonRestResponse createFromPutBlobResponse(BlockBlobsUploadResponse response) { + CommonRestResponse commonRestResponse = new CommonRestResponse(); + commonRestResponse.uploadBlobResponse = response; + return commonRestResponse; + } + + static CommonRestResponse createFromPutBlockListResponse(BlockBlobsCommitBlockListResponse response) { + CommonRestResponse commonRestResponse = new CommonRestResponse(); + commonRestResponse.commitBlockListResponse = response; + return commonRestResponse; + } + + /** + * @return The status code for the response + */ + public int statusCode() { + if (uploadBlobResponse != null) { + return uploadBlobResponse.statusCode(); + } + return commitBlockListResponse.statusCode(); + } + + /** + * @return An HTTP Etag for the blob at the time of the request. + */ + public String eTag() { + if (uploadBlobResponse != null) { + return uploadBlobResponse.deserializedHeaders().eTag(); + } + return commitBlockListResponse.deserializedHeaders().eTag(); + } + + /** + * @return The time when the blob was last modified. + */ + public OffsetDateTime lastModified() { + if (uploadBlobResponse != null) { + return uploadBlobResponse.deserializedHeaders().lastModified(); + } + return commitBlockListResponse.deserializedHeaders().lastModified(); + } + + /** + * @return The id of the service request for which this is the response. + */ + public String requestId() { + if (uploadBlobResponse != null) { + return uploadBlobResponse.deserializedHeaders().requestId(); + } + return commitBlockListResponse.deserializedHeaders().requestId(); + } + + /** + * @return The date of the response. + */ + public OffsetDateTime date() { + if (uploadBlobResponse != null) { + return uploadBlobResponse.deserializedHeaders().dateProperty(); + } + return commitBlockListResponse.deserializedHeaders().dateProperty(); + } + + /** + * @return The service version responding to the request. + */ + public String version() { + if (uploadBlobResponse != null) { + return uploadBlobResponse.deserializedHeaders().version(); + } + return commitBlockListResponse.deserializedHeaders().version(); + } + + /** + * @return The underlying response. + */ + public Response response() { + if (uploadBlobResponse != null) { + return uploadBlobResponse; + } + return commitBlockListResponse; + } + +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/Constants.java b/storage/client/src/main/java/com/azure/storage/blob/Constants.java new file mode 100644 index 0000000000000..a7b0d3e145c3f --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/Constants.java @@ -0,0 +1,299 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +/** + * RESERVED FOR INTERNAL USE. Contains storage constants. + */ +final class Constants { + + /** + * The master Microsoft Azure Storage header prefix. + */ + static final String PREFIX_FOR_STORAGE_HEADER = "x-ms-"; + /** + * Constant representing a kilobyte (Non-SI version). + */ + static final int KB = 1024; + /** + * Constant representing a megabyte (Non-SI version). + */ + static final int MB = 1024 * KB; + /** + * An empty {@code String} to use for comparison. + */ + static final String EMPTY_STRING = ""; + /** + * Specifies HTTP. + */ + static final String HTTP = "http"; + /** + * Specifies HTTPS. + */ + static final String HTTPS = "https"; + /** + * Specifies both HTTPS and HTTP. + */ + static final String HTTPS_HTTP = "https,http"; + /** + * The default type for content-type and accept. + */ + static final String UTF8_CHARSET = "UTF-8"; + /** + * The query parameter for snapshots. + */ + static final String SNAPSHOT_QUERY_PARAMETER = "snapshot"; + /** + * The word redacted. + */ + static final String REDACTED = "REDACTED"; + /** + * The default amount of parallelism for TransferManager operations. + */ + // We chose this to match Go, which followed AWS' default. + static final int TRANSFER_MANAGER_DEFAULT_PARALLELISM = 5; + + /** + * Private Default Ctor + */ + private Constants() { + // Private to prevent construction. + } + + /** + * Defines constants for use with HTTP headers. + */ + static final class HeaderConstants { + /** + * The Authorization header. + */ + static final String AUTHORIZATION = "Authorization"; + + /** + * The format string for specifying ranges with only begin offset. + */ + static final String BEGIN_RANGE_HEADER_FORMAT = "bytes=%d-"; + + /** + * The header that indicates the client request ID. + */ + static final String CLIENT_REQUEST_ID_HEADER = PREFIX_FOR_STORAGE_HEADER + "client-request-id"; + + /** + * The ContentEncoding header. + */ + static final String CONTENT_ENCODING = "Content-Encoding"; + + /** + * The ContentLangauge header. + */ + static final String CONTENT_LANGUAGE = "Content-Language"; + + /** + * The ContentLength header. + */ + static final String CONTENT_LENGTH = "Content-Length"; + + /** + * The ContentMD5 header. + */ + static final String CONTENT_MD5 = "Content-MD5"; + + /** + * The ContentType header. + */ + static final String CONTENT_TYPE = "Content-Type"; + + /** + * The header that specifies the date. + */ + static final String DATE = PREFIX_FOR_STORAGE_HEADER + "date"; + + /** + * The header that specifies the error code on unsuccessful responses. + */ + static final String ERROR_CODE = PREFIX_FOR_STORAGE_HEADER + "error-code"; + + /** + * The IfMatch header. + */ + static final String IF_MATCH = "If-Match"; + + /** + * The IfModifiedSince header. + */ + static final String IF_MODIFIED_SINCE = "If-Modified-Since"; + + /** + * The IfNoneMatch header. + */ + static final String IF_NONE_MATCH = "If-None-Match"; + + /** + * The IfUnmodifiedSince header. + */ + static final String IF_UNMODIFIED_SINCE = "If-Unmodified-Since"; + + /** + * The Range header. + */ + static final String RANGE = "Range"; + + /** + * The format string for specifying ranges. + */ + static final String RANGE_HEADER_FORMAT = "bytes=%d-%d"; + + /** + * The copy source header. + */ + static final String COPY_SOURCE = "x-ms-copy-source"; + + /** + * The version header. + */ + static final String VERSION = "x-ms-version"; + + /** + * The current storage version header value. + */ + static final String TARGET_STORAGE_VERSION = "2018-11-09"; + + /** + * The UserAgent header. + */ + static final String USER_AGENT = "User-Agent"; + + /** + * Specifies the value to use for UserAgent header. + */ + static final String USER_AGENT_PREFIX = "Azure-Storage"; + + /** + * Specifies the value to use for UserAgent header. + */ + static final String USER_AGENT_VERSION = "11.0.1"; + + private HeaderConstants() { + // Private to prevent construction. + } + } + + static final class UrlConstants { + + /** + * The SAS service version parameter. + */ + static final String SAS_SERVICE_VERSION = "sv"; + + /** + * The SAS services parameter. + */ + static final String SAS_SERVICES = "ss"; + + /** + * The SAS resource types parameter. + */ + static final String SAS_RESOURCES_TYPES = "srt"; + + /** + * The SAS protocol parameter. + */ + static final String SAS_PROTOCOL = "spr"; + + /** + * The SAS start time parameter. + */ + static final String SAS_START_TIME = "st"; + + /** + * The SAS expiration time parameter. + */ + static final String SAS_EXPIRY_TIME = "se"; + + /** + * The SAS IP range parameter. + */ + static final String SAS_IP_RANGE = "sip"; + + /** + * The SAS signed identifier parameter. + */ + static final String SAS_SIGNED_IDENTIFIER = "si"; + + /** + * The SAS signed resource parameter. + */ + static final String SAS_SIGNED_RESOURCE = "sr"; + + /** + * The SAS signed permissions parameter. + */ + static final String SAS_SIGNED_PERMISSIONS = "sp"; + + /** + * The SAS signature parameter. + */ + static final String SAS_SIGNATURE = "sig"; + + /** + * The SAS cache control parameter. + */ + static final String SAS_CACHE_CONTROL = "rscc"; + + /** + * The SAS content disposition parameter. + */ + static final String SAS_CONTENT_DISPOSITION = "rscd"; + + /** + * The SAS content encoding parameter. + */ + static final String SAS_CONTENT_ENCODING = "rsce"; + + /** + * The SAS content language parameter. + */ + static final String SAS_CONTENT_LANGUAGE = "rscl"; + + /** + * The SAS content type parameter. + */ + static final String SAS_CONTENT_TYPE = "rsct"; + + /** + * The SAS signed object id parameter for user delegation SAS. + */ + public static final String SAS_SIGNED_OBJECT_ID = "skoid"; + + /** + * The SAS signed tenant id parameter for user delegation SAS. + */ + public static final String SAS_SIGNED_TENANT_ID = "sktid"; + + /** + * The SAS signed key-start parameter for user delegation SAS. + */ + public static final String SAS_SIGNED_KEY_START = "skt"; + + /** + * The SAS signed key-expiry parameter for user delegation SAS. + */ + public static final String SAS_SIGNED_KEY_EXPIRY = "ske"; + + /** + * The SAS signed service parameter for user delegation SAS. + */ + public static final String SAS_SIGNED_KEY_SERVICE = "sks"; + + /** + * The SAS signed version parameter for user delegation SAS. + */ + public static final String SAS_SIGNED_KEY_VERSION = "skv"; + + private UrlConstants() { + // Private to prevent construction. + } + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/ContainerAccessConditions.java b/storage/client/src/main/java/com/azure/storage/blob/ContainerAccessConditions.java new file mode 100644 index 0000000000000..f2b9ca1c04f5e --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/ContainerAccessConditions.java @@ -0,0 +1,64 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.storage.blob.models.LeaseAccessConditions; +import com.azure.storage.blob.models.ModifiedAccessConditions; + +/** + * This class contains values which will restrict the successful operation of a variety of requests to the conditions + * present. These conditions are entirely optional. The entire object or any of its properties may be set to null when + * passed to a method to indicate that those conditions are not desired. Please refer to the type of each field for more + * information on those particular access conditions. + */ +public final class ContainerAccessConditions { + + private ModifiedAccessConditions modifiedAccessConditions; + + private LeaseAccessConditions leaseAccessConditions; + + /** + * Creates an instance which has fields set to non-null, empty values. + */ + public ContainerAccessConditions() { + this.modifiedAccessConditions = new ModifiedAccessConditions(); + this.leaseAccessConditions = new LeaseAccessConditions(); + } + + /** + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used to + * construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + */ + public ModifiedAccessConditions modifiedAccessConditions() { + return modifiedAccessConditions; + } + + /** + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used to + * construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + */ + public ContainerAccessConditions withModifiedAccessConditions(ModifiedAccessConditions modifiedAccessConditions) { + this.modifiedAccessConditions = modifiedAccessConditions; + return this; + } + + /** + * By setting lease access conditions, requests will fail if the provided lease does not match the active lease on + * the blob. + */ + public LeaseAccessConditions leaseAccessConditions() { + return leaseAccessConditions; + } + + /** + * By setting lease access conditions, requests will fail if the provided lease does not match the active lease on + * the blob. + */ + public ContainerAccessConditions withLeaseAccessConditions(LeaseAccessConditions leaseID) { + this.leaseAccessConditions = leaseID; + return this; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/ContainerAsyncClient.java b/storage/client/src/main/java/com/azure/storage/blob/ContainerAsyncClient.java new file mode 100644 index 0000000000000..447501a3666cb --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/ContainerAsyncClient.java @@ -0,0 +1,759 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.http.rest.ResponseBase; +import com.azure.core.util.Context; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.ContainerGetAccessPolicyHeaders; +import com.azure.storage.blob.models.ContainersListBlobFlatSegmentResponse; +import com.azure.storage.blob.models.LeaseAccessConditions; +import com.azure.storage.blob.models.ModifiedAccessConditions; +import com.azure.storage.blob.models.PublicAccessType; +import com.azure.storage.blob.models.SignedIdentifier; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.net.MalformedURLException; +import java.net.URL; +import java.util.List; + +/** + * Client to a container. It may only be instantiated through a {@link ContainerClientBuilder} or via the method + * {@link StorageAsyncClient#getContainerAsyncClient(String)}. This class does not hold any + * state about a particular blob but is instead a convenient way of sending off appropriate requests to + * the resource on the service. It may also be used to construct URLs to blobs. + * + *

+ * This client contains operations on a container. Operations on a blob are available on {@link BlobAsyncClient} through + * {@link #getBlobAsyncClient(String)}, and operations on the service are available on {@link StorageAsyncClient}. + * + *

+ * Please refer to the Azure Docs + * for more information on containers. + * + *

+ * Note this client is an async client that returns reactive responses from Spring Reactor Core + * project (https://projectreactor.io/). Calling the methods in this client will NOT + * start the actual network operation, until {@code .subscribe()} is called on the reactive response. + * You can simply convert one of these responses to a {@link java.util.concurrent.CompletableFuture} + * object through {@link Mono#toFuture()}. + */ +public final class ContainerAsyncClient { + + ContainerAsyncRawClient containerAsyncRawClient; + private ContainerClientBuilder builder; + + public static final String ROOT_CONTAINER_NAME = "$root"; + + public static final String STATIC_WEBSITE_CONTAINER_NAME = "$web"; + + public static final String LOG_CONTAINER_NAME = "$logs"; + + /** + * Package-private constructor for use by {@link ContainerClientBuilder}. + * @param builder the container client builder + */ + ContainerAsyncClient(ContainerClientBuilder builder) { + this.builder = builder; + this.containerAsyncRawClient = new ContainerAsyncRawClient(builder.buildImpl()); + } + + /** + * @return a new client {@link ContainerClientBuilder} instance. + */ + public static ContainerClientBuilder containerClientBuilder() { + return new ContainerClientBuilder(); + } + + /** + * Creates a new {@link BlockBlobAsyncClient} object by concatenating the blobName to the end of + * ContainerAsyncClient's URL. The new BlockBlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. + * To change the pipeline, create the BlockBlobAsyncClient and then call its WithPipeline method passing in the + * desired pipeline object. Or, call this package's NewBlockBlobAsyncClient instead of calling this object's + * NewBlockBlobAsyncClient method. + * + * @param blobName + * A {@code String} representing the name of the blob. + * + * @return A new {@link BlockBlobAsyncClient} object which references the blob with the specified name in this container. + */ + public BlockBlobAsyncClient getBlockBlobAsyncClient(String blobName) { + try { + return new BlockBlobAsyncClient(this.builder.copyBuilder().endpoint(Utility.appendToURLPath(new URL(builder.endpoint()), blobName).toString()).buildImpl()); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + } + + /** + * Creates creates a new PageBlobAsyncClient object by concatenating blobName to the end of + * ContainerAsyncClient's URL. The new PageBlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. + * To change the pipeline, create the PageBlobAsyncClient and then call its WithPipeline method passing in the + * desired pipeline object. Or, call this package's NewPageBlobAsyncClient instead of calling this object's + * NewPageBlobAsyncClient method. + * + * @param blobName + * A {@code String} representing the name of the blob. + * + * @return A new {@link PageBlobAsyncClient} object which references the blob with the specified name in this container. + */ + public PageBlobAsyncClient getPageBlobAsyncClient(String blobName) { + try { + return new PageBlobAsyncClient(this.builder.copyBuilder().endpoint(Utility.appendToURLPath(new URL(builder.endpoint()), blobName).toString()).buildImpl()); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + } + + /** + * Creates creates a new AppendBlobAsyncClient object by concatenating blobName to the end of + * ContainerAsyncClient's URL. The new AppendBlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. + * To change the pipeline, create the AppendBlobAsyncClient and then call its WithPipeline method passing in the + * desired pipeline object. Or, call this package's NewAppendBlobAsyncClient instead of calling this object's + * NewAppendBlobAsyncClient method. + * + * @param blobName + * A {@code String} representing the name of the blob. + * + * @return A new {@link AppendBlobAsyncClient} object which references the blob with the specified name in this container. + */ + public AppendBlobAsyncClient getAppendBlobAsyncClient(String blobName) { + try { + return new AppendBlobAsyncClient(this.builder.copyBuilder().endpoint(Utility.appendToURLPath(new URL(builder.endpoint()), blobName).toString()).buildImpl()); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + } + + /** + * Creates a new BlobAsyncClient object by concatenating blobName to the end of + * ContainerAsyncClient's URL. The new BlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient. + * To change the pipeline, create the BlobAsyncClient and then call its WithPipeline method passing in the + * desired pipeline object. Or, call this package's getBlobAsyncClient instead of calling this object's + * getBlobAsyncClient method. + * + * @param blobName + * A {@code String} representing the name of the blob. + * + * @return A new {@link BlobAsyncClient} object which references the blob with the specified name in this container. + */ + public BlobAsyncClient getBlobAsyncClient(String blobName) { + try { + return new BlobAsyncClient(this.builder.copyBuilder().endpoint(Utility.appendToURLPath(new URL(builder.endpoint()), blobName).toString()).buildImpl()); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + } + + /** + * Creates a new container within a storage account. If a container with the same name already exists, the operation + * fails. For more information, see the + * Azure Docs. + * + * @return + * A reactive response signalling completion. + */ + public Mono create() { + return this.create(null, null, null); + } + + /** + * Creates a new container within a storage account. If a container with the same name already exists, the operation + * fails. For more information, see the + * Azure Docs. + * + * @param metadata + * {@link Metadata} + * @param accessType + * Specifies how the data in this container is available to the public. See the x-ms-blob-public-access header + * in the Azure Docs for more information. Pass null for no public access. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return + * A reactive response signalling completion. + */ + public Mono create(Metadata metadata, PublicAccessType accessType, Context context) { + return containerAsyncRawClient + .create(metadata, accessType, context) + .then(); + } + + /** + * Marks the specified container for deletion. The container and any blobs contained within it are later + * deleted during garbage collection. For more information, see the + * Azure Docs. + * + * @return + * A reactive response signalling completion. + */ + public Mono delete() { + return this.delete(null, null); + } + + /** + * Marks the specified container for deletion. The container and any blobs contained within it are later + * deleted during garbage collection. For more information, see the + * Azure Docs. + * + * @param accessConditions + * {@link ContainerAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return + * A reactive response signalling completion. + */ + public Mono delete(ContainerAccessConditions accessConditions, Context context) { + return containerAsyncRawClient + .delete(accessConditions, context) + .then(); + } + + /** + * Returns the container's metadata and system properties. For more information, see the + * Azure Docs. + * + * @return + * A reactive response containing the container properties. + */ + public Mono getProperties() { + return this.getProperties(null, null); + } + + /** + * Returns the container's metadata and system properties. For more information, see the + * Azure Docs. + * + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return + * A reactive response containing the container properties. + */ + public Mono getProperties(LeaseAccessConditions leaseAccessConditions, + Context context) { + return containerAsyncRawClient + .getProperties(leaseAccessConditions, context) + .map(ResponseBase::deserializedHeaders) + .map(ContainerProperties::new); + } + + /** + * Sets the container's metadata. For more information, see the + * Azure Docs. + * + * @param metadata + * {@link Metadata} + * + * @return + * A reactive response signalling completion. + */ + public Mono setMetadata(Metadata metadata) { + return this.setMetadata(metadata, null, null); + } + + /** + * Sets the container's metadata. For more information, see the + * Azure Docs. + * + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link ContainerAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return + * A reactive response signalling completion. + */ + public Mono setMetadata(Metadata metadata, + ContainerAccessConditions accessConditions, Context context) { + return containerAsyncRawClient + .setMetadata(metadata, accessConditions, context) + .then(); + } + + /** + * Returns the container's permissions. The permissions indicate whether container's blobs may be accessed publicly. + * For more information, see the + * Azure Docs. + * + * @return + * A reactive response containing the container access policy. + */ + public Mono getAccessPolicy() { + return this.getAccessPolicy(null, null); + } + + /** + * Returns the container's permissions. The permissions indicate whether container's blobs may be accessed publicly. + * For more information, see the + * Azure Docs. + * + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return + * A reactive response containing the container access policy. + */ + public Mono getAccessPolicy(LeaseAccessConditions leaseAccessConditions, + Context context) { + return containerAsyncRawClient + .getAccessPolicy(leaseAccessConditions, context) + .map(ResponseBase::deserializedHeaders) + .map(ContainerGetAccessPolicyHeaders::blobPublicAccess); + } + + /** + * Sets the container's permissions. The permissions indicate whether blobs in a container may be accessed publicly. + * Note that, for each signed identifier, we will truncate the start and expiry times to the nearest second to + * ensure the time formatting is compatible with the service. For more information, see the + * Azure Docs. + * + * @param accessType + * Specifies how the data in this container is available to the public. See the x-ms-blob-public-access header + * in the Azure Docs for more information. Pass null for no public access. + * @param identifiers + * A list of {@link SignedIdentifier} objects that specify the permissions for the container. Please see + * here + * for more information. Passing null will clear all access policies. + * + * @return + * A reactive response signalling completion. + */ + public Mono setAccessPolicy(PublicAccessType accessType, + List identifiers) { + return this.setAccessPolicy(accessType, identifiers, null, null); + } + + /** + * Sets the container's permissions. The permissions indicate whether blobs in a container may be accessed publicly. + * Note that, for each signed identifier, we will truncate the start and expiry times to the nearest second to + * ensure the time formatting is compatible with the service. For more information, see the + * Azure Docs. + * + * @param accessType + * Specifies how the data in this container is available to the public. See the x-ms-blob-public-access header + * in the Azure Docs for more information. Pass null for no public access. + * @param identifiers + * A list of {@link SignedIdentifier} objects that specify the permissions for the container. Please see + * here + * for more information. Passing null will clear all access policies. + * @param accessConditions + * {@link ContainerAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return + * A reactive response signalling completion. + */ + public Mono setAccessPolicy(PublicAccessType accessType, + List identifiers, ContainerAccessConditions accessConditions, Context context) { + return containerAsyncRawClient + .setAccessPolicy(accessType, identifiers, accessConditions, context) + .then(); + } + + // TODO: figure out if this is meant to stay private or change to public + private boolean validateNoEtag(ModifiedAccessConditions modifiedAccessConditions) { + if (modifiedAccessConditions == null) { + return true; + } + return modifiedAccessConditions.ifMatch() == null && modifiedAccessConditions.ifNoneMatch() == null; + } + + + /** + * Returns a reactive Publisher emitting all the blobs in this container lazily as needed. + * + *

+ * Blob names are returned in lexicographic order. For more information, see the + * Azure Docs. + * + * @return + * A reactive response emitting the flattened blobs. + */ + public Flux listBlobsFlat() { + return this.listBlobsFlat(new ListBlobsOptions(), null); + } + + /** + * Returns a reactive Publisher emitting all the blobs in this container lazily as needed. + * + *

+ * Blob names are returned in lexicographic order. For more information, see the + * Azure Docs. + * + * @param options + * {@link ListBlobsOptions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return + * A reactive response emitting the listed blobs, flattened. + */ + public Flux listBlobsFlat(ListBlobsOptions options, Context context) { + return containerAsyncRawClient + .listBlobsFlatSegment(null, options, context) + .flatMapMany(response -> listBlobsFlatHelper(response.value().marker(), options, context, response)); + } + + private Flux listBlobsFlatHelper(String marker, ListBlobsOptions options, + Context context, ContainersListBlobFlatSegmentResponse response){ + Flux result = Flux.fromIterable(response.value().segment().blobItems()); + + if (response.value().nextMarker() != null) { + // Recursively add the continuation items to the observable. + result = result.concatWith(containerAsyncRawClient.listBlobsFlatSegment(marker, options, + context) + .flatMapMany((r) -> + listBlobsFlatHelper(response.value().nextMarker(), options, context, r))); + } + + return result; + } + + /** + * Returns a single segment of blobs and blob prefixes starting from the specified Marker. Use an empty + * marker to start enumeration from the beginning. Blob names are returned in lexicographic order. + * After getting a segment, process it, and then call ListBlobs again (passing the the previously-returned + * Marker) to get the next segment. For more information, see the + * Azure Docs. + * + * @param marker + * Identifies the portion of the list to be returned with the next list operation. + * This value is returned in the response of a previous list operation as the + * ListBlobsHierarchySegmentResponse.body().nextMarker(). Set to null to list the first segment. + * @param delimiter + * The operation returns a BlobPrefix element in the response body that acts as a placeholder for all blobs + * whose names begin with the same substring up to the appearance of the delimiter character. The delimiter may + * be a single character or a string. + * @param options + * {@link ListBlobsOptions} + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_hierarchy "Sample code for ContainerAsyncClient.listBlobsHierarchySegment")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_hierarchy_helper "helper code for ContainerAsyncClient.listBlobsHierarchySegment")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ +// public Flux listBlobsHierarchySegment(String marker, String delimiter, +// ListBlobsOptions options) { +// return this.listBlobsHierarchySegment(marker, delimiter, options, null); +// } + + /** + * Returns a single segment of blobs and blob prefixes starting from the specified Marker. Use an empty + * marker to start enumeration from the beginning. Blob names are returned in lexicographic order. + * After getting a segment, process it, and then call ListBlobs again (passing the the previously-returned + * Marker) to get the next segment. For more information, see the + * Azure Docs. + * + * @param marker + * Identifies the portion of the list to be returned with the next list operation. + * This value is returned in the response of a previous list operation as the + * ListBlobsHierarchySegmentResponse.body().nextMarker(). Set to null to list the first segment. + * @param delimiter + * The operation returns a BlobPrefix element in the response body that acts as a placeholder for all blobs + * whose names begin with the same substring up to the appearance of the delimiter character. The delimiter may + * be a single character or a string. + * @param options + * {@link ListBlobsOptions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_hierarchy "Sample code for ContainerAsyncClient.listBlobsHierarchySegment")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_hierarchy_helper "helper code for ContainerAsyncClient.listBlobsHierarchySegment")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ +// public Flux listBlobsHierarchySegment(String marker, String delimiter, +// ListBlobsOptions options, Context context) { +// return containerAsyncRawClient +// .listBlobsHierarchySegment(null, delimiter, options, context) +// .flatMapMany(); +// } + + /** + * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 + * seconds, or infinite (-1). + * + * @param proposedId + * A {@code String} in any valid GUID format. May be null. + * @param duration + * The duration of the lease, in seconds, or negative one (-1) for a lease that + * never expires. A non-infinite lease can be between 15 and 60 seconds. + * + * @return + * A reactive response containing the lease ID. + */ + public Mono acquireLease(String proposedId, int duration) { + return this.acquireLease(proposedId, duration, null, null); + } + + /** + * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 + * seconds, or infinite (-1). + * + * @param proposedID + * A {@code String} in any valid GUID format. May be null. + * @param duration + * The duration of the lease, in seconds, or negative one (-1) for a lease that + * never expires. A non-infinite lease can be between 15 and 60 seconds. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response containing the lease ID. + */ + public Mono acquireLease(String proposedID, int duration, ModifiedAccessConditions modifiedAccessConditions, + Context context) { + return containerAsyncRawClient + .acquireLease(proposedID, duration, modifiedAccessConditions, context) + .map(response -> response.deserializedHeaders().leaseId()); + } + + /** + * Renews the blob's previously-acquired lease. + * + * @param leaseID + * The leaseId of the active lease on the blob. + * + * @return + * A reactive response containing the renewed lease ID. + */ + public Mono renewLease(String leaseID) { + return this.renewLease(leaseID, null, null); + } + + /** + * Renews the blob's previously-acquired lease. + * + * @param leaseID + * The leaseId of the active lease on the blob. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response containing the renewed lease ID. + */ + public Mono renewLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions, Context context) { + return containerAsyncRawClient + .renewLease(leaseID, modifiedAccessConditions, context) + .map(response -> response.deserializedHeaders().leaseId()); + } + + /** + * Releases the blob's previously-acquired lease. + * + * @param leaseID + * The leaseId of the active lease on the blob. + * + * @return + * A reactive response signalling completion. + */ + public Mono releaseLease(String leaseID) { + return this.releaseLease(leaseID, null, null); + } + + /** + * Releases the blob's previously-acquired lease. + * + * @param leaseID + * The leaseId of the active lease on the blob. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response signalling completion. + */ + public Mono releaseLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions, Context context) { + return containerAsyncRawClient + .releaseLease(leaseID, modifiedAccessConditions, context) + .then(); + } + + /** + * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant + * to break a fixed-duration lease when it expires or an infinite lease immediately. + * + * @return + * A reactive response containing the remaining time in the broken lease in seconds. + */ + public Mono breakLease() { + return this.breakLease(null, null, null); + } + + /** + * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant + * to break a fixed-duration lease when it expires or an infinite lease immediately. + * + * @param breakPeriodInSeconds + * An optional {@code Integer} representing the proposed duration of seconds that the lease should continue + * before it is broken, between 0 and 60 seconds. This break period is only used if it is shorter than the + * time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be + * available before the break period has expired, but the lease may be held for longer than the break + * period. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response containing the remaining time in the broken lease in seconds. + */ + public Mono breakLease(Integer breakPeriodInSeconds, ModifiedAccessConditions modifiedAccessConditions, + Context context) { + return containerAsyncRawClient + .breakLease(breakPeriodInSeconds, modifiedAccessConditions, context) + .map(response -> response.deserializedHeaders().leaseTime()); + } + + /** + * ChangeLease changes the blob's lease ID. + * + * @param leaseId + * The leaseId of the active lease on the blob. + * @param proposedID + * A {@code String} in any valid GUID format. + * + * @return + * A reactive response containing the new lease ID. + */ + public Mono changeLease(String leaseId, String proposedID) { + return this.changeLease(leaseId, proposedID, null, null); + } + + /** + * ChangeLease changes the blob's lease ID. For more information, see the Azure Docs. + * + * @param leaseId + * The leaseId of the active lease on the blob. + * @param proposedID + * A {@code String} in any valid GUID format. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return A reactive response containing the new lease ID. + */ + public Mono changeLease(String leaseId, String proposedID, ModifiedAccessConditions modifiedAccessConditions, + Context context) { + return containerAsyncRawClient + .changeLease(leaseId, proposedID, modifiedAccessConditions, context) + .map(response -> response.deserializedHeaders().leaseId()); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the + * Azure Docs. + * + * @return + * A reactive response containing the account info. + */ + public Mono getAccountInfo() { + return this.getAccountInfo(null); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the + * Azure Docs. + * + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return + * A reactive response containing the account info. + */ + public Mono getAccountInfo(Context context) { + return containerAsyncRawClient + .getAccountInfo(context) + .map(ResponseBase::deserializedHeaders) + .map(StorageAccountInfo::new); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/ContainerAsyncRawClient.java b/storage/client/src/main/java/com/azure/storage/blob/ContainerAsyncRawClient.java new file mode 100644 index 0000000000000..ad444cb837227 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/ContainerAsyncRawClient.java @@ -0,0 +1,859 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.http.HttpPipeline; +import com.azure.core.util.Context; +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; +import com.azure.storage.blob.models.ContainersAcquireLeaseResponse; +import com.azure.storage.blob.models.ContainersBreakLeaseResponse; +import com.azure.storage.blob.models.ContainersChangeLeaseResponse; +import com.azure.storage.blob.models.ContainersCreateResponse; +import com.azure.storage.blob.models.ContainersDeleteResponse; +import com.azure.storage.blob.models.ContainersGetAccessPolicyResponse; +import com.azure.storage.blob.models.ContainersGetAccountInfoResponse; +import com.azure.storage.blob.models.ContainersGetPropertiesResponse; +import com.azure.storage.blob.models.ContainersListBlobFlatSegmentResponse; +import com.azure.storage.blob.models.ContainersListBlobHierarchySegmentResponse; +import com.azure.storage.blob.models.ContainersReleaseLeaseResponse; +import com.azure.storage.blob.models.ContainersRenewLeaseResponse; +import com.azure.storage.blob.models.ContainersSetAccessPolicyResponse; +import com.azure.storage.blob.models.ContainersSetMetadataResponse; +import com.azure.storage.blob.models.LeaseAccessConditions; +import com.azure.storage.blob.models.ModifiedAccessConditions; +import com.azure.storage.blob.models.PublicAccessType; +import com.azure.storage.blob.models.SignedIdentifier; +import reactor.core.publisher.Mono; + +import java.time.temporal.ChronoUnit; +import java.util.List; + +import static com.azure.storage.blob.Utility.postProcessResponse; + +/** + * Represents a URL to a container. It may be obtained by direct construction or via the create method on a + * {@link StorageAsyncRawClient} object. This class does not hold any state about a particular blob but is instead a convenient way + * of sending off appropriate requests to the resource on the service. It may also be used to construct URLs to blobs. + * Please refer to the + * Azure Docs + * for more information on containers. + */ +final class ContainerAsyncRawClient { + + public static final String ROOT_CONTAINER_NAME = "$root"; + + public static final String STATIC_WEBSITE_CONTAINER_NAME = "$web"; + + public static final String LOG_CONTAINER_NAME = "$logs"; + + AzureBlobStorageImpl azureBlobStorage; + + /** + * Creates a {@code ContainerAsyncClient} object pointing to the account specified by the URL and using the provided + * pipeline to make HTTP requests. + */ + ContainerAsyncRawClient(AzureBlobStorageImpl azureBlobStorage) { + this.azureBlobStorage = azureBlobStorage; + } + + /** + * Creates a new container within a storage account. If a container with the same name already exists, the operation + * fails. For more information, see the + * Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_basic "Sample code for ContainerAsyncClient.create")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono create() { + return this.create(null, null, null); + } + + /** + * Creates a new container within a storage account. If a container with the same name already exists, the operation + * fails. For more information, see the + * Azure Docs. + * + * @param metadata + * {@link Metadata} + * @param accessType + * Specifies how the data in this container is available to the public. See the x-ms-blob-public-access header + * in the Azure Docs for more information. Pass null for no public access. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_basic "Sample code for ContainerAsyncClient.create")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono create(Metadata metadata, PublicAccessType accessType, Context context) { + metadata = metadata == null ? new Metadata() : metadata; + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.containers().createWithRestResponseAsync( + null, null, metadata, accessType, null, context)); + + } + + /** + * Marks the specified container for deletion. The container and any blobs contained within it are later + * deleted during garbage collection. For more information, see the + * Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_basic "Sample code for ContainerAsyncClient.delete")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono delete() { + return this.delete(null, null); + } + + /** + * Marks the specified container for deletion. The container and any blobs contained within it are later + * deleted during garbage collection. For more information, see the + * Azure Docs. + * + * @param accessConditions + * {@link ContainerAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_basic "Sample code for ContainerAsyncClient.delete")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono delete(ContainerAccessConditions accessConditions, Context context) { + accessConditions = accessConditions == null ? new ContainerAccessConditions() : accessConditions; + context = context == null ? Context.NONE : context; + + if (!validateNoEtag(accessConditions.modifiedAccessConditions())) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new UnsupportedOperationException("ETag access conditions are not supported for this API."); + } + + return postProcessResponse(this.azureBlobStorage.containers() + .deleteWithRestResponseAsync(null, null, null, + accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), context)); + } + + /** + * Returns the container's metadata and system properties. For more information, see the + * Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_basic "Sample code for ContainerAsyncClient.getProperties")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono getProperties() { + return this.getProperties(null, null); + } + + /** + * Returns the container's metadata and system properties. For more information, see the + * Azure Docs. + * + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_basic "Sample code for ContainerAsyncClient.getProperties")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono getProperties(LeaseAccessConditions leaseAccessConditions, + Context context) { + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.containers() + .getPropertiesWithRestResponseAsync(null, null, null, + leaseAccessConditions, context)); + } + + /** + * Sets the container's metadata. For more information, see the + * Azure Docs. + * + * @param metadata + * {@link Metadata} + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_basic "Sample code for ContainerAsyncClient.setMetadata")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono setMetadata(Metadata metadata) { + return this.setMetadata(metadata, null, null); + } + + /** + * Sets the container's metadata. For more information, see the + * Azure Docs. + * + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link ContainerAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_basic "Sample code for ContainerAsyncClient.setMetadata")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono setMetadata(Metadata metadata, + ContainerAccessConditions accessConditions, Context context) { + metadata = metadata == null ? new Metadata() : metadata; + accessConditions = accessConditions == null ? new ContainerAccessConditions() : accessConditions; + context = context == null ? Context.NONE : context; + if (!validateNoEtag(accessConditions.modifiedAccessConditions()) + || accessConditions.modifiedAccessConditions().ifUnmodifiedSince() != null) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new UnsupportedOperationException( + "If-Modified-Since is the only HTTP access condition supported for this API"); + } + + return postProcessResponse(this.azureBlobStorage.containers() + .setMetadataWithRestResponseAsync(null, null, metadata, null, + accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), context)); + } + + /** + * Returns the container's permissions. The permissions indicate whether container's blobs may be accessed publicly. + * For more information, see the + * Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_policy "Sample code for ContainerAsyncClient.getAccessPolicy")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono getAccessPolicy() { + return this.getAccessPolicy(null, null); + } + + /** + * Returns the container's permissions. The permissions indicate whether container's blobs may be accessed publicly. + * For more information, see the + * Azure Docs. + * + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_policy "Sample code for ContainerAsyncClient.getAccessPolicy")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono getAccessPolicy(LeaseAccessConditions leaseAccessConditions, + Context context) { + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.containers().getAccessPolicyWithRestResponseAsync( + null, null, null, leaseAccessConditions, context)); + } + + /** + * Sets the container's permissions. The permissions indicate whether blobs in a container may be accessed publicly. + * Note that, for each signed identifier, we will truncate the start and expiry times to the nearest second to + * ensure the time formatting is compatible with the service. For more information, see the + * Azure Docs. + * + * @param accessType + * Specifies how the data in this container is available to the public. See the x-ms-blob-public-access header + * in the Azure Docs for more information. Pass null for no public access. + * @param identifiers + * A list of {@link SignedIdentifier} objects that specify the permissions for the container. Please see + * here + * for more information. Passing null will clear all access policies. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_policy "Sample code for ContainerAsyncClient.setAccessPolicy")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono setAccessPolicy(PublicAccessType accessType, + List identifiers) { + return this.setAccessPolicy(accessType, identifiers, null, null); + } + + /** + * Sets the container's permissions. The permissions indicate whether blobs in a container may be accessed publicly. + * Note that, for each signed identifier, we will truncate the start and expiry times to the nearest second to + * ensure the time formatting is compatible with the service. For more information, see the + * Azure Docs. + * + * @param accessType + * Specifies how the data in this container is available to the public. See the x-ms-blob-public-access header + * in the Azure Docs for more information. Pass null for no public access. + * @param identifiers + * A list of {@link SignedIdentifier} objects that specify the permissions for the container. Please see + * here + * for more information. Passing null will clear all access policies. + * @param accessConditions + * {@link ContainerAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_policy "Sample code for ContainerAsyncClient.setAccessPolicy")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono setAccessPolicy(PublicAccessType accessType, + List identifiers, ContainerAccessConditions accessConditions, Context context) { + accessConditions = accessConditions == null ? new ContainerAccessConditions() : accessConditions; + context = context == null ? Context.NONE : context; + + if (!validateNoEtag(accessConditions.modifiedAccessConditions())) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new UnsupportedOperationException("ETag access conditions are not supported for this API."); + } + + /* + We truncate to seconds because the service only supports nanoseconds or seconds, but doing an + OffsetDateTime.now will only give back milliseconds (more precise fields are zeroed and not serialized). This + allows for proper serialization with no real detriment to users as sub-second precision on active time for + signed identifiers is not really necessary. + */ + if (identifiers != null) { + for (SignedIdentifier identifier : identifiers) { + if (identifier.accessPolicy() != null && identifier.accessPolicy().start() != null) { + identifier.accessPolicy().start( + identifier.accessPolicy().start().truncatedTo(ChronoUnit.SECONDS)); + } + if (identifier.accessPolicy() != null && identifier.accessPolicy().expiry() != null) { + identifier.accessPolicy().expiry( + identifier.accessPolicy().expiry().truncatedTo(ChronoUnit.SECONDS)); + } + } + } + + return postProcessResponse(this.azureBlobStorage.containers() + .setAccessPolicyWithRestResponseAsync(null, identifiers, null, accessType, + null, accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), + context)); + + } + + private boolean validateNoEtag(ModifiedAccessConditions modifiedAccessConditions) { + if (modifiedAccessConditions == null) { + return true; + } + return modifiedAccessConditions.ifMatch() == null && modifiedAccessConditions.ifNoneMatch() == null; + } + + /** + * Acquires a lease on the container for delete operations. The lease duration must be between 15 to + * 60 seconds, or infinite (-1). For more information, see the + * Azure Docs. + * + * @apiNote + * ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerAsyncClient.acquireLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/New-Storage-SDK-V10-Preview/src/test/java/com/microsoft/azure/storage/Samples.java) + * + * @param proposedId + * A {@code String} in any valid GUID format. + * @param duration + * The duration of the lease, in seconds, or negative one (-1) for a lease that never expires. + * A non-infinite lease can be between 15 and 60 seconds. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerAsyncClient.acquireLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono acquireLease(String proposedId, int duration) { + return this.acquireLease(proposedId, duration, null, null); + } + + /** + * Acquires a lease on the container for delete operations. The lease duration must be between 15 to + * 60 seconds, or infinite (-1). For more information, see the + * Azure Docs. + * + * @param proposedID + * A {@code String} in any valid GUID format. + * @param duration + * The duration of the lease, in seconds, or negative one (-1) for a lease that never expires. + * A non-infinite lease can be between 15 and 60 seconds. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerAsyncClient.acquireLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono acquireLease(String proposedID, int duration, + ModifiedAccessConditions modifiedAccessConditions, Context context) { + if (!this.validateNoEtag(modifiedAccessConditions)) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new UnsupportedOperationException( + "ETag access conditions are not supported for this API."); + } + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.containers().acquireLeaseWithRestResponseAsync( + null, null, duration, proposedID, null, modifiedAccessConditions, context)); + } + + /** + * Renews the container's previously-acquired lease. For more information, see the + * Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the container. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerAsyncClient.renewLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono renewLease(String leaseID) { + return this.renewLease(leaseID, null, null); + } + + /** + * Renews the container's previously-acquired lease. For more information, see the + * Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the container. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerAsyncClient.renewLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono renewLease(String leaseID, + ModifiedAccessConditions modifiedAccessConditions, Context context) { + if (!this.validateNoEtag(modifiedAccessConditions)) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new UnsupportedOperationException( + "ETag access conditions are not supported for this API."); + } + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.containers().renewLeaseWithRestResponseAsync(null, + leaseID, null, null, modifiedAccessConditions, context)); + } + + /** + * Releases the container's previously-acquired lease. For more information, see the + * Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the container. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerAsyncClient.releaseLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono releaseLease(String leaseID) { + return this.releaseLease(leaseID, null, null); + } + + /** + * Releases the container's previously-acquired lease. For more information, see the + * Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the container. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerAsyncClient.releaseLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono releaseLease(String leaseID, + ModifiedAccessConditions modifiedAccessConditions, Context context) { + if (!this.validateNoEtag(modifiedAccessConditions)) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new UnsupportedOperationException( + "ETag access conditions are not supported for this API."); + } + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.containers().releaseLeaseWithRestResponseAsync( + null, leaseID, null, null, modifiedAccessConditions, context)); + } + + /** + * Breaks the container's previously-acquired lease. For more information, see the + * Azure Docs. + * + * @apiNote + * ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerAsyncClient.breakLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/New-Storage-SDK-V10-Preview/src/test/java/com/microsoft/azure/storage/Samples.java) + * + * @return Emits the successful response. + */ + public Mono breakLease() { + return this.breakLease(null, null, null); + } + + /** + * Breaks the container's previously-acquired lease. For more information, see the + * Azure Docs. + * + * @param breakPeriodInSeconds + * An optional {@code Integer} representing the proposed duration of seconds that the lease should continue + * before it is broken, between 0 and 60 seconds. This break period is only used if it is shorter than the time + * remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be + * available before the break period has expired, but the lease may be held for longer than the break period. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerAsyncClient.breakLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono breakLease(Integer breakPeriodInSeconds, + ModifiedAccessConditions modifiedAccessConditions, Context context) { + if (!this.validateNoEtag(modifiedAccessConditions)) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new UnsupportedOperationException( + "ETag access conditions are not supported for this API."); + } + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.containers().breakLeaseWithRestResponseAsync(null, + null, breakPeriodInSeconds, null, modifiedAccessConditions, context)); + + } + + /** + * Changes the container's leaseAccessConditions. For more information, see the + * Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the container. + * @param proposedID + * A {@code String} in any valid GUID format. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerAsyncClient.changeLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono changeLease(String leaseID, String proposedID) { + return this.changeLease(leaseID, proposedID, null, null); + } + + /** + * Changes the container's leaseAccessConditions. For more information, see the + * Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the container. + * @param proposedID + * A {@code String} in any valid GUID format. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerAsyncClient.changeLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono changeLease(String leaseID, String proposedID, + ModifiedAccessConditions modifiedAccessConditions, Context context) { + if (!this.validateNoEtag(modifiedAccessConditions)) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new UnsupportedOperationException( + "ETag access conditions are not supported for this API."); + } + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.containers().changeLeaseWithRestResponseAsync(null, + leaseID, proposedID, null, null, modifiedAccessConditions, context)); + } + + /** + * Returns a single segment of blobs starting from the specified Marker. Use an empty + * marker to start enumeration from the beginning. Blob names are returned in lexicographic order. + * After getting a segment, process it, and then call ListBlobs again (passing the the previously-returned + * Marker) to get the next segment. For more information, see the + * Azure Docs. + * + * @param marker + * Identifies the portion of the list to be returned with the next list operation. + * This value is returned in the response of a previous list operation as the + * ListBlobsFlatSegmentResponse.body().nextMarker(). Set to null to list the first segment. + * @param options + * {@link ListBlobsOptions} + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_flat "Sample code for ContainerAsyncClient.listBlobsFlatSegment")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_flat_helper "helper code for ContainerAsyncClient.listBlobsFlatSegment")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono listBlobsFlatSegment(String marker, ListBlobsOptions options) { + return this.listBlobsFlatSegment(marker, options, null); + } + + /** + * Returns a single segment of blobs starting from the specified Marker. Use an empty + * marker to start enumeration from the beginning. Blob names are returned in lexicographic order. + * After getting a segment, process it, and then call ListBlobs again (passing the the previously-returned + * Marker) to get the next segment. For more information, see the + * Azure Docs. + * + * @param marker + * Identifies the portion of the list to be returned with the next list operation. + * This value is returned in the response of a previous list operation as the + * ListBlobsFlatSegmentResponse.body().nextMarker(). Set to null to list the first segment. + * @param options + * {@link ListBlobsOptions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_flat "Sample code for ContainerAsyncClient.listBlobsFlatSegment")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_flat_helper "helper code for ContainerAsyncClient.listBlobsFlatSegment")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono listBlobsFlatSegment(String marker, ListBlobsOptions options, + Context context) { + options = options == null ? new ListBlobsOptions() : options; + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.containers() + .listBlobFlatSegmentWithRestResponseAsync(null, options.prefix(), marker, + options.maxResults(), options.details().toList(), null, null, context)); + } + + /** + * Returns a single segment of blobs and blob prefixes starting from the specified Marker. Use an empty + * marker to start enumeration from the beginning. Blob names are returned in lexicographic order. + * After getting a segment, process it, and then call ListBlobs again (passing the the previously-returned + * Marker) to get the next segment. For more information, see the + * Azure Docs. + * + * @param marker + * Identifies the portion of the list to be returned with the next list operation. + * This value is returned in the response of a previous list operation as the + * ListBlobsHierarchySegmentResponse.body().nextMarker(). Set to null to list the first segment. + * @param delimiter + * The operation returns a BlobPrefix element in the response body that acts as a placeholder for all blobs + * whose names begin with the same substring up to the appearance of the delimiter character. The delimiter may + * be a single character or a string. + * @param options + * {@link ListBlobsOptions} + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_hierarchy "Sample code for ContainerAsyncClient.listBlobsHierarchySegment")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_hierarchy_helper "helper code for ContainerAsyncClient.listBlobsHierarchySegment")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono listBlobsHierarchySegment(String marker, String delimiter, + ListBlobsOptions options) { + return this.listBlobsHierarchySegment(marker, delimiter, options, null); + } + + /** + * Returns a single segment of blobs and blob prefixes starting from the specified Marker. Use an empty + * marker to start enumeration from the beginning. Blob names are returned in lexicographic order. + * After getting a segment, process it, and then call ListBlobs again (passing the the previously-returned + * Marker) to get the next segment. For more information, see the + * Azure Docs. + * + * @param marker + * Identifies the portion of the list to be returned with the next list operation. + * This value is returned in the response of a previous list operation as the + * ListBlobsHierarchySegmentResponse.body().nextMarker(). Set to null to list the first segment. + * @param delimiter + * The operation returns a BlobPrefix element in the response body that acts as a placeholder for all blobs + * whose names begin with the same substring up to the appearance of the delimiter character. The delimiter may + * be a single character or a string. + * @param options + * {@link ListBlobsOptions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_hierarchy "Sample code for ContainerAsyncClient.listBlobsHierarchySegment")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_hierarchy_helper "helper code for ContainerAsyncClient.listBlobsHierarchySegment")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono listBlobsHierarchySegment(String marker, String delimiter, + ListBlobsOptions options, Context context) { + options = options == null ? new ListBlobsOptions() : options; + if (options.details().snapshots()) { + throw new UnsupportedOperationException("Including snapshots in a hierarchical listing is not supported."); + } + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.containers() + .listBlobHierarchySegmentWithRestResponseAsync(null, delimiter, options.prefix(), marker, + options.maxResults(), options.details().toList(), null, null, context)); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the + * Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=account_info "Sample code for ContainerAsyncClient.getAccountInfo")] \n + * For more samples, please see the [Samples file] (https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono getAccountInfo() { + return this.getAccountInfo(null); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the + * Azure Docs. + * + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=account_info "Sample code for ContainerAsyncClient.getAccountInfo")] \n + * For more samples, please see the [Samples file] (https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono getAccountInfo(Context context) { + context = context == null ? Context.NONE : context; + + return postProcessResponse( + this.azureBlobStorage.containers().getAccountInfoWithRestResponseAsync(null, context)); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/ContainerClient.java b/storage/client/src/main/java/com/azure/storage/blob/ContainerClient.java new file mode 100644 index 0000000000000..24d96c410d830 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/ContainerClient.java @@ -0,0 +1,746 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.http.HttpPipeline; +import com.azure.core.util.Context; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.LeaseAccessConditions; +import com.azure.storage.blob.models.ModifiedAccessConditions; +import com.azure.storage.blob.models.PublicAccessType; +import com.azure.storage.blob.models.SignedIdentifier; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.net.MalformedURLException; +import java.net.URL; +import java.time.Duration; +import java.util.List; + +/** + * Client to a container. It may only be instantiated through a {@link ContainerClientBuilder} or via the method + * {@link StorageClient#getContainerClient(String)}. This class does not hold any + * state about a particular container but is instead a convenient way of sending off appropriate requests to + * the resource on the service. It may also be used to construct URLs to blobs. + * + *

+ * This client contains operations on a container. Operations on a blob are available on {@link BlobClient} through + * {@link #getBlobClient(String)}, and operations on the service are available on {@link StorageClient}. + * + *

+ * Please refer to the Azure Docs + * for more information on containers. + */ +public final class ContainerClient { + + private ContainerAsyncClient containerAsyncClient; + private ContainerClientBuilder builder; + + public static final String ROOT_CONTAINER_NAME = "$root"; + + public static final String STATIC_WEBSITE_CONTAINER_NAME = "$web"; + + public static final String LOG_CONTAINER_NAME = "$logs"; + + /** + * Package-private constructor for use by {@link ContainerClientBuilder}. + * @param builder the container client builder + */ + ContainerClient(ContainerClientBuilder builder) { + this.builder = builder; + this.containerAsyncClient = new ContainerAsyncClient(builder); + } + + /** + * @return a new client {@link ContainerClientBuilder} instance. + */ + public static ContainerClientBuilder containerClientBuilder() { + return new ContainerClientBuilder(); + } + + /** + * Creates a new {@link BlockBlobClient} object by concatenating the blobName to the end of + * ContainerAsyncClient's URL. The new BlockBlobClient uses the same request policy pipeline as the ContainerAsyncClient. + * To change the pipeline, create the BlockBlobClient and then call its WithPipeline method passing in the + * desired pipeline object. Or, call this package's NewBlockBlobAsyncClient instead of calling this object's + * NewBlockBlobAsyncClient method. + * + * @param blobName + * A {@code String} representing the name of the blob. + * + * @return A new {@link BlockBlobClient} object which references the blob with the specified name in this container. + */ + public BlockBlobClient getBlockBlobClient(String blobName) { + try { + return new BlockBlobClient(this.builder.copyBuilder().endpoint(Utility.appendToURLPath(new URL(builder.endpoint()), blobName).toString()).buildImpl()); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + } + + /** + * Creates creates a new PageBlobClient object by concatenating blobName to the end of + * ContainerAsyncClient's URL. The new PageBlobClient uses the same request policy pipeline as the ContainerAsyncClient. + * To change the pipeline, create the PageBlobClient and then call its WithPipeline method passing in the + * desired pipeline object. Or, call this package's NewPageBlobAsyncClient instead of calling this object's + * NewPageBlobAsyncClient method. + * + * @param blobName + * A {@code String} representing the name of the blob. + * + * @return A new {@link PageBlobClient} object which references the blob with the specified name in this container. + */ + public PageBlobClient getPageBlobClient(String blobName) { + try { + return new PageBlobClient(this.builder.copyBuilder().endpoint(Utility.appendToURLPath(new URL(builder.endpoint()), blobName).toString()).buildImpl()); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + } + + /** + * Creates creates a new AppendBlobClient object by concatenating blobName to the end of + * ContainerAsyncClient's URL. The new AppendBlobClient uses the same request policy pipeline as the ContainerAsyncClient. + * To change the pipeline, create the AppendBlobClient and then call its WithPipeline method passing in the + * desired pipeline object. Or, call this package's NewAppendBlobAsyncClient instead of calling this object's + * NewAppendBlobAsyncClient method. + * + * @param blobName + * A {@code String} representing the name of the blob. + * + * @return A new {@link AppendBlobClient} object which references the blob with the specified name in this container. + */ + public AppendBlobClient getAppendBlobClient(String blobName) { + try { + return new AppendBlobClient(this.builder.copyBuilder().endpoint(Utility.appendToURLPath(new URL(builder.endpoint()), blobName).toString()).buildImpl()); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + } + + /** + * Initializes a new BlobClient object by concatenating blobName to the end of + * ContainerAsyncClient's URL. The new BlobClient uses the same request policy pipeline as the ContainerAsyncClient. + * To change the pipeline, create the BlobClient and then call its WithPipeline method passing in the + * desired pipeline object. Or, call this package's getBlobAsyncClient instead of calling this object's + * getBlobAsyncClient method. + * + * @param blobName + * A {@code String} representing the name of the blob. + * + * @return A new {@link BlobClient} object which references the blob with the specified name in this container. + */ + public BlobClient getBlobClient(String blobName) { + try { + return new BlobClient(this.builder.copyBuilder().endpoint(Utility.appendToURLPath(new URL(builder.endpoint()), blobName).toString()).buildImpl()); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + } + + /** + * Creates a new container within a storage account. If a container with the same name already exists, the operation + * fails. For more information, see the + * Azure Docs. + */ + public void create() { + this.create(null, null, null, null); + } + + /** + * Creates a new container within a storage account. If a container with the same name already exists, the operation + * fails. For more information, see the + * Azure Docs. + * + * @param metadata + * {@link Metadata} + * @param accessType + * Specifies how the data in this container is available to the public. See the x-ms-blob-public-access header + * in the Azure Docs for more information. Pass null for no public access. + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + */ + public void create(Metadata metadata, PublicAccessType accessType, Duration timeout, Context context) { + Mono response = containerAsyncClient.create(metadata, accessType, context); + + if (timeout == null) { + response.block(); + } else { + response.block(timeout); + } + } + + /** + * Marks the specified container for deletion. The container and any blobs contained within it are later + * deleted during garbage collection. For more information, see the + * Azure Docs. + */ + public void delete() { + this.delete(null, null, null); + } + + /** + * Marks the specified container for deletion. The container and any blobs contained within it are later + * deleted during garbage collection. For more information, see the + * Azure Docs. + * + * @param accessConditions + * {@link ContainerAccessConditions} + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + */ + public void delete(ContainerAccessConditions accessConditions, Duration timeout, Context context) { + Mono response = containerAsyncClient.delete(accessConditions, context); + + if (timeout == null) { + response.block(); + } else { + response.block(timeout); + } + } + + /** + * Returns the container's metadata and system properties. For more information, see the + * Azure Docs. + * + * @return + * The container properties. + */ + public ContainerProperties getProperties() { + return this.getProperties(null, null, null); + } + + /** + * Returns the container's metadata and system properties. For more information, see the + * Azure Docs. + * + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return + * The container properties. + */ + public ContainerProperties getProperties(LeaseAccessConditions leaseAccessConditions, + Duration timeout, Context context) { + Mono response = containerAsyncClient.getProperties(leaseAccessConditions, context); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Sets the container's metadata. For more information, see the + * Azure Docs. + * + * @param metadata + * {@link Metadata} + */ + public void setMetadata(Metadata metadata) { + this.setMetadata(metadata, null, null, null); + } + + /** + * Sets the container's metadata. For more information, see the + * Azure Docs. + * + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link ContainerAccessConditions} + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + */ + public void setMetadata(Metadata metadata, + ContainerAccessConditions accessConditions, Duration timeout, Context context) { + Mono response = containerAsyncClient.setMetadata(metadata, accessConditions, context); + + if (timeout == null) { + response.block(); + } else { + response.block(timeout); + } + } + + /** + * Returns the container's permissions. The permissions indicate whether container's blobs may be accessed publicly. + * For more information, see the + * Azure Docs. + * + * @return + * The container access policy. + */ + public PublicAccessType getAccessPolicy() { + return this.getAccessPolicy(null, null, null); + } + + /** + * Returns the container's permissions. The permissions indicate whether container's blobs may be accessed publicly. + * For more information, see the + * Azure Docs. + * + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return + * The container access policy. + */ + public PublicAccessType getAccessPolicy(LeaseAccessConditions leaseAccessConditions, + Duration timeout, Context context) { + Mono response = containerAsyncClient.getAccessPolicy(leaseAccessConditions, context); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Sets the container's permissions. The permissions indicate whether blobs in a container may be accessed publicly. + * Note that, for each signed identifier, we will truncate the start and expiry times to the nearest second to + * ensure the time formatting is compatible with the service. For more information, see the + * Azure Docs. + * + * @param accessType + * Specifies how the data in this container is available to the public. See the x-ms-blob-public-access header + * in the Azure Docs for more information. Pass null for no public access. + * @param identifiers + * A list of {@link SignedIdentifier} objects that specify the permissions for the container. Please see + * here + * for more information. Passing null will clear all access policies. + */ + public void setAccessPolicy(PublicAccessType accessType, + List identifiers) { + this.setAccessPolicy(accessType, identifiers, null, null,null); + } + + /** + * Sets the container's permissions. The permissions indicate whether blobs in a container may be accessed publicly. + * Note that, for each signed identifier, we will truncate the start and expiry times to the nearest second to + * ensure the time formatting is compatible with the service. For more information, see the + * Azure Docs. + * + * @param accessType + * Specifies how the data in this container is available to the public. See the x-ms-blob-public-access header + * in the Azure Docs for more information. Pass null for no public access. + * @param identifiers + * A list of {@link SignedIdentifier} objects that specify the permissions for the container. Please see + * here + * for more information. Passing null will clear all access policies. + * @param accessConditions + * {@link ContainerAccessConditions} + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + */ + public void setAccessPolicy(PublicAccessType accessType, + List identifiers, ContainerAccessConditions accessConditions, + Duration timeout, Context context) { + Mono response = containerAsyncClient.setAccessPolicy(accessType, identifiers, accessConditions, context); + + if (timeout == null) { + response.block(); + } else { + response.block(timeout); + } + } + + /** + * Returns a lazy loaded list of blobs in this container, with folder structures flattened. + * The returned {@link Iterable} can be iterated through while new items are automatically + * retrieved as needed. + * + *

+ * Blob names are returned in lexicographic order. + * + *

+ * For more information, see the + * Azure Docs. + * + * @return + * The listed blobs, flattened. + */ + public Iterable listBlobsFlat() { + return this.listBlobsFlat(new ListBlobsOptions(), null, null); + } + + /** + * Returns a lazy loaded list of blobs in this container, with folder structures flattened. + * The returned {@link Iterable} can be iterated through while new items are automatically + * retrieved as needed. + * + *

+ * Blob names are returned in lexicographic order. + * + *

+ * For more information, see the + * Azure Docs. + * + * @param options + * {@link ListBlobsOptions} + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return + * The listed blobs, flattened. + */ + public Iterable listBlobsFlat(ListBlobsOptions options, Duration timeout, Context context) { + Flux response = containerAsyncClient.listBlobsFlat(options, context); + + return timeout == null ? + response.toIterable(): + response.timeout(timeout).toIterable(); + } + + /** + * Returns a single segment of blobs and blob prefixes starting from the specified Marker. Use an empty + * marker to start enumeration from the beginning. Blob names are returned in lexicographic order. + * After getting a segment, process it, and then call ListBlobs again (passing the the previously-returned + * Marker) to get the next segment. For more information, see the + * Azure Docs. + * + * @param marker + * Identifies the portion of the list to be returned with the next list operation. + * This value is returned in the response of a previous list operation as the + * ListBlobsHierarchySegmentResponse.body().nextMarker(). Set to null to list the first segment. + * @param delimiter + * The operation returns a BlobPrefix element in the response body that acts as a placeholder for all blobs + * whose names begin with the same substring up to the appearance of the delimiter character. The delimiter may + * be a single character or a string. + * @param options + * {@link ListBlobsOptions} + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_hierarchy "Sample code for ContainerAsyncClient.listBlobsHierarchySegment")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_hierarchy_helper "helper code for ContainerAsyncClient.listBlobsHierarchySegment")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ +// public Iterable listBlobsHierarchySegment(String marker, String delimiter, +// ListBlobsOptions options) { +// return this.listBlobsHierarchySegment(marker, delimiter, options, null, null); +// } + + /** + * Returns a single segment of blobs and blob prefixes starting from the specified Marker. Use an empty + * marker to start enumeration from the beginning. Blob names are returned in lexicographic order. + * After getting a segment, process it, and then call ListBlobs again (passing the the previously-returned + * Marker) to get the next segment. For more information, see the + * Azure Docs. + * + * @param marker + * Identifies the portion of the list to be returned with the next list operation. + * This value is returned in the response of a previous list operation as the + * ListBlobsHierarchySegmentResponse.body().nextMarker(). Set to null to list the first segment. + * @param delimiter + * The operation returns a BlobPrefix element in the response body that acts as a placeholder for all blobs + * whose names begin with the same substring up to the appearance of the delimiter character. The delimiter may + * be a single character or a string. + * @param options + * {@link ListBlobsOptions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_hierarchy "Sample code for ContainerAsyncClient.listBlobsHierarchySegment")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_hierarchy_helper "helper code for ContainerAsyncClient.listBlobsHierarchySegment")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ +// public Iterable listBlobsHierarchySegment(String marker, String delimiter, +// ListBlobsOptions options, Duration timeout, Context context) { +// Flux response = containerAsyncClient.listBlobsHierarchySegment(marker, delimiter, options, context); +// +// return timeout == null ? +// response.toIterable(): +// response.timeout(timeout).toIterable(); +// } + + /** + * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 + * seconds, or infinite (-1). + * + * @param proposedId + * A {@code String} in any valid GUID format. May be null. + * @param duration + * The duration of the lease, in seconds, or negative one (-1) for a lease that + * never expires. A non-infinite lease can be between 15 and 60 seconds. + * + * @return + * The lease ID. + */ + public String acquireLease(String proposedId, int duration) { + return this.acquireLease(proposedId, duration, null, null); + } + + /** + * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 + * seconds, or infinite (-1). + * + * @param proposedID + * A {@code String} in any valid GUID format. May be null. + * @param duration + * The duration of the lease, in seconds, or negative one (-1) for a lease that + * never expires. A non-infinite lease can be between 15 and 60 seconds. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * + * @return + * The lease ID. + */ + public String acquireLease(String proposedID, int duration, + ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { + Mono response = containerAsyncClient + .acquireLease(proposedID, duration, modifiedAccessConditions, null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Renews the blob's previously-acquired lease. + * + * @param leaseID + * The leaseId of the active lease on the blob. + * + * @return + * The renewed lease ID. + */ + public String renewLease(String leaseID) { + return this.renewLease(leaseID, null, null); + } + + /** + * Renews the blob's previously-acquired lease. + * + * @param leaseID + * The leaseId of the active lease on the blob. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * + * @return + * The renewed lease ID. + */ + public String renewLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions, + Duration timeout) { + Mono response = containerAsyncClient + .renewLease(leaseID, modifiedAccessConditions, null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Releases the blob's previously-acquired lease. + * + * @param leaseID + * The leaseId of the active lease on the blob. + */ + public void releaseLease(String leaseID) { + this.releaseLease(leaseID, null, null); + } + + /** + * Releases the blob's previously-acquired lease. + * + * @param leaseID + * The leaseId of the active lease on the blob. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + */ + public void releaseLease(String leaseID, + ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { + Mono response = containerAsyncClient + .releaseLease(leaseID, modifiedAccessConditions, null /*context*/); + + if (timeout == null) { + response.block(); + } else { + response.block(timeout); + } + } + + /** + * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant + * to break a fixed-duration lease when it expires or an infinite lease immediately. + * + * @return + * The remaining time in the broken lease in seconds. + */ + public int breakLease() { + return this.breakLease(null, null, null); + } + + /** + * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant + * to break a fixed-duration lease when it expires or an infinite lease immediately. + * + * @param breakPeriodInSeconds + * An optional {@code Integer} representing the proposed duration of seconds that the lease should continue + * before it is broken, between 0 and 60 seconds. This break period is only used if it is shorter than the + * time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be + * available before the break period has expired, but the lease may be held for longer than the break + * period. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * + * @return + * The remaining time in the broken lease in seconds. + */ + public int breakLease(Integer breakPeriodInSeconds, + ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { + Mono response = containerAsyncClient + .breakLease(breakPeriodInSeconds, modifiedAccessConditions, null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * ChangeLease changes the blob's lease ID. + * + * @param leaseId + * The leaseId of the active lease on the blob. + * @param proposedID + * A {@code String} in any valid GUID format. + * + * @return + * The new lease ID. + */ + public String changeLease(String leaseId, String proposedID) { + return this.changeLease(leaseId, proposedID, null, null); + } + + /** + * ChangeLease changes the blob's lease ID. For more information, see the Azure Docs. + * + * @param leaseId + * The leaseId of the active lease on the blob. + * @param proposedID + * A {@code String} in any valid GUID format. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * + * @return The new lease ID. + */ + public String changeLease(String leaseId, String proposedID, + ModifiedAccessConditions modifiedAccessConditions, Duration timeout) { + Mono response = containerAsyncClient + .changeLease(leaseId, proposedID, modifiedAccessConditions, null /*context*/); + + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the + * Azure Docs. + * + * @return + * The account info. + */ + public StorageAccountInfo getAccountInfo() { + return this.getAccountInfo(null, null); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the + * Azure Docs. + * + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return + * The account info. + */ + public StorageAccountInfo getAccountInfo(Duration timeout, Context context) { + Mono response = containerAsyncClient.getAccountInfo(context); + + return timeout == null ? + response.block(): + response.block(timeout); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/ContainerClientBuilder.java b/storage/client/src/main/java/com/azure/storage/blob/ContainerClientBuilder.java new file mode 100644 index 0000000000000..ad7e637199a02 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/ContainerClientBuilder.java @@ -0,0 +1,239 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.configuration.Configuration; +import com.azure.core.http.HttpClient; +import com.azure.core.http.HttpPipeline; +import com.azure.core.http.policy.AddDatePolicy; +import com.azure.core.http.policy.HttpLogDetailLevel; +import com.azure.core.http.policy.HttpLoggingPolicy; +import com.azure.core.http.policy.HttpPipelinePolicy; +import com.azure.core.http.policy.RequestIdPolicy; +import com.azure.core.http.policy.RetryPolicy; +import com.azure.core.http.policy.UserAgentPolicy; +import com.azure.core.implementation.util.ImplUtils; +import com.azure.storage.blob.implementation.AzureBlobStorageBuilder; +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; + +import java.net.MalformedURLException; +import java.net.URL; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * Fluent ContainerClientBuilder for instantiating a {@link ContainerClient} or {@link ContainerAsyncClient}. + * + *

+ * An instance of this builder may only be created from static method {@link ContainerClient#containerClientBuilder()}. + * The following information must be provided on this builder: + * + *

+ * + *

+ * Once all the configurations are set on this builder, call {@code .buildClient()} to create a + * {@link ContainerClient} or {@code .buildAsyncClient()} to create a {@link ContainerAsyncClient}. + */ +public final class ContainerClientBuilder { + private static final String ACCOUNT_NAME = "AccountName".toLowerCase(); + private static final String ACCOUNT_KEY = "AccountKey".toLowerCase(); + + private final List policies; + + private URL endpoint; + private ICredentials credentials = new AnonymousCredentials(); + private HttpClient httpClient; + private HttpLogDetailLevel logLevel; + private RetryPolicy retryPolicy; + private Configuration configuration; + + public ContainerClientBuilder() { + retryPolicy = new RetryPolicy(); + logLevel = HttpLogDetailLevel.NONE; + policies = new ArrayList<>(); + } + + ContainerClientBuilder(List policies, URL endpoint, ICredentials credentials, + HttpClient httpClient, HttpLogDetailLevel logLevel, RetryPolicy retryPolicy, Configuration configuration) { + this.policies = policies; + this.endpoint = endpoint; + this.credentials = credentials; + this.httpClient = httpClient; + this.logLevel = logLevel; + this.retryPolicy = retryPolicy; + this.configuration = configuration; + } + + ContainerClientBuilder copyBuilder() { + return new ContainerClientBuilder(this.policies, this.endpoint, this.credentials, this.httpClient, this.logLevel, this.retryPolicy, this.configuration); + } + + /** + * Constructs an instance of ContainerAsyncClient based on the configurations stored in the appendBlobClientBuilder. + * @return a new client instance + */ + AzureBlobStorageImpl buildImpl() { + Objects.requireNonNull(endpoint); + + // Closest to API goes first, closest to wire goes last. + final List policies = new ArrayList<>(); + + policies.add(new UserAgentPolicy(BlobConfiguration.NAME, BlobConfiguration.VERSION)); + policies.add(new RequestIdPolicy()); + policies.add(new AddDatePolicy()); + policies.add(credentials); // This needs to be a different credential type. + + policies.add(retryPolicy); + + policies.addAll(this.policies); + policies.add(new HttpLoggingPolicy(logLevel)); + + HttpPipeline pipeline = HttpPipeline.builder() + .policies(policies.toArray(new HttpPipelinePolicy[0])) + .httpClient(httpClient) + .build(); + + return new AzureBlobStorageBuilder() + .url(endpoint.toString()) + .pipeline(pipeline) + .build(); + } + + /** + * @return a {@link ContainerClient} created from the configurations in this builder. + */ + public ContainerClient buildClient() { + return new ContainerClient(this); + } + + /** + * @return a {@link ContainerAsyncClient} created from the configurations in this builder. + */ + public ContainerAsyncClient buildAsyncClient() { + return new ContainerAsyncClient(this); + } + + /** + * Sets the service endpoint, additionally parses it for information (SAS token, container name) + * @param endpoint URL of the service + * @return the updated ContainerClientBuilder object + */ + public ContainerClientBuilder endpoint(String endpoint) { + Objects.requireNonNull(endpoint); + try { + this.endpoint = new URL(endpoint); + } catch (MalformedURLException ex) { + throw new IllegalArgumentException("The Azure Storage Queue endpoint url is malformed."); + } + + return this; + } + + String endpoint() { + return this.endpoint.toString(); + } + + /** + * Sets the credentials used to authorize requests sent to the service + * @param credentials authorization credentials + * @return the updated ContainerClientBuilder object + */ + public ContainerClientBuilder credentials(SharedKeyCredentials credentials) { + this.credentials = credentials; + return this; + } + + /** + * Sets the credentials used to authorize requests sent to the service + * @param credentials authorization credentials + * @return the updated ContainerClientBuilder object + */ + public ContainerClientBuilder credentials(TokenCredentials credentials) { + this.credentials = credentials; + return this; + } + + /** + * Clears the credentials used to authorize requests sent to the service + * @return the updated ContainerClientBuilder object + */ + public ContainerClientBuilder anonymousCredentials() { + this.credentials = new AnonymousCredentials(); + return this; + } + + /** + * Sets the connection string for the service, parses it for authentication information (account name, account key) + * @param connectionString connection string from access keys section + * @return the updated ContainerClientBuilder object + */ + public ContainerClientBuilder connectionString(String connectionString) { + Objects.requireNonNull(connectionString); + + Map connectionKVPs = new HashMap<>(); + for (String s : connectionString.split(";")) { + String[] kvp = s.split("=", 2); + connectionKVPs.put(kvp[0].toLowerCase(), kvp[1]); + } + + String accountName = connectionKVPs.get(ACCOUNT_NAME); + String accountKey = connectionKVPs.get(ACCOUNT_KEY); + + if (ImplUtils.isNullOrEmpty(accountName) || ImplUtils.isNullOrEmpty(accountKey)) { + throw new IllegalArgumentException("Connection string must contain 'AccountName' and 'AccountKey'."); + } + + // Use accountName and accountKey to get the SAS token using the credential class. + credentials = new SharedKeyCredentials(accountName, accountKey); + + return this; + } + + /** + * Sets the http client used to send service requests + * @param httpClient http client to send requests + * @return the updated ContainerClientBuilder object + */ + public ContainerClientBuilder httpClient(HttpClient httpClient) { + this.httpClient = httpClient; + return this; + } + + /** + * Adds a pipeline policy to apply on each request sent + * @param pipelinePolicy a pipeline policy + * @return the updated ContainerClientBuilder object + */ + public ContainerClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { + this.policies.add(pipelinePolicy); + return this; + } + + /** + * Sets the logging level for service requests + * @param logLevel logging level + * @return the updated ContainerClientBuilder object + */ + public ContainerClientBuilder httpLogDetailLevel(HttpLogDetailLevel logLevel) { + this.logLevel = logLevel; + return this; + } + + /** + * Sets the configuration object used to retrieve environment configuration values used to buildClient the client with + * when they are not set in the appendBlobClientBuilder, defaults to Configuration.NONE + * @param configuration configuration store + * @return the updated ContainerClientBuilder object + */ + public ContainerClientBuilder configuration(Configuration configuration) { + this.configuration = configuration; + return this; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/ContainerListDetails.java b/storage/client/src/main/java/com/azure/storage/blob/ContainerListDetails.java new file mode 100644 index 0000000000000..473488f106e4c --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/ContainerListDetails.java @@ -0,0 +1,48 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.storage.blob.models.ListContainersIncludeType; + +/** + * This type allows users to specify additional information the service should return with each container when listing + * containers in an account (via a {@link ServiceURL} object). This type is immutable to ensure thread-safety of + * requests, so changing the details for a different listing operation requires construction of a new object. Null may + * be passed if none of the options are desirable. + */ +public final class ContainerListDetails { + + private boolean metadata; + + public ContainerListDetails() { + + } + + /** + * Whether metadata should be returned. + */ + public boolean metadata() { + return this.metadata; + } + + /** + * Whether metadata should be returned. + */ + public ContainerListDetails withMetadata(boolean metadata) { + this.metadata = metadata; + return this; + } + + /* + This is used internally to convert the details structure into the appropriate type to pass to the protocol layer. + It is intended to mirror the BlobListDetails.toList() method, but is slightly different since there is only one + possible value here currently. The customer should never have need for this. + */ + ListContainersIncludeType toIncludeType() { + if (this.metadata) { + return ListContainersIncludeType.METADATA; + } + return null; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/ContainerProperties.java b/storage/client/src/main/java/com/azure/storage/blob/ContainerProperties.java new file mode 100644 index 0000000000000..7e8d1f7487146 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/ContainerProperties.java @@ -0,0 +1,34 @@ +package com.azure.storage.blob; + +import com.azure.storage.blob.models.ContainerGetPropertiesHeaders; +import com.azure.storage.blob.models.PublicAccessType; + +public class ContainerProperties { + + private PublicAccessType blobPublicAccess; + + private boolean hasImmutabilityPolicy; + + private boolean hasLegalHold; + + //todo decide datetime representation for last modified time + + + ContainerProperties(ContainerGetPropertiesHeaders generatedResponseHeaders) { + this.blobPublicAccess = generatedResponseHeaders.blobPublicAccess(); + this.hasImmutabilityPolicy = generatedResponseHeaders.hasImmutabilityPolicy(); + this.hasLegalHold = generatedResponseHeaders.hasLegalHold(); + } + + public PublicAccessType blobPublicAccess() { + return blobPublicAccess; + } + + public boolean hasImmutabilityPolicy() { + return hasImmutabilityPolicy; + } + + public boolean hasLegalHold() { + return hasLegalHold; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/ContainerRawClient.java b/storage/client/src/main/java/com/azure/storage/blob/ContainerRawClient.java new file mode 100644 index 0000000000000..b817db16d7447 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/ContainerRawClient.java @@ -0,0 +1,763 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.http.HttpPipeline; +import com.azure.core.util.Context; +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; +import com.azure.storage.blob.models.ContainersAcquireLeaseResponse; +import com.azure.storage.blob.models.ContainersBreakLeaseResponse; +import com.azure.storage.blob.models.ContainersChangeLeaseResponse; +import com.azure.storage.blob.models.ContainersCreateResponse; +import com.azure.storage.blob.models.ContainersDeleteResponse; +import com.azure.storage.blob.models.ContainersGetAccessPolicyResponse; +import com.azure.storage.blob.models.ContainersGetAccountInfoResponse; +import com.azure.storage.blob.models.ContainersGetPropertiesResponse; +import com.azure.storage.blob.models.ContainersListBlobFlatSegmentResponse; +import com.azure.storage.blob.models.ContainersListBlobHierarchySegmentResponse; +import com.azure.storage.blob.models.ContainersReleaseLeaseResponse; +import com.azure.storage.blob.models.ContainersRenewLeaseResponse; +import com.azure.storage.blob.models.ContainersSetAccessPolicyResponse; +import com.azure.storage.blob.models.ContainersSetMetadataResponse; +import com.azure.storage.blob.models.LeaseAccessConditions; +import com.azure.storage.blob.models.ModifiedAccessConditions; +import com.azure.storage.blob.models.PublicAccessType; +import com.azure.storage.blob.models.SignedIdentifier; +import reactor.core.publisher.Mono; + +import java.time.Duration; +import java.util.List; + +/** + * Represents a URL to a container. It may be obtained by direct construction or via the create method on a + * {@link StorageAsyncRawClient} object. This class does not hold any state about a particular blob but is instead a convenient way + * of sending off appropriate requests to the resource on the service. It may also be used to construct URLs to blobs. + * Please refer to the + * Azure Docs + * for more information on containers. + */ +final class ContainerRawClient { + + private ContainerAsyncRawClient containerAsyncRawClient; + + public static final String ROOT_CONTAINER_NAME = "$root"; + + public static final String STATIC_WEBSITE_CONTAINER_NAME = "$web"; + + public static final String LOG_CONTAINER_NAME = "$logs"; + + + /** + * Creates a {@code ContainerAsyncClient} object pointing to the account specified by the URL and using the provided + * pipeline to make HTTP requests. + */ + public ContainerRawClient(AzureBlobStorageImpl azureBlobStorage) { + this.containerAsyncRawClient = new ContainerAsyncRawClient(azureBlobStorage); + } + + /** + * Creates a new container within a storage account. If a container with the same name already exists, the operation + * fails. For more information, see the + * Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_basic "Sample code for ContainerAsyncClient.create")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ContainersCreateResponse create() { + return this.create(null, null, null, null); + } + + /** + * Creates a new container within a storage account. If a container with the same name already exists, the operation + * fails. For more information, see the + * Azure Docs. + * + * @param metadata + * {@link Metadata} + * @param accessType + * Specifies how the data in this container is available to the public. See the x-ms-blob-public-access header + * in the Azure Docs for more information. Pass null for no public access. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_basic "Sample code for ContainerAsyncClient.create")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ContainersCreateResponse create(Metadata metadata, PublicAccessType accessType, Duration timeout, Context context) { + Mono response = containerAsyncRawClient.create(metadata, accessType, context); + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Marks the specified container for deletion. The container and any blobs contained within it are later + * deleted during garbage collection. For more information, see the + * Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_basic "Sample code for ContainerAsyncClient.delete")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ContainersDeleteResponse delete() { + return this.delete(null, null, null); + } + + /** + * Marks the specified container for deletion. The container and any blobs contained within it are later + * deleted during garbage collection. For more information, see the + * Azure Docs. + * + * @param accessConditions + * {@link ContainerAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_basic "Sample code for ContainerAsyncClient.delete")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ContainersDeleteResponse delete(ContainerAccessConditions accessConditions, Duration timeout, Context context) { + Mono response = containerAsyncRawClient.delete(accessConditions, context); + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Returns the container's metadata and system properties. For more information, see the + * Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_basic "Sample code for ContainerAsyncClient.getProperties")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ContainersGetPropertiesResponse getProperties() { + return this.getProperties(null, null, null); + } + + /** + * Returns the container's metadata and system properties. For more information, see the + * Azure Docs. + * + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_basic "Sample code for ContainerAsyncClient.getProperties")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ContainersGetPropertiesResponse getProperties(LeaseAccessConditions leaseAccessConditions, + Duration timeout, Context context) { + Mono response = containerAsyncRawClient.getProperties(leaseAccessConditions, context); + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Sets the container's metadata. For more information, see the + * Azure Docs. + * + * @param metadata + * {@link Metadata} + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_basic "Sample code for ContainerAsyncClient.setMetadata")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ContainersSetMetadataResponse setMetadata(Metadata metadata) { + return this.setMetadata(metadata, null, null, null); + } + + /** + * Sets the container's metadata. For more information, see the + * Azure Docs. + * + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link ContainerAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_basic "Sample code for ContainerAsyncClient.setMetadata")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ContainersSetMetadataResponse setMetadata(Metadata metadata, + ContainerAccessConditions accessConditions, Duration timeout, Context context) { + Mono response = containerAsyncRawClient.setMetadata(metadata, accessConditions, context); + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Returns the container's permissions. The permissions indicate whether container's blobs may be accessed publicly. + * For more information, see the + * Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_policy "Sample code for ContainerAsyncClient.getAccessPolicy")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ContainersGetAccessPolicyResponse getAccessPolicy() { + return this.getAccessPolicy(null, null, null); + } + + /** + * Returns the container's permissions. The permissions indicate whether container's blobs may be accessed publicly. + * For more information, see the + * Azure Docs. + * + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_policy "Sample code for ContainerAsyncClient.getAccessPolicy")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ContainersGetAccessPolicyResponse getAccessPolicy(LeaseAccessConditions leaseAccessConditions, + Duration timeout, Context context) { + Mono response = containerAsyncRawClient.getAccessPolicy(leaseAccessConditions, context); + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Sets the container's permissions. The permissions indicate whether blobs in a container may be accessed publicly. + * Note that, for each signed identifier, we will truncate the start and expiry times to the nearest second to + * ensure the time formatting is compatible with the service. For more information, see the + * Azure Docs. + * + * @param accessType + * Specifies how the data in this container is available to the public. See the x-ms-blob-public-access header + * in the Azure Docs for more information. Pass null for no public access. + * @param identifiers + * A list of {@link SignedIdentifier} objects that specify the permissions for the container. Please see + * here + * for more information. Passing null will clear all access policies. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_policy "Sample code for ContainerAsyncClient.setAccessPolicy")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ContainersSetAccessPolicyResponse setAccessPolicy(PublicAccessType accessType, + List identifiers) { + return this.setAccessPolicy(accessType, identifiers, null, null, null); + } + + /** + * Sets the container's permissions. The permissions indicate whether blobs in a container may be accessed publicly. + * Note that, for each signed identifier, we will truncate the start and expiry times to the nearest second to + * ensure the time formatting is compatible with the service. For more information, see the + * Azure Docs. + * + * @param accessType + * Specifies how the data in this container is available to the public. See the x-ms-blob-public-access header + * in the Azure Docs for more information. Pass null for no public access. + * @param identifiers + * A list of {@link SignedIdentifier} objects that specify the permissions for the container. Please see + * here + * for more information. Passing null will clear all access policies. + * @param accessConditions + * {@link ContainerAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_policy "Sample code for ContainerAsyncClient.setAccessPolicy")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ContainersSetAccessPolicyResponse setAccessPolicy(PublicAccessType accessType, + List identifiers, ContainerAccessConditions accessConditions, Duration timeout, Context context) { + Mono response = containerAsyncRawClient.setAccessPolicy(accessType, identifiers, accessConditions, context); + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Acquires a lease on the container for delete operations. The lease duration must be between 15 to + * 60 seconds, or infinite (-1). For more information, see the + * Azure Docs. + * + * @apiNote + * ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerAsyncClient.acquireLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/New-Storage-SDK-V10-Preview/src/test/java/com/microsoft/azure/storage/Samples.java) + * + * @param proposedId + * A {@code String} in any valid GUID format. + * @param duration + * The duration of the lease, in seconds, or negative one (-1) for a lease that never expires. + * A non-infinite lease can be between 15 and 60 seconds. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerAsyncClient.acquireLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ContainersAcquireLeaseResponse acquireLease(String proposedId, int duration) { + return this.acquireLease(proposedId, duration, null, null, null); + } + + /** + * Acquires a lease on the container for delete operations. The lease duration must be between 15 to + * 60 seconds, or infinite (-1). For more information, see the + * Azure Docs. + * + * @param proposedID + * A {@code String} in any valid GUID format. + * @param duration + * The duration of the lease, in seconds, or negative one (-1) for a lease that never expires. + * A non-infinite lease can be between 15 and 60 seconds. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerAsyncClient.acquireLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ContainersAcquireLeaseResponse acquireLease(String proposedID, int duration, + ModifiedAccessConditions modifiedAccessConditions, Duration timeout, Context context) { + Mono response = containerAsyncRawClient.acquireLease(proposedID, duration, modifiedAccessConditions, context); + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Renews the container's previously-acquired lease. For more information, see the + * Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the container. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerAsyncClient.renewLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ContainersRenewLeaseResponse renewLease(String leaseID) { + return this.renewLease(leaseID, null, null, null); + } + + /** + * Renews the container's previously-acquired lease. For more information, see the + * Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the container. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerAsyncClient.renewLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ContainersRenewLeaseResponse renewLease(String leaseID, + ModifiedAccessConditions modifiedAccessConditions, Duration timeout, Context context) { + Mono response = containerAsyncRawClient.renewLease(leaseID, modifiedAccessConditions, context); + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Releases the container's previously-acquired lease. For more information, see the + * Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the container. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerAsyncClient.releaseLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ContainersReleaseLeaseResponse releaseLease(String leaseID) { + return this.releaseLease(leaseID, null, null, null); + } + + /** + * Releases the container's previously-acquired lease. For more information, see the + * Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the container. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerAsyncClient.releaseLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ContainersReleaseLeaseResponse releaseLease(String leaseID, + ModifiedAccessConditions modifiedAccessConditions, Duration timeout, Context context) { + Mono response = containerAsyncRawClient.releaseLease(leaseID, modifiedAccessConditions, context); + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Breaks the container's previously-acquired lease. For more information, see the + * Azure Docs. + * + * @apiNote + * ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerAsyncClient.breakLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/New-Storage-SDK-V10-Preview/src/test/java/com/microsoft/azure/storage/Samples.java) + * + * @return Emits the successful response. + */ + public ContainersBreakLeaseResponse breakLease() { + return this.breakLease(null, null, null, null); + } + + /** + * Breaks the container's previously-acquired lease. For more information, see the + * Azure Docs. + * + * @param breakPeriodInSeconds + * An optional {@code Integer} representing the proposed duration of seconds that the lease should continue + * before it is broken, between 0 and 60 seconds. This break period is only used if it is shorter than the time + * remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be + * available before the break period has expired, but the lease may be held for longer than the break period. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerAsyncClient.breakLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ContainersBreakLeaseResponse breakLease(Integer breakPeriodInSeconds, + ModifiedAccessConditions modifiedAccessConditions, Duration timeout, Context context) { + Mono response = containerAsyncRawClient.breakLease(breakPeriodInSeconds, modifiedAccessConditions, context); + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Changes the container's leaseAccessConditions. For more information, see the + * Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the container. + * @param proposedID + * A {@code String} in any valid GUID format. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerAsyncClient.changeLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ContainersChangeLeaseResponse changeLease(String leaseID, String proposedID) { + return this.changeLease(leaseID, proposedID, null, null, null); + } + + /** + * Changes the container's leaseAccessConditions. For more information, see the + * Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the container. + * @param proposedID + * A {@code String} in any valid GUID format. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerAsyncClient.changeLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ContainersChangeLeaseResponse changeLease(String leaseID, String proposedID, + ModifiedAccessConditions modifiedAccessConditions, Duration timeout, Context context) { + Mono response = containerAsyncRawClient.changeLease(leaseID, proposedID, modifiedAccessConditions, context); + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Returns a single segment of blobs starting from the specified Marker. Use an empty + * marker to start enumeration from the beginning. Blob names are returned in lexicographic order. + * After getting a segment, process it, and then call ListBlobs again (passing the the previously-returned + * Marker) to get the next segment. For more information, see the + * Azure Docs. + * + * @param marker + * Identifies the portion of the list to be returned with the next list operation. + * This value is returned in the response of a previous list operation as the + * ListBlobsFlatSegmentResponse.body().nextMarker(). Set to null to list the first segment. + * @param options + * {@link ListBlobsOptions} + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_flat "Sample code for ContainerAsyncClient.listBlobsFlatSegment")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_flat_helper "helper code for ContainerAsyncClient.listBlobsFlatSegment")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ContainersListBlobFlatSegmentResponse listBlobsFlatSegment(String marker, ListBlobsOptions options) { + return this.listBlobsFlatSegment(marker, options, null, null); + } + + /** + * Returns a single segment of blobs starting from the specified Marker. Use an empty + * marker to start enumeration from the beginning. Blob names are returned in lexicographic order. + * After getting a segment, process it, and then call ListBlobs again (passing the the previously-returned + * Marker) to get the next segment. For more information, see the + * Azure Docs. + * + * @param marker + * Identifies the portion of the list to be returned with the next list operation. + * This value is returned in the response of a previous list operation as the + * ListBlobsFlatSegmentResponse.body().nextMarker(). Set to null to list the first segment. + * @param options + * {@link ListBlobsOptions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_flat "Sample code for ContainerAsyncClient.listBlobsFlatSegment")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_flat_helper "helper code for ContainerAsyncClient.listBlobsFlatSegment")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ContainersListBlobFlatSegmentResponse listBlobsFlatSegment(String marker, ListBlobsOptions options, + Duration timeout, Context context) { + Mono response = containerAsyncRawClient.listBlobsFlatSegment(marker, options, context); + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Returns a single segment of blobs and blob prefixes starting from the specified Marker. Use an empty + * marker to start enumeration from the beginning. Blob names are returned in lexicographic order. + * After getting a segment, process it, and then call ListBlobs again (passing the the previously-returned + * Marker) to get the next segment. For more information, see the + * Azure Docs. + * + * @param marker + * Identifies the portion of the list to be returned with the next list operation. + * This value is returned in the response of a previous list operation as the + * ListBlobsHierarchySegmentResponse.body().nextMarker(). Set to null to list the first segment. + * @param delimiter + * The operation returns a BlobPrefix element in the response body that acts as a placeholder for all blobs + * whose names begin with the same substring up to the appearance of the delimiter character. The delimiter may + * be a single character or a string. + * @param options + * {@link ListBlobsOptions} + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_hierarchy "Sample code for ContainerAsyncClient.listBlobsHierarchySegment")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_hierarchy_helper "helper code for ContainerAsyncClient.listBlobsHierarchySegment")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ContainersListBlobHierarchySegmentResponse listBlobsHierarchySegment(String marker, String delimiter, + ListBlobsOptions options) { + return this.listBlobsHierarchySegment(marker, delimiter, options, null, null); + } + + /** + * Returns a single segment of blobs and blob prefixes starting from the specified Marker. Use an empty + * marker to start enumeration from the beginning. Blob names are returned in lexicographic order. + * After getting a segment, process it, and then call ListBlobs again (passing the the previously-returned + * Marker) to get the next segment. For more information, see the + * Azure Docs. + * + * @param marker + * Identifies the portion of the list to be returned with the next list operation. + * This value is returned in the response of a previous list operation as the + * ListBlobsHierarchySegmentResponse.body().nextMarker(). Set to null to list the first segment. + * @param delimiter + * The operation returns a BlobPrefix element in the response body that acts as a placeholder for all blobs + * whose names begin with the same substring up to the appearance of the delimiter character. The delimiter may + * be a single character or a string. + * @param options + * {@link ListBlobsOptions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_hierarchy "Sample code for ContainerAsyncClient.listBlobsHierarchySegment")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_hierarchy_helper "helper code for ContainerAsyncClient.listBlobsHierarchySegment")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ContainersListBlobHierarchySegmentResponse listBlobsHierarchySegment(String marker, String delimiter, + ListBlobsOptions options, Duration timeout, Context context) { + Mono response = containerAsyncRawClient.listBlobsHierarchySegment(marker, delimiter, options, context); + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the + * Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=account_info "Sample code for ContainerAsyncClient.getAccountInfo")] \n + * For more samples, please see the [Samples file] (https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ContainersGetAccountInfoResponse getAccountInfo() { + return this.getAccountInfo(null, null); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the + * Azure Docs. + * + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=account_info "Sample code for ContainerAsyncClient.getAccountInfo")] \n + * For more samples, please see the [Samples file] (https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ContainersGetAccountInfoResponse getAccountInfo(Duration timeout, Context context) { + Mono response = containerAsyncRawClient.getAccountInfo(context); + return timeout == null + ? response.block() + : response.block(timeout); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/ContainerSASPermission.java b/storage/client/src/main/java/com/azure/storage/blob/ContainerSASPermission.java new file mode 100644 index 0000000000000..413f147d4e873 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/ContainerSASPermission.java @@ -0,0 +1,204 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + + +import java.util.Locale; + +/** + * This is a helper class to construct a string representing the permissions granted by a ServiceSAS to a container. + * Setting a value to true means that any SAS which uses these permissions will grant permissions for that operation. + * Once all the values are set, this should be serialized with toString and set as the permissions field on a + * {@link ServiceSASSignatureValues} object. It is possible to construct the permissions string without this class, but + * the order of the permissions is particular and this class guarantees correctness. + */ +final class ContainerSASPermission { + private boolean read; + + private boolean add; + + private boolean create; + + private boolean write; + + private boolean delete; + + private boolean list; + + /** + * Initializes an {@code ContainerSASPermssion} object with all fields set to false. + */ + public ContainerSASPermission() { + } + + /** + * Creates an {@code ContainerSASPermission} from the specified permissions string. This method will throw an + * {@code IllegalArgumentException} if it encounters a character that does not correspond to a valid permission. + * + * @param permString + * A {@code String} which represents the {@code ContainerSASPermission}. + * + * @return A {@code ContainerSASPermission} generated from the given {@code String}. + */ + public static ContainerSASPermission parse(String permString) { + ContainerSASPermission permissions = new ContainerSASPermission(); + + for (int i = 0; i < permString.length(); i++) { + char c = permString.charAt(i); + switch (c) { + case 'r': + permissions.read = true; + break; + case 'a': + permissions.add = true; + break; + case 'c': + permissions.create = true; + break; + case 'w': + permissions.write = true; + break; + case 'd': + permissions.delete = true; + break; + case 'l': + permissions.list = true; + break; + default: + throw new IllegalArgumentException( + String.format(Locale.ROOT, SR.ENUM_COULD_NOT_BE_PARSED_INVALID_VALUE, "Permissions", permString, c)); + } + } + return permissions; + } + + /** + * Specifies Read access granted. + */ + public boolean read() { + return read; + } + + /** + * Specifies Read access granted. + */ + public ContainerSASPermission withRead(boolean read) { + this.read = read; + return this; + } + + /** + * Specifies Add access granted. + */ + public boolean add() { + return add; + } + + /** + * Specifies Add access granted. + */ + public ContainerSASPermission withAdd(boolean add) { + this.add = add; + return this; + } + + /** + * Specifies Create access granted. + */ + public boolean create() { + return create; + } + + /** + * Specifies Create access granted. + */ + public ContainerSASPermission withCreate(boolean create) { + this.create = create; + return this; + } + + /** + * Specifies Write access granted. + */ + public boolean write() { + return write; + } + + /** + * Specifies Write access granted. + */ + public ContainerSASPermission withWrite(boolean write) { + this.write = write; + return this; + } + + /** + * Specifies Delete access granted. + */ + public boolean delete() { + return delete; + } + + /** + * Specifies Delete access granted. + */ + public ContainerSASPermission withDelete(boolean delete) { + this.delete = delete; + return this; + } + + /** + * Specifies List access granted. + */ + public boolean list() { + return list; + } + + /** + * Specifies List access granted. + */ + public ContainerSASPermission withList(boolean list) { + this.list = list; + return this; + } + + /** + * Converts the given permissions to a {@code String}. Using this method will guarantee the permissions are in an + * order accepted by the service. + * + * @return A {@code String} which represents the {@code ContainerSASPermission}. + */ + @Override + public String toString() { + // The order of the characters should be as specified here to ensure correctness: + // https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas + final StringBuilder builder = new StringBuilder(); + + if (this.read) { + builder.append('r'); + } + + if (this.add) { + builder.append('a'); + } + + if (this.create) { + builder.append('c'); + } + + if (this.write) { + builder.append('w'); + } + + if (this.delete) { + builder.append('d'); + } + + if (this.list) { + builder.append('l'); + } + + return builder.toString(); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/DownloadAsyncResponse.java b/storage/client/src/main/java/com/azure/storage/blob/DownloadAsyncResponse.java new file mode 100644 index 0000000000000..0addf86a77346 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/DownloadAsyncResponse.java @@ -0,0 +1,136 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.http.rest.ResponseBase; +import com.azure.storage.blob.models.BlobDownloadHeaders; +import io.netty.buffer.ByteBuf; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Map; +import java.util.function.Function; + +/** + * {@code DownloadAsyncResponse} wraps the protocol-layer response from {@link BlobAsyncClient#download(BlobRange, + * BlobAccessConditions, boolean, com.azure.core.util.Context)} to automatically retry failed reads from the body as + * appropriate. If the download is interrupted, the {@code DownloadAsyncResponse} will make a request to resume the download + * from where it left off, allowing the user to consume the data as one continuous stream, for any interruptions are + * hidden. The retry behavior is defined by the options passed to the {@link #body(ReliableDownloadOptions)}. The + * download will also lock on the blob's etag to ensure consistency. + *

+ * Note that the retries performed as a part of this reader are composed with those of any retries in an {@link + * com.azure.core.http.HttpPipeline} used in conjunction with this reader. That is, if this object issues a request to resume a download, + * an underlying pipeline may issue several retries as a part of that request. Furthermore, this reader only retries on + * network errors; timeouts and unexpected status codes are not retried. Therefore, the behavior of this reader is + * entirely independent of and in no way coupled to an {@link com.azure.core.http.HttpPipeline}'s retry mechanism. + */ +public final class DownloadAsyncResponse { + private final HTTPGetterInfo info; + + private final ResponseBase> rawResponse; + + private final Function> getter; + + + // The constructor is package-private because customers should not be creating their own responses. + // TODO resolve comment vs code mismatch + public DownloadAsyncResponse(ResponseBase> response, + HTTPGetterInfo info, Function> getter) { + Utility.assertNotNull("getter", getter); + Utility.assertNotNull("info", info); + Utility.assertNotNull("info.eTag", info.eTag()); + this.rawResponse = response; + this.info = info; + this.getter = getter; + } + + /** + * Returns the response body which has been modified to enable reliably reading data if desired (if + * {@code options.maxRetryRequests > 0}. If retries are enabled, if a connection fails while reading, the stream + * will make additional requests to reestablish a connection and continue reading. + * + * @param options + * {@link ReliableDownloadOptions} + * + * @return A {@code Flux} which emits the data as {@code ByteBuffer}s. + */ + public Flux body(ReliableDownloadOptions options) { + ReliableDownloadOptions optionsReal = options == null ? new ReliableDownloadOptions() : options; + if (optionsReal.maxRetryRequests() == 0) { + return this.rawResponse.value(); + } + + /* + We pass -1 for currentRetryCount because we want tryContinueFlux to receive a value of 0 for number of + retries as we have not actually retried yet, only made the initial try. Because applyReliableDownload() will + add 1 before calling into tryContinueFlux, we set the initial value to -1. + */ + return this.applyReliableDownload(this.rawResponse.value(), -1, optionsReal); + } + + private Flux tryContinueFlux(Throwable t, int retryCount, ReliableDownloadOptions options) { + // If all the errors are exhausted, return this error to the user. + if (retryCount > options.maxRetryRequests() || !(t instanceof IOException)) { + return Flux.error(t); + } else { + /* + We wrap this in a try catch because we don't know the behavior of the getter. Most errors would probably + come from an unsuccessful request, which would be propagated through the onError methods. However, it is + possible the method call that returns a Single is what throws (like how our apis throw some exceptions at + call time rather than at subscription time. + */ + try { + // Get a new response and try reading from it. + return getter.apply(this.info) + .flatMapMany(response -> + /* + Do not compound the number of retries by passing in another set of downloadOptions; just get + the raw body. + */ + this.applyReliableDownload(this.rawResponse.value(), retryCount, options)); + } catch (Exception e) { + // If the getter fails, return the getter failure to the user. + return Flux.error(e); + } + } + } + + private Flux applyReliableDownload(Flux data, + int currentRetryCount, ReliableDownloadOptions options) { + return data + .doOnNext(buffer -> { + /* + Update how much data we have received in case we need to retry and propagate to the user the data we + have received. + */ + this.info.withOffset(this.info.offset() + buffer.readableBytes()); // was `remaining()` in Rx world + if (this.info.count() != null) { + this.info.withCount(this.info.count() - buffer.readableBytes()); // was `remaining()` in Rx world + } + }) + .onErrorResume(t2 -> { + // Increment the retry count and try again with the new exception. + return tryContinueFlux(t2, currentRetryCount + 1, options); + }); + } + + public int statusCode() { + return this.rawResponse.statusCode(); + } + + public BlobDownloadHeaders headers() { + return this.rawResponse.deserializedHeaders(); + } + + public Map rawHeaders() { + return this.rawResponse.headers().toMap(); + } + + public ResponseBase> rawResponse() { + return this.rawResponse; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/DownloadResponse.java b/storage/client/src/main/java/com/azure/storage/blob/DownloadResponse.java new file mode 100644 index 0000000000000..c6667d3e16dd1 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/DownloadResponse.java @@ -0,0 +1,33 @@ +package com.azure.storage.blob; + +import io.netty.buffer.ByteBuf; + +import java.io.IOException; +import java.io.OutputStream; + +public class DownloadResponse { + private final DownloadAsyncResponse asyncResponse; + + DownloadResponse(DownloadAsyncResponse asyncResponse) { + this.asyncResponse = asyncResponse; + } + + public void body(OutputStream outputStream, ReliableDownloadOptions options) throws IOException { + for (ByteBuf buffer : this.asyncResponse.body(options).toIterable()) { + buffer.readBytes(outputStream, buffer.readableBytes()); + buffer.release(); + } + } + + //TODO determine signature(s) to use + /*public InputStream body(ReliableDownloadOptions options) { + return new InputStream() { + + DownloadAsyncResponse response = asyncResponse; + @Override + public int read() throws IOException { + return 0; + } + }; + }*/ +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/HTTPGetterInfo.java b/storage/client/src/main/java/com/azure/storage/blob/HTTPGetterInfo.java new file mode 100644 index 0000000000000..cb1fdf86ef2c6 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/HTTPGetterInfo.java @@ -0,0 +1,70 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +/** + * HTTPGetterInfo is a passed to the getter function of a reliable download to specify parameters needed for the GET + * request. + */ +final class HTTPGetterInfo { + private long offset = 0; + + private Long count = null; + + private String eTag = null; + + /** + * The start offset that should be used when creating the HTTP GET request's Range header. Defaults to 0. + */ + public long offset() { + return offset; + } + + /** + * The start offset that should be used when creating the HTTP GET request's Range header. Defaults to 0. + */ + public HTTPGetterInfo withOffset(long offset) { + this.offset = offset; + return this; + } + + /** + * The count of bytes that should be used to calculate the end offset when creating the HTTP GET request's Range + * header. {@code} null is the default and indicates that the entire rest of the blob should be retrieved. + */ + public Long count() { + return count; + } + + /** + * The count of bytes that should be used to calculate the end offset when creating the HTTP GET request's Range + * header. {@code} null is the default and indicates that the entire rest of the blob should be retrieved. + */ + public HTTPGetterInfo withCount(Long count) { + if (count != null) { + Utility.assertInBounds("count", count, 0, Long.MAX_VALUE); + } + this.count = count; + return this; + } + + /** + * The resource's etag that should be used when creating the HTTP GET request's If-Match header. Note that the + * Etag is returned with any operation that modifies the resource and by a call to {@link + * BlobURL#getProperties(BlobAccessConditions, com.microsoft.rest.v2.Context)}. Defaults to null. + */ + public String eTag() { + return eTag; + } + + /** + * The resource's etag that should be used when creating the HTTP GET request's If-Match header. Note that the + * Etag is returned with any operation that modifies the resource and by a call to {@link + * BlobURL#getProperties(BlobAccessConditions, com.microsoft.rest.v2.Context)}. Defaults to null. + */ + public HTTPGetterInfo withETag(String eTag) { + this.eTag = eTag; + return this; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/ICredentials.java b/storage/client/src/main/java/com/azure/storage/blob/ICredentials.java new file mode 100644 index 0000000000000..c47470fa7d93e --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/ICredentials.java @@ -0,0 +1,14 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.http.policy.HttpPipelinePolicy; + +/** + * Credentials represent any credential type + * it is used to create a credential policy Factory. + */ +public interface ICredentials extends HttpPipelinePolicy { + +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/IPRange.java b/storage/client/src/main/java/com/azure/storage/blob/IPRange.java new file mode 100644 index 0000000000000..02a7c6ca19336 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/IPRange.java @@ -0,0 +1,87 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +/** + * This type specifies a continuous range of IP addresses. It is used to limit permissions on SAS tokens. Null may be + * set if it is not desired to confine the sas permissions to an IP range. Please refer to + * {@link AccountSASSignatureValues} or {@link ServiceSASSignatureValues} for more information. + */ +final class IPRange { + + private String ipMin; + + private String ipMax; + + public IPRange() { + } + + /** + * Creates a {@code IPRange} from the specified string. + * + * @param rangeStr + * The {@code String} representation of the {@code IPRange}. + * + * @return The {@code IPRange} generated from the {@code String}. + */ + public static IPRange parse(String rangeStr) { + String[] addrs = rangeStr.split("-"); + IPRange range = new IPRange(); + range.ipMin = addrs[0]; + if (addrs.length > 1) { + range.ipMax = addrs[1]; + } + return range; + } + + /** + * The minimum IP address of the range. + */ + public String ipMin() { + return ipMin; + } + + /** + * The minimum IP address of the range. + */ + public IPRange withIpMin(String ipMin) { + this.ipMin = ipMin; + return this; + } + + /** + * The maximum IP address of the range. + */ + public String ipMax() { + return ipMax; + } + + /** + * The maximum IP address of the range. + */ + public IPRange withIpMax(String ipMax) { + this.ipMax = ipMax; + return this; + } + + /** + * Output the single IP address or range of IP addresses for. + * + * @return The single IP address or range of IP addresses formatted as a {@code String}. + */ + @Override + public String toString() { + if (this.ipMin == null) { + return ""; + } + this.ipMax = this.ipMax == null ? this.ipMin : this.ipMax; + StringBuilder str = new StringBuilder(this.ipMin); + if (!this.ipMin.equals(this.ipMax)) { + str.append('-'); + str.append(this.ipMax); + } + + return str.toString(); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/IProgressReceiver.java b/storage/client/src/main/java/com/azure/storage/blob/IProgressReceiver.java new file mode 100644 index 0000000000000..ab336e1862892 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/IProgressReceiver.java @@ -0,0 +1,22 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +/** + * An {@code IProgressReceiver} is an object that can be used to report progress on network transfers. When specified on + * transfer operations, the {@code reportProgress} method will be called periodically with the total number of bytes + * transferred. The user may configure this method to report progress in whatever format desired. It is recommended + * that this type be used in conjunction with + * {@link ProgressReporter#addProgressReporting(reactor.core.publisher.Flux, IProgressReceiver)}. + */ +interface IProgressReceiver { + + /** + * The callback function invoked as progress is reported. + * + * @param bytesTransferred + * The total number of bytes transferred during this transaction. + */ + void reportProgress(long bytesTransferred); +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/ListBlobsOptions.java b/storage/client/src/main/java/com/azure/storage/blob/ListBlobsOptions.java new file mode 100644 index 0000000000000..f493c8de5f75f --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/ListBlobsOptions.java @@ -0,0 +1,75 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +/** + * Defines options available to configure the behavior of a call to listBlobsFlatSegment on a {@link ContainerURL} + * object. See the constructor for details on each of the options. + */ +public final class ListBlobsOptions { + + private BlobListDetails details; + + private String prefix; + + private Integer maxResults; + + public ListBlobsOptions() { + this.details = new BlobListDetails(); + } + + /** + * {@link BlobListDetails} + */ + public BlobListDetails details() { + return details; + } + + /** + * {@link BlobListDetails} + */ + public ListBlobsOptions withDetails(BlobListDetails details) { + this.details = details; + return this; + } + + /** + * Filters the results to return only blobs whose names begin with the specified prefix. May be null to return + * all blobs. + */ + public String prefix() { + return prefix; + } + + /** + * Filters the results to return only blobs whose names begin with the specified prefix. May be null to return + * all blobs. + */ + public ListBlobsOptions withPrefix(String prefix) { + this.prefix = prefix; + return this; + } + + /** + * Specifies the maximum number of blobs to return, including all BlobPrefix elements. If the request does not + * specify maxResults or specifies a value greater than 5,000, the server will return up to 5,000 items. + */ + public Integer maxResults() { + return maxResults; + } + + /** + * Specifies the maximum number of blobs to return, including all BlobPrefix elements. If the request does not + * specify maxResults or specifies a value greater than 5,000, the server will return up to 5,000 items. + */ + public ListBlobsOptions withMaxResults(Integer maxResults) { + if (maxResults != null && maxResults <= 0) { + throw new IllegalArgumentException("MaxResults must be greater than 0."); + } + this.maxResults = maxResults; + return this; + } + + +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/ListContainersOptions.java b/storage/client/src/main/java/com/azure/storage/blob/ListContainersOptions.java new file mode 100644 index 0000000000000..7cd6d773fcf19 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/ListContainersOptions.java @@ -0,0 +1,72 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +/** + * Defines options available to configure the behavior of a call to listContainersSegment on a {@link ServiceURL} + * object. See the constructor for details on each of the options. Null may be passed in place of an object of this + * type if no options are desirable. + */ +public final class ListContainersOptions { + + private ContainerListDetails details; + + private String prefix; + + private Integer maxResults; + + public ListContainersOptions() { + this.details = new ContainerListDetails(); + } + + /** + * {@link ContainerListDetails} + */ + public ContainerListDetails details() { + return details; + } + + /** + * {@link ContainerListDetails} + */ + public ListContainersOptions withDetails(ContainerListDetails details) { + this.details = details; + return this; + } + + /** + * Filters the results to return only blobs whose names begin with the specified prefix. * + */ + public String prefix() { + return prefix; + } + + /** + * Filters the results to return only blobs whose names begin with the specified prefix. * + */ + public ListContainersOptions withPrefix(String prefix) { + this.prefix = prefix; + return this; + } + + /** + * Specifies the maximum number of blobs to return, including all BlobPrefix elements. If the request does not + * specify maxResults or specifies a value greater than 5,000, the server will return up to 5,000 items. + */ + public Integer maxResults() { + return maxResults; + } + + /** + * Specifies the maximum number of blobs to return, including all BlobPrefix elements. If the request does not + * specify maxResults or specifies a value greater than 5,000, the server will return up to 5,000 items. + */ + public ListContainersOptions withMaxResults(Integer maxResults) { + if (maxResults != null && maxResults <= 0) { + throw new IllegalArgumentException("MaxResults must be greater than 0."); + } + this.maxResults = maxResults; + return this; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/LoggingFactory.java b/storage/client/src/main/java/com/azure/storage/blob/LoggingFactory.java new file mode 100644 index 0000000000000..7d6732eccc6e3 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/LoggingFactory.java @@ -0,0 +1,357 @@ +//// Copyright (c) Microsoft Corporation. All rights reserved. +//// Licensed under the MIT License. +// +//package com.azure.storage.blob; +// +//import com.microsoft.rest.v2.http.HttpPipeline; +//import com.microsoft.rest.v2.http.HttpPipelineLogLevel; +//import com.microsoft.rest.v2.http.HttpRequest; +//import com.microsoft.rest.v2.http.HttpResponse; +//import com.microsoft.rest.v2.policy.RequestPolicy; +//import com.microsoft.rest.v2.policy.RequestPolicyFactory; +//import com.microsoft.rest.v2.policy.RequestPolicyOptions; +//import io.reactivex.Single; +//import org.slf4j.LoggerFactory; +// +//import java.io.File; +//import java.net.HttpURLConnection; +//import java.net.MalformedURLException; +//import java.net.URL; +//import java.net.UnknownHostException; +//import java.util.HashMap; +//import java.util.Locale; +//import java.util.Map; +//import java.util.logging.FileHandler; +//import java.util.logging.Level; +//import java.util.logging.Logger; +// +///** +// * This is a factory which creates policies in an {@link HttpPipeline} for logging requests and responses. In most +// * cases, it is sufficient to configure an object of the {@link LoggingOptions} type and set those as a field on a +// * {@link PipelineOptions} structure to configure a default pipeline. The factory and policy must only be used directly +// * when creating a custom pipeline. +// */ +//public final class LoggingFactory implements RequestPolicyFactory { +// +// private static final Logger FORCE_LOGGER = Logger.getLogger(LoggingFactory.class.getName()); +// private static final org.slf4j.Logger SLF4J_LOGGER = LoggerFactory.getLogger(LoggingFactory.class.getName()); +// private static final Map JAVA_LOG_LEVEL_MAP = new HashMap<>(); +// private static boolean defaultLoggerLoaded; +// +// static { +// try { +// FORCE_LOGGER.setLevel(Level.WARNING); +// +// // Create the logs directory if it doesn't exist. +// File logDir = new File(System.getProperty("java.io.tmpdir"), "AzureStorageJavaSDKLogs"); +// if (!logDir.exists()) { +// if (!logDir.mkdir()) { +// throw new Exception("Could not create logs directory"); +// } +// } +// +// /* +// "/" the local pathname separator +// "%t" the system temporary directory +// "%h" the value of the "user.home" system property +// "%g" the generation number to distinguish rotated logs +// "%u" a unique number to resolve conflicts +// "%%" translates to a single percent sign "%" +// +// 10MB files, 5 files +// +// true- append mode +// */ +// FileHandler handler = new FileHandler("%t/AzureStorageJavaSDKLogs/%u%g", 10 * Constants.MB, 5, false); +// handler.setLevel(Level.WARNING); +// FORCE_LOGGER.addHandler(handler); +// +// JAVA_LOG_LEVEL_MAP.put(HttpPipelineLogLevel.ERROR, Level.SEVERE); +// JAVA_LOG_LEVEL_MAP.put(HttpPipelineLogLevel.WARNING, Level.WARNING); +// JAVA_LOG_LEVEL_MAP.put(HttpPipelineLogLevel.INFO, Level.INFO); +// defaultLoggerLoaded = true; +// +// /* +// If we can't setup default logging, there's nothing we can do. We shouldn't interfere with the rest of logging. +// */ +// } catch (Exception e) { +// defaultLoggerLoaded = false; +// System.err.println("Azure Storage default logging could not be configured due to the following exception: " +// + e); +// } +// } +// +// private final LoggingOptions loggingOptions; +// +// /** +// * Creates a factory which can create LoggingPolicy objects to insert in the pipeline. This will allow for logging +// * requests and responses. +// * +// * @param loggingOptions +// * The configurations for this factory. Null will indicate use of the default options. +// */ +// public LoggingFactory(LoggingOptions loggingOptions) { +// this.loggingOptions = loggingOptions == null ? new LoggingOptions() : loggingOptions; +// } +// +// @Override +// public RequestPolicy create(RequestPolicy next, RequestPolicyOptions options) { +// return new LoggingPolicy(this, next, options); +// } +// +// private static final class LoggingPolicy implements RequestPolicy { +// +// private final LoggingFactory factory; +// +// private final RequestPolicy nextPolicy; +// +// private final RequestPolicyOptions options; +// +// // The following fields are not final because they are updated by the policy. +// private int tryCount; +// +// private long operationStartTime; +// +// private long requestStartTime; +// +// /** +// * Creates a policy which configures the logging behavior within the +// * {@link com.microsoft.rest.v2.http.HttpPipeline}. +// * +// * @param nextPolicy +// * {@link RequestPolicy} +// * @param options +// * {@link RequestPolicyOptions} +// * @param factory +// * {@link LoggingFactory} +// */ +// private LoggingPolicy(LoggingFactory factory, RequestPolicy nextPolicy, RequestPolicyOptions options) { +// this.factory = factory; +// this.nextPolicy = nextPolicy; +// this.options = options; +// } +// +// /** +// * Logs as appropriate. +// * +// * @param request +// * The request to log. +// * +// * @return A {@link Single} representing the {@link HttpResponse} that will arrive asynchronously. +// */ +// @Override +// public Single sendAsync(final HttpRequest request) { +// this.tryCount++; +// this.requestStartTime = System.currentTimeMillis(); +// if (this.tryCount == 1) { +// this.operationStartTime = requestStartTime; +// } +// +// if (this.shouldLog(HttpPipelineLogLevel.INFO)) { +// String logMessage = String.format("'%s'==> OUTGOING REQUEST (Try number='%d')%n", +// sanitizeURL(request.url()), this.tryCount); +// this.log(HttpPipelineLogLevel.INFO, logMessage); +// } +// +// return nextPolicy.sendAsync(request) +// .doOnError(throwable -> { +// if (this.shouldLog(HttpPipelineLogLevel.ERROR)) { +// String logMessage = String.format( +// "Unexpected failure attempting to make request.%nError message:'%s'%n", +// throwable.getMessage()); +// this.log(HttpPipelineLogLevel.ERROR, logMessage); +// } +// }) +// .doOnSuccess(response -> { +// long requestEndTime = System.currentTimeMillis(); +// long requestCompletionTime = requestEndTime - requestStartTime; +// long operationDuration = requestEndTime - operationStartTime; +// HttpPipelineLogLevel currentLevel = HttpPipelineLogLevel.INFO; +// +// String logMessage = Constants.EMPTY_STRING; +// if (this.shouldLog(HttpPipelineLogLevel.INFO)) { +// // Assume success and default to informational logging. +// logMessage = "Successfully Received Response" + System.lineSeparator(); +// } +// +// // If the response took too long, we'll upgrade to warning. +// if (requestCompletionTime +// >= factory.loggingOptions.minDurationToLogSlowRequestsInMs()) { +// // Log a warning if the try duration exceeded the specified threshold. +// if (this.shouldLog(HttpPipelineLogLevel.WARNING)) { +// currentLevel = HttpPipelineLogLevel.WARNING; +// logMessage = String.format(Locale.ROOT, +// "SLOW OPERATION. Duration > %d ms.%n", +// factory.loggingOptions.minDurationToLogSlowRequestsInMs()); +// } +// } +// +// if (((response.statusCode() >= 400 && response.statusCode() <= 499) +// && (response.statusCode() != HttpURLConnection.HTTP_NOT_FOUND +// && response.statusCode() != HttpURLConnection.HTTP_CONFLICT +// && response.statusCode() != HttpURLConnection.HTTP_PRECON_FAILED +// && response.statusCode() != 416)) +// /* 416 is missing from the Enum but it is Range Not Satisfiable */ +// || (response.statusCode() >= 500 && response.statusCode() <= 509)) { +// String errorString = String.format(Locale.ROOT, +// "REQUEST ERROR%nHTTP request failed with status code:'%d'%n", +// response.statusCode()); +// if (currentLevel == HttpPipelineLogLevel.WARNING) { +// logMessage += errorString; +// } else { +// logMessage = errorString; +// } +// +// currentLevel = HttpPipelineLogLevel.ERROR; +// } +// +// /* +// We don't want to format the log message unless we have to. Format once we've determined that +// either the customer wants this log level or we need to force log it. +// */ +// if (this.shouldLog(currentLevel)) { +// String additionalMessageInfo = buildAdditionalMessageInfo(request); +// String messageInfo = String.format(Locale.ROOT, +// "Request try:'%d', request duration:'%d' ms, operation duration:'%d' ms%n%s", +// tryCount, requestCompletionTime, operationDuration, additionalMessageInfo); +// this.log(currentLevel, logMessage + messageInfo); +// } +// }); +// } +// +// private String buildAdditionalMessageInfo(final HttpRequest httpRequest) { +// HttpRequest sanitizedRequest = buildSanitizedRequest(httpRequest); +// StringBuilder stringBuilder = new StringBuilder(); +// String format = "%s: %s" + System.lineSeparator(); +// stringBuilder.append(String.format(format, sanitizedRequest.httpMethod().toString(), +// sanitizedRequest.url().toString())); +// sanitizedRequest.headers().forEach((header) -> stringBuilder.append(String.format(format, header.name(), +// header.value()))); +// return stringBuilder.toString(); +// } +// +// private HttpRequest buildSanitizedRequest(final HttpRequest initialRequest) { +// // Build new URL and redact SAS signature, if present +// URL url = sanitizeURL(initialRequest.url()); +// +// // Build resultRequest +// HttpRequest resultRequest = new HttpRequest( +// initialRequest.callerMethod(), +// initialRequest.httpMethod(), +// url, +// initialRequest.headers(), +// initialRequest.body(), +// initialRequest.responseDecoder()); +// +// // Redact Authorization header, if present +// if (resultRequest.headers().value(Constants.HeaderConstants.AUTHORIZATION) != null) { +// resultRequest.headers().set(Constants.HeaderConstants.AUTHORIZATION, Constants.REDACTED); +// } +// +// // Redact Copy Source header SAS signature, if present +// if (resultRequest.headers().value(Constants.HeaderConstants.COPY_SOURCE) != null) { +// try { +// URL copySourceUrl = sanitizeURL(new URL(resultRequest.headers() +// .value(Constants.HeaderConstants.COPY_SOURCE))); +// resultRequest.headers().set(Constants.HeaderConstants.COPY_SOURCE, copySourceUrl.toString()); +// } catch (MalformedURLException e) { +// throw new RuntimeException(e); +// } +// } +// +// return resultRequest; +// } +// +// private URL sanitizeURL(URL initialURL) { +// URL resultURL = initialURL; +// try { +// BlobURLParts urlParts = URLParser.parse(initialURL); +// if (urlParts.sasQueryParameters() == null || urlParts.sasQueryParameters().signature() == null) { +// return resultURL; +// } +// urlParts.withSasQueryParameters(new SASQueryParameters( +// urlParts.sasQueryParameters().version(), +// urlParts.sasQueryParameters().services(), +// urlParts.sasQueryParameters().resourceTypes(), +// urlParts.sasQueryParameters().protocol(), +// urlParts.sasQueryParameters().startTime(), +// urlParts.sasQueryParameters().expiryTime(), +// urlParts.sasQueryParameters().ipRange(), +// urlParts.sasQueryParameters().identifier(), +// urlParts.sasQueryParameters().resource(), +// urlParts.sasQueryParameters().permissions(), +// Constants.REDACTED, +// urlParts.sasQueryParameters().cacheControl(), +// urlParts.sasQueryParameters().contentDisposition(), +// urlParts.sasQueryParameters().contentEncoding(), +// urlParts.sasQueryParameters().contentLanguage(), +// urlParts.sasQueryParameters().contentType(), +// urlParts.sasQueryParameters().userDelegationKey() +// )); +// resultURL = urlParts.toURL(); +// +// /* +// We are only making valid changes to what has already been validated as a URL (since we got it from a +// URL object), so there should be no need for either us or the caller to check this error. +// */ +// } catch (UnknownHostException | MalformedURLException e) { +// throw new RuntimeException(e); +// } +// return resultURL; +// } +// +// /* +// We need to support the HttpPipelineLogger as it already exists. We also want to allow users to hook up SLF4J. +// Finally, we need to do our own default logging. +// */ +// private void log(HttpPipelineLogLevel level, String message) { +// /* +// We need to explicitly check before we send it to the HttpPipelineLogger as its log function may only +// expect to receive messages for which shouldLog() returns true. +// */ +// if (this.options.shouldLog(level)) { +// this.options.log(level, message); +// } +// +// /* +// The Java logger and slf4j logger should do the correct thing given any log level. FORCE_LOGGER is +// configured to only log warnings and errors. +// */ +// if (!this.factory.loggingOptions.disableDefaultLogging() && LoggingFactory.defaultLoggerLoaded) { +// FORCE_LOGGER.log(JAVA_LOG_LEVEL_MAP.get(level), message); +// } +// if (level.equals(HttpPipelineLogLevel.ERROR)) { +// SLF4J_LOGGER.error(message); +// } else if (level.equals(HttpPipelineLogLevel.WARNING)) { +// SLF4J_LOGGER.warn(message); +// } else if (level.equals(HttpPipelineLogLevel.INFO)) { +// SLF4J_LOGGER.info(message); +// } +// } +// +// /* +// Check the HttpPipelineLogger, SLF4J Logger, and Java Logger +// */ +// private boolean shouldLog(HttpPipelineLogLevel level) { +// // Default log Warnings and Errors as long as default logging is enabled. +// if ((level.equals(HttpPipelineLogLevel.WARNING) || level.equals(HttpPipelineLogLevel.ERROR)) +// && !this.factory.loggingOptions.disableDefaultLogging() && LoggingFactory.defaultLoggerLoaded) { +// return true; +// } +// +// // The user has configured the HttpPipelineLogger to log at this level. +// if (this.options.shouldLog(level)) { +// return true; +// } +// +// // The SLF4J logger is configured at the given level. +// if ((level.equals(HttpPipelineLogLevel.INFO) && SLF4J_LOGGER.isInfoEnabled()) +// || (level.equals(HttpPipelineLogLevel.WARNING) && SLF4J_LOGGER.isWarnEnabled()) +// || (level.equals(HttpPipelineLogLevel.ERROR) && SLF4J_LOGGER.isErrorEnabled())) { +// return true; +// } +// +// return false; +// } +// } +//} diff --git a/storage/client/src/main/java/com/azure/storage/blob/LoggingOptions.java b/storage/client/src/main/java/com/azure/storage/blob/LoggingOptions.java new file mode 100644 index 0000000000000..50e0aa1dd51ff --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/LoggingOptions.java @@ -0,0 +1,69 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +/** + * Options to configure the {@link LoggingPolicy}. Please refer to the Factory for more information. + */ +final class LoggingOptions { + + /** + * Default logging options. {@code MinDurationToLogSlowRequestsInMs} is set to 3000; + */ + public static final long defaultMinDurationToLogSlowRequests = 3000; + + private final long minDurationToLogSlowRequestsInMs; + + private final boolean disableDefaultLogging; + + public LoggingOptions() { + this(defaultMinDurationToLogSlowRequests); + } + + /** + * Creates a new {@link LoggingOptions} object. + * + * @param minDurationToLogSlowRequestsInMs + * The duration after which a tried operation will be logged as a warning. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=pipeline_options "Sample code for LoggingOptions constructor")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public LoggingOptions(long minDurationToLogSlowRequestsInMs) { + this(minDurationToLogSlowRequestsInMs, false); + } + + /** + * Creates a new {@link LoggingOptions} object. + * + * @param minDurationToLogSlowRequestsInMs + * The duration after which a tried operation will be logged as a warning. + * @param disableDefaultLogging + * By default, this library will automatically log warnings and errors to some files in the system's temp + * directory. The size of these files is bounded to a few dozen MB and should not impose a burden on the + * system. It is strongly recommended to leave these logs enabled for customer support reasons, but if + * the user desires a different logging story and enables logging via the HttpPipelineLogger or SLF4J, then + * it should be safe to disable default logging. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=pipeline_options "Sample code for LoggingOptions constructor")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public LoggingOptions(long minDurationToLogSlowRequestsInMs, boolean disableDefaultLogging) { + this.minDurationToLogSlowRequestsInMs = minDurationToLogSlowRequestsInMs; + this.disableDefaultLogging = disableDefaultLogging; + } + + /** + * @return The duration after which a tried operation will be logged as a warning. + */ + public long minDurationToLogSlowRequestsInMs() { + return minDurationToLogSlowRequestsInMs; + } + + public boolean disableDefaultLogging() { + return disableDefaultLogging; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/Metadata.java b/storage/client/src/main/java/com/azure/storage/blob/Metadata.java new file mode 100644 index 0000000000000..be84b52046759 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/Metadata.java @@ -0,0 +1,26 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import java.util.HashMap; +import java.util.Map; + +/** + * Contains metadata key/value pairs to be associated with a storage resource. The user may store any additional + * information about the resource that they like using this map. It is passed to create and setMetadata methods on any + * URL type. Null may be passed to set no metadata. + */ +public final class Metadata extends HashMap { + + // The Metadata is an offshoot of extending HashMap, which implements Serializable. + private static final long serialVersionUID = -6557244540575247796L; + + public Metadata() { + super(); + } + + public Metadata(Map m) { + super(m); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/PageBlobAccessConditions.java b/storage/client/src/main/java/com/azure/storage/blob/PageBlobAccessConditions.java new file mode 100644 index 0000000000000..b1e0e5a82fa33 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/PageBlobAccessConditions.java @@ -0,0 +1,85 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.storage.blob.models.LeaseAccessConditions; +import com.azure.storage.blob.models.ModifiedAccessConditions; +import com.azure.storage.blob.models.SequenceNumberAccessConditions; + +/** + * This class contains values that restrict the successful completion of PageBlob operations to certain conditions. + * It may be set to null if no access conditions are desired. + *

+ * Please refer to the request header section + * here for more conceptual information. + */ +public final class PageBlobAccessConditions { + + private SequenceNumberAccessConditions sequenceNumberAccessConditions; + + private ModifiedAccessConditions modifiedAccessConditions; + + private LeaseAccessConditions leaseAccessConditions; + + /** + * Creates an instance which has fields set to non-null, empty values. + */ + public PageBlobAccessConditions() { + this.sequenceNumberAccessConditions = new SequenceNumberAccessConditions(); + this.modifiedAccessConditions = new ModifiedAccessConditions(); + this.leaseAccessConditions = new LeaseAccessConditions(); + } + + /** + * Access conditions that will fail the request if the sequence number does not meet the provided condition. + */ + public SequenceNumberAccessConditions sequenceNumberAccessConditions() { + return sequenceNumberAccessConditions; + } + + /** + * Access conditions that will fail the request if the sequence number does not meet the provided condition. + */ + public PageBlobAccessConditions withSequenceNumberAccessConditions( + SequenceNumberAccessConditions sequenceNumberAccessConditions) { + this.sequenceNumberAccessConditions = sequenceNumberAccessConditions; + return this; + } + + /** + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used to + * construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + */ + public ModifiedAccessConditions modifiedAccessConditions() { + return modifiedAccessConditions; + } + + /** + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used to + * construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + */ + public PageBlobAccessConditions withModifiedAccessConditions(ModifiedAccessConditions modifiedAccessConditions) { + this.modifiedAccessConditions = modifiedAccessConditions; + return this; + } + + /** + * By setting lease access conditions, requests will fail if the provided lease does not match the active lease on + * the blob. + */ + public LeaseAccessConditions leaseAccessConditions() { + return leaseAccessConditions; + } + + /** + * By setting lease access conditions, requests will fail if the provided lease does not match the active lease on + * the blob. + */ + public PageBlobAccessConditions withLeaseAccessConditions(LeaseAccessConditions leaseAccessConditions) { + this.leaseAccessConditions = leaseAccessConditions; + return this; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/PageBlobAsyncClient.java b/storage/client/src/main/java/com/azure/storage/blob/PageBlobAsyncClient.java new file mode 100644 index 0000000000000..99f93833a0b23 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/PageBlobAsyncClient.java @@ -0,0 +1,529 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.http.HttpPipeline; +import com.azure.core.http.rest.ResponseBase; +import com.azure.core.util.Context; +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; +import com.azure.storage.blob.models.BlobHTTPHeaders; +import com.azure.storage.blob.models.CopyStatusType; +import com.azure.storage.blob.models.ModifiedAccessConditions; +import com.azure.storage.blob.models.PageBlobClearPagesHeaders; +import com.azure.storage.blob.models.PageBlobCreateHeaders; +import com.azure.storage.blob.models.PageBlobResizeHeaders; +import com.azure.storage.blob.models.PageBlobUpdateSequenceNumberHeaders; +import com.azure.storage.blob.models.PageBlobUploadPagesFromURLHeaders; +import com.azure.storage.blob.models.PageBlobUploadPagesHeaders; +import com.azure.storage.blob.models.PageRange; +import com.azure.storage.blob.models.SequenceNumberActionType; +import com.azure.storage.blob.models.SourceModifiedAccessConditions; +import io.netty.buffer.ByteBuf; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.net.URL; + +/** + * Client to a page blob. It may only be instantiated through a {@link PageBlobClientBuilder}, via + * the method {@link BlobAsyncClient#asPageBlobAsyncClient()}, or via the method + * {@link ContainerAsyncClient#getPageBlobAsyncClient(String)}. This class does not hold + * any state about a particular blob, but is instead a convenient way of sending appropriate + * requests to the resource on the service. + * + *

+ * This client contains operations on a blob. Operations on a container are available on {@link ContainerAsyncClient}, + * and operations on the service are available on {@link StorageAsyncClient}. + * + *

+ * Please refer + * to the Azure Docs + * for more information. + * + *

+ * Note this client is an async client that returns reactive responses from Spring Reactor Core + * project (https://projectreactor.io/). Calling the methods in this client will NOT + * start the actual network operation, until {@code .subscribe()} is called on the reactive response. + * You can simply convert one of these responses to a {@link java.util.concurrent.CompletableFuture} + * object through {@link Mono#toFuture()}. + */ +public final class PageBlobAsyncClient extends BlobAsyncClient { + + private PageBlobAsyncRawClient pageBlobAsyncRawClient; + + /** + * Indicates the number of bytes in a page. + */ + public static final int PAGE_BYTES = 512; + + /** + * Indicates the maximum number of bytes that may be sent in a call to putPage. + */ + public static final int MAX_PUT_PAGES_BYTES = 4 * Constants.MB; + + /** + * Package-private constructor for use by {@link PageBlobClientBuilder}. + * @param azureBlobStorage the API client for blob storage API + */ + PageBlobAsyncClient(AzureBlobStorageImpl azureBlobStorage) { + super(azureBlobStorage); + this.pageBlobAsyncRawClient = new PageBlobAsyncRawClient(azureBlobStorage); + } + + /** + * @return a new client {@link PageBlobClientBuilder} instance. + */ + public static PageBlobClientBuilder builder() { + return new PageBlobClientBuilder(); + } + + + /** + * Creates a page blob of the specified length. Call PutPage to upload data data to a page blob. + * For more information, see the + * Azure Docs. + * + * @param size + * Specifies the maximum size for the page blob, up to 8 TB. The page blob size must be aligned to a + * 512-byte boundary. + * + * @return + * A reactive response containing the information of the created page blob. + */ + public Mono create(long size) { + return this.create(size, null, null, null, null, null); + } + + /** + * Creates a page blob of the specified length. Call PutPage to upload data data to a page blob. + * For more information, see the + * Azure Docs. + * + * @param size + * Specifies the maximum size for the page blob, up to 8 TB. The page blob size must be aligned to a + * 512-byte boundary. + * @param sequenceNumber + * A user-controlled value that you can use to track requests. The value of the sequence number must be + * between 0 and 2^63 - 1.The default value is 0. + * @param headers + * {@link BlobHTTPHeaders} + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response containing the information of the created page blob. + */ + public Mono create(long size, Long sequenceNumber, BlobHTTPHeaders headers, + Metadata metadata, BlobAccessConditions accessConditions, Context context) { + return pageBlobAsyncRawClient + .create(size, sequenceNumber, headers, metadata, accessConditions, context) + .map(ResponseBase::deserializedHeaders); + } + + /** + * Writes 1 or more pages to the page blob. The start and end offsets must be a multiple of 512. + * For more information, see the + * Azure Docs. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + * + * @param pageRange + * A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start offset must + * be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges are + * 0-511, 512-1023, etc. + * @param body + * The data to upload. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * + * @return + * A reactive response containing the information of the uploaded pages. + */ + public Mono uploadPages(PageRange pageRange, Flux body) { + return this.uploadPages(pageRange, body, null, null); + } + + /** + * Writes 1 or more pages to the page blob. The start and end offsets must be a multiple of 512. + * For more information, see the + * Azure Docs. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + * + * @param pageRange + * A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start offset + * must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges + * are 0-511, 512-1023, etc. + * @param body + * The data to upload. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * @param pageBlobAccessConditions + * {@link PageBlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response containing the information of the uploaded pages. + */ + public Mono uploadPages(PageRange pageRange, Flux body, + PageBlobAccessConditions pageBlobAccessConditions, Context context) { + return pageBlobAsyncRawClient + .uploadPages(pageRange, body, pageBlobAccessConditions, context) + .map(ResponseBase::deserializedHeaders); + } + + /** + * Writes 1 or more pages from the source page blob to this page blob. The start and end offsets must be a multiple + * of 512. + * For more information, see the + * Azure Docs. + *

+ * + * @param range + * A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start offset + * must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges + * are 0-511, 512-1023, etc. + * @param sourceURL + * The url to the blob that will be the source of the copy. A source blob in the same storage account can be + * authenticated via Shared Key. However, if the source is a blob in another account, the source blob must + * either be public or must be authenticated via a shared access signature. If the source blob is public, no + * authentication is required to perform the operation. + * @param sourceOffset + * The source offset to copy from. Pass null or 0 to copy from the beginning of source page blob. + * + * @return + * A reactive response containing the information of the uploaded pages. + */ + public Mono uploadPagesFromURL(PageRange range, URL sourceURL, Long sourceOffset) { + return this.uploadPagesFromURL(range, sourceURL, sourceOffset, null, null, + null, null); + } + + /** + * Writes 1 or more pages from the source page blob to this page blob. The start and end offsets must be a multiple + * of 512. + * For more information, see the + * Azure Docs. + *

+ * + * @param range + * The destination {@link PageRange} range. Given that pages must be aligned with 512-byte boundaries, the start offset + * must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges + * are 0-511, 512-1023, etc. + * @param sourceURL + * The url to the blob that will be the source of the copy. A source blob in the same storage account can be + * authenticated via Shared Key. However, if the source is a blob in another account, the source blob must + * either be public or must be authenticated via a shared access signature. If the source blob is public, no + * authentication is required to perform the operation. + * @param sourceOffset + * The source offset to copy from. Pass null or 0 to copy from the beginning of source blob. + * @param sourceContentMD5 + * An MD5 hash of the block content from the source blob. If specified, the service will calculate the MD5 + * of the received data and fail the request if it does not match the provided MD5. + * @param destAccessConditions + * {@link PageBlobAccessConditions} + * @param sourceAccessConditions + * {@link SourceModifiedAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response containing the information of the uploaded pages. + */ + public Mono uploadPagesFromURL(PageRange range, URL sourceURL, Long sourceOffset, + byte[] sourceContentMD5, PageBlobAccessConditions destAccessConditions, + SourceModifiedAccessConditions sourceAccessConditions, Context context) { + + return pageBlobAsyncRawClient + .uploadPagesFromURL(range, sourceURL, sourceOffset, sourceContentMD5, destAccessConditions, sourceAccessConditions, context) + .map(ResponseBase::deserializedHeaders); + } + + /** + * Frees the specified pages from the page blob. + * For more information, see the + * Azure Docs. + * + * @param pageRange + * A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start offset + * must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges + * are 0-511, 512-1023, etc. + * + * @return + * A reactive response containing the information of the cleared pages. + */ + public Mono clearPages(PageRange pageRange) { + return this.clearPages(pageRange, null, null); + } + + /** + * Frees the specified pages from the page blob. + * For more information, see the + * Azure Docs. + * + * @param pageRange + * A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start offset + * must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges + * are 0-511, 512-1023, etc. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * @param pageBlobAccessConditions + * {@link PageBlobAccessConditions} + * + * @return + * A reactive response containing the information of the cleared pages. + */ + public Mono clearPages(PageRange pageRange, + PageBlobAccessConditions pageBlobAccessConditions, Context context) { + return pageBlobAsyncRawClient + .clearPages(pageRange, pageBlobAccessConditions, context) + .map(ResponseBase::deserializedHeaders); + } + + /** + * Returns the list of valid page ranges for a page blob or snapshot of a page blob. + * For more information, see the Azure Docs. + * + * @param blobRange + * {@link BlobRange} + * + * @return + * A reactive response containing the information of the cleared pages. + */ + public Flux getPageRanges(BlobRange blobRange) { + return this.getPageRanges(blobRange, null, null); + } + + /** + * Returns the list of valid page ranges for a page blob or snapshot of a page blob. + * For more information, see the Azure Docs. + * + * @param blobRange + * {@link BlobRange} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response emitting all the page ranges. + */ + public Flux getPageRanges(BlobRange blobRange, + BlobAccessConditions accessConditions, Context context) { + return pageBlobAsyncRawClient + .getPageRanges(blobRange, accessConditions, context) + .flatMapMany(response -> Flux.fromIterable(response.value().pageRange())); + } + + /** + * Gets the collection of page ranges that differ between a specified snapshot and this page blob. + * For more information, see the Azure Docs. + * + * @param blobRange + * {@link BlobRange} + * @param prevSnapshot + * Specifies that the response will contain only pages that were changed between target blob and previous + * snapshot. Changed pages include both updated and cleared pages. The target + * blob may be a snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. + * + * @return + * A reactive response emitting all the different page ranges. + */ + public Flux getPageRangesDiff(BlobRange blobRange, String prevSnapshot) { + return this.getPageRangesDiff(blobRange, prevSnapshot, null, null); + } + + /** + * Gets the collection of page ranges that differ between a specified snapshot and this page blob. + * For more information, see the Azure Docs. + * + * @param blobRange + * {@link BlobRange} + * @param prevSnapshot + * Specifies that the response will contain only pages that were changed between target blob and previous + * snapshot. Changed pages include both updated and cleared pages. The target + * blob may be a snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response emitting all the different page ranges. + */ + public Flux getPageRangesDiff(BlobRange blobRange, String prevSnapshot, + BlobAccessConditions accessConditions, Context context) { + return pageBlobAsyncRawClient + .getPageRangesDiff(blobRange, prevSnapshot, accessConditions, context) + .flatMapMany(response -> Flux.fromIterable(response.value().pageRange())); + } + + /** + * Resizes the page blob to the specified size (which must be a multiple of 512). + * For more information, see the Azure Docs. + * + * @param size + * Resizes a page blob to the specified size. If the specified value is less than the current size of the + * blob, then all pages above the specified value are cleared. + * + * @return + * A reactive response emitting the resized page blob. + */ + public Mono resize(long size) { + return this.resize(size, null, null); + } + + /** + * Resizes the page blob to the specified size (which must be a multiple of 512). + * For more information, see the Azure Docs. + * + * @param size + * Resizes a page blob to the specified size. If the specified value is less than the current size of the + * blob, then all pages above the specified value are cleared. + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response emitting the resized page blob. + */ + public Mono resize(long size, BlobAccessConditions accessConditions, Context context) { + return pageBlobAsyncRawClient + .resize(size, accessConditions, context) + .map(ResponseBase::deserializedHeaders); + } + + /** + * Sets the page blob's sequence number. + * For more information, see the Azure Docs. + * + * @param action + * Indicates how the service should modify the blob's sequence number. + * @param sequenceNumber + * The blob's sequence number. The sequence number is a user-controlled property that you can use to track + * requests and manage concurrency issues. + * + * @return + * A reactive response emitting the updated page blob. + */ + public Mono updateSequenceNumber(SequenceNumberActionType action, + Long sequenceNumber) { + return this.updateSequenceNumber(action, sequenceNumber, null, null); + } + + /** + * Sets the page blob's sequence number. + * For more information, see the Azure Docs. + * + * @param action + * Indicates how the service should modify the blob's sequence number. + * @param sequenceNumber + * The blob's sequence number. The sequence number is a user-controlled property that you can use to track + * requests and manage concurrency issues. + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response emitting the updated page blob. + */ + public Mono updateSequenceNumber(SequenceNumberActionType action, + Long sequenceNumber, BlobAccessConditions accessConditions, Context context) { + return pageBlobAsyncRawClient + .updateSequenceNumber(action, sequenceNumber, accessConditions, context) + .map(ResponseBase::deserializedHeaders); + } + + /** + * Begins an operation to start an incremental copy from one page blob's snapshot to this page + * blob. The snapshot is copied such that only the differential changes between the previously copied snapshot are + * transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read + * or copied from as usual. For more information, see + * the Azure Docs here and + * here. + * + * @param source + * The source page blob. + * @param snapshot + * The snapshot on the copy source. + * + * @return + * A reactive response emitting the copy status. + */ + public Mono copyIncremental(URL source, String snapshot) { + return this.copyIncremental(source, snapshot, null, null); + } + + /** + * Begins an operation to start an incremental copy from one page blob's snapshot to this page + * blob. The snapshot is copied such that only the differential changes between the previously copied snapshot are + * transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read + * or copied from as usual. For more information, see + * the Azure Docs here and + * here. + * + * @param source + * The source page blob. + * @param snapshot + * The snapshot on the copy source. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return + * A reactive response emitting the copy status. + */ + public Mono copyIncremental(URL source, String snapshot, + ModifiedAccessConditions modifiedAccessConditions, Context context) { + return pageBlobAsyncRawClient + .copyIncremental(source, snapshot, modifiedAccessConditions, context) + .map(response -> response.deserializedHeaders().copyStatus()); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/PageBlobAsyncRawClient.java b/storage/client/src/main/java/com/azure/storage/blob/PageBlobAsyncRawClient.java new file mode 100644 index 0000000000000..6b19f1f00a415 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/PageBlobAsyncRawClient.java @@ -0,0 +1,671 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.implementation.http.UrlBuilder; +import com.azure.core.util.Context; +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; +import com.azure.storage.blob.models.BlobHTTPHeaders; +import com.azure.storage.blob.models.ModifiedAccessConditions; +import com.azure.storage.blob.models.PageBlobsClearPagesResponse; +import com.azure.storage.blob.models.PageBlobsCopyIncrementalResponse; +import com.azure.storage.blob.models.PageBlobsCreateResponse; +import com.azure.storage.blob.models.PageBlobsGetPageRangesDiffResponse; +import com.azure.storage.blob.models.PageBlobsGetPageRangesResponse; +import com.azure.storage.blob.models.PageBlobsResizeResponse; +import com.azure.storage.blob.models.PageBlobsUpdateSequenceNumberResponse; +import com.azure.storage.blob.models.PageBlobsUploadPagesFromURLResponse; +import com.azure.storage.blob.models.PageBlobsUploadPagesResponse; +import com.azure.storage.blob.models.PageRange; +import com.azure.storage.blob.models.SequenceNumberActionType; +import com.azure.storage.blob.models.SourceModifiedAccessConditions; +import io.netty.buffer.ByteBuf; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.net.MalformedURLException; +import java.net.URL; + +import static com.azure.storage.blob.Utility.postProcessResponse; + +/** + * Represents a URL to a page blob. It may be obtained by direct construction or via the create method on a + * {@link ContainerAsyncClient} object. This class does not hold any state about a particular blob but is instead a convenient + * way of sending off appropriate requests to the resource on the service. Please refer to the + * Azure Docs + * for more information. + */ +final class PageBlobAsyncRawClient extends BlobAsyncRawClient { + + /** + * Indicates the number of bytes in a page. + */ + public static final int PAGE_BYTES = 512; + + /** + * Indicates the maximum number of bytes that may be sent in a call to putPage. + */ + public static final int MAX_PUT_PAGES_BYTES = 4 * Constants.MB; + + /** + * Creates a {@code PageBlobAsyncRawClient} object pointing to the account specified by the URL and using the provided + * pipeline to make HTTP requests. + * + */ + public PageBlobAsyncRawClient(AzureBlobStorageImpl azureBlobStorage) { + super(azureBlobStorage); + } + + private static String pageRangeToString(PageRange pageRange) { + if (pageRange.start() < 0 || pageRange.end() <= 0) { + throw new IllegalArgumentException("PageRange's start and end values must be greater than or equal to " + + "0 if specified."); + } + if (pageRange.start() % PageBlobAsyncRawClient.PAGE_BYTES != 0) { + throw new IllegalArgumentException("PageRange's start value must be a multiple of 512."); + } + if (pageRange.end() % PageBlobAsyncRawClient.PAGE_BYTES != PageBlobAsyncRawClient.PAGE_BYTES - 1) { + throw new IllegalArgumentException("PageRange's end value must be 1 less than a multiple of 512."); + } + if (pageRange.end() <= pageRange.start()) { + throw new IllegalArgumentException("PageRange's End value must be after the start."); + } + return new StringBuilder("bytes=").append(pageRange.start()).append('-').append(pageRange.end()).toString(); + } + + /** + * Creates a page blob of the specified length. Call PutPage to upload data data to a page blob. + * For more information, see the + * Azure Docs. + * + * @param size + * Specifies the maximum size for the page blob, up to 8 TB. The page blob size must be aligned to a + * 512-byte boundary. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobAsyncRawClient.create")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono create(long size) { + return this.create(size, null, null, null, null, null); + } + + /** + * Creates a page blob of the specified length. Call PutPage to upload data data to a page blob. + * For more information, see the + * Azure Docs. + * + * @param size + * Specifies the maximum size for the page blob, up to 8 TB. The page blob size must be aligned to a + * 512-byte boundary. + * @param sequenceNumber + * A user-controlled value that you can use to track requests. The value of the sequence number must be + * between 0 and 2^63 - 1.The default value is 0. + * @param headers + * {@link BlobHTTPHeaders} + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobAsyncRawClient.create")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono create(long size, Long sequenceNumber, BlobHTTPHeaders headers, + Metadata metadata, BlobAccessConditions accessConditions, Context context) { + accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions; + + if (size % PageBlobAsyncRawClient.PAGE_BYTES != 0) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new IllegalArgumentException("size must be a multiple of PageBlobAsyncRawClient.PAGE_BYTES."); + } + if (sequenceNumber != null && sequenceNumber < 0) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new IllegalArgumentException("SequenceNumber must be greater than or equal to 0."); + } + metadata = metadata == null ? new Metadata() : metadata; + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.pageBlobs().createWithRestResponseAsync(null, + null, 0, size, null, metadata, null, null, + null, sequenceNumber, null, headers, accessConditions.leaseAccessConditions(), + accessConditions.modifiedAccessConditions(), context)); + } + + /** + * Writes 1 or more pages to the page blob. The start and end offsets must be a multiple of 512. + * For more information, see the + * Azure Docs. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + * + * @param pageRange + * A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start offset must + * be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges are + * 0-511, 512-1023, etc. + * @param body + * The data to upload. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobAsyncRawClient.uploadPages")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono uploadPages(PageRange pageRange, Flux body) { + return this.uploadPages(pageRange, body, null, null); + } + + /** + * Writes 1 or more pages to the page blob. The start and end offsets must be a multiple of 512. + * For more information, see the + * Azure Docs. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + * + * @param pageRange + * A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start offset + * must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges + * are 0-511, 512-1023, etc. + * @param body + * The data to upload. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * @param pageBlobAccessConditions + * {@link PageBlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobAsyncRawClient.uploadPages")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono uploadPages(PageRange pageRange, Flux body, + PageBlobAccessConditions pageBlobAccessConditions, Context context) { + pageBlobAccessConditions = pageBlobAccessConditions == null ? new PageBlobAccessConditions() + : pageBlobAccessConditions; + + if (pageRange == null) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new IllegalArgumentException("pageRange cannot be null."); + } + String pageRangeStr = pageRangeToString(pageRange); + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.pageBlobs().uploadPagesWithRestResponseAsync(null, + null, body, pageRange.end() - pageRange.start() + 1, null, + null, pageRangeStr, null, null, null, null, + pageBlobAccessConditions.leaseAccessConditions(), pageBlobAccessConditions.sequenceNumberAccessConditions(), + pageBlobAccessConditions.modifiedAccessConditions(), context)); + } + + /** + * Writes 1 or more pages from the source page blob to this page blob. The start and end offsets must be a multiple + * of 512. + * For more information, see the + * Azure Docs. + *

+ * + * @param range + * A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start offset + * must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges + * are 0-511, 512-1023, etc. + * @param sourceURL + * The url to the blob that will be the source of the copy. A source blob in the same storage account can be + * authenticated via Shared Key. However, if the source is a blob in another account, the source blob must + * either be public or must be authenticated via a shared access signature. If the source blob is public, no + * authentication is required to perform the operation. + * @param sourceOffset + * The source offset to copy from. Pass null or 0 to copy from the beginning of source page blob. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_from_url "Sample code for PageBlobAsyncRawClient.uploadPagesFromURL")] + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono uploadPagesFromURL(PageRange range, URL sourceURL, Long sourceOffset) { + return this.uploadPagesFromURL(range, sourceURL, sourceOffset, null, null, + null, null); + } + + /** + * Writes 1 or more pages from the source page blob to this page blob. The start and end offsets must be a multiple + * of 512. + * For more information, see the + * Azure Docs. + *

+ * + * @param range + * The destination {@link PageRange} range. Given that pages must be aligned with 512-byte boundaries, the start offset + * must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges + * are 0-511, 512-1023, etc. + * @param sourceURL + * The url to the blob that will be the source of the copy. A source blob in the same storage account can be + * authenticated via Shared Key. However, if the source is a blob in another account, the source blob must + * either be public or must be authenticated via a shared access signature. If the source blob is public, no + * authentication is required to perform the operation. + * @param sourceOffset + * The source offset to copy from. Pass null or 0 to copy from the beginning of source blob. + * @param sourceContentMD5 + * An MD5 hash of the block content from the source blob. If specified, the service will calculate the MD5 + * of the received data and fail the request if it does not match the provided MD5. + * @param destAccessConditions + * {@link PageBlobAccessConditions} + * @param sourceAccessConditions + * {@link SourceModifiedAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_from_url "Sample code for PageBlobAsyncRawClient.uploadPagesFromURL")] + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono uploadPagesFromURL(PageRange range, URL sourceURL, Long sourceOffset, + byte[] sourceContentMD5, PageBlobAccessConditions destAccessConditions, + SourceModifiedAccessConditions sourceAccessConditions, Context context) { + + if (range == null) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new IllegalArgumentException("range cannot be null."); + } + + String rangeString = pageRangeToString(range); + + if (sourceOffset == null) { + sourceOffset = 0L; + } + + String sourceRangeString = pageRangeToString(new PageRange().start(sourceOffset).end(sourceOffset + (range.end() - range.start()))); + + destAccessConditions = destAccessConditions == null ? new PageBlobAccessConditions() : destAccessConditions; + + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.pageBlobs().uploadPagesFromURLWithRestResponseAsync( + null, null, sourceURL, sourceRangeString, 0, rangeString, sourceContentMD5, + null, null, destAccessConditions.leaseAccessConditions(), + destAccessConditions.sequenceNumberAccessConditions(), destAccessConditions.modifiedAccessConditions(), + sourceAccessConditions, context)); + } + + /** + * Frees the specified pages from the page blob. + * For more information, see the + * Azure Docs. + * + * @param pageRange + * A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start offset + * must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges + * are 0-511, 512-1023, etc. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobAsyncRawClient.clearPages")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono clearPages(PageRange pageRange) { + return this.clearPages(pageRange, null, null); + } + + /** + * Frees the specified pages from the page blob. + * For more information, see the + * Azure Docs. + * + * @param pageRange + * A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start offset + * must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges + * are 0-511, 512-1023, etc. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * @param pageBlobAccessConditions + * {@link PageBlobAccessConditions} + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobAsyncRawClient.clearPages")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono clearPages(PageRange pageRange, + PageBlobAccessConditions pageBlobAccessConditions, Context context) { + pageBlobAccessConditions = pageBlobAccessConditions == null ? new PageBlobAccessConditions() + : pageBlobAccessConditions; + if (pageRange == null) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new IllegalArgumentException("pageRange cannot be null."); + } + String pageRangeStr = pageRangeToString(pageRange); + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.pageBlobs().clearPagesWithRestResponseAsync(null, + null, 0, null, pageRangeStr, null, + pageBlobAccessConditions.leaseAccessConditions(), pageBlobAccessConditions.sequenceNumberAccessConditions(), + pageBlobAccessConditions.modifiedAccessConditions(), context)); + } + + /** + * Returns the list of valid page ranges for a page blob or snapshot of a page blob. + * For more information, see the Azure Docs. + * + * @param blobRange + * {@link BlobRange} + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobAsyncRawClient.getPageRanges")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono getPageRanges(BlobRange blobRange) { + return this.getPageRanges(blobRange, null, null); + } + + /** + * Returns the list of valid page ranges for a page blob or snapshot of a page blob. + * For more information, see the Azure Docs. + * + * @param blobRange + * {@link BlobRange} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobAsyncRawClient.getPageRanges")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono getPageRanges(BlobRange blobRange, + BlobAccessConditions accessConditions, Context context) { + blobRange = blobRange == null ? new BlobRange(0) : blobRange; + accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions; + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.pageBlobs().getPageRangesWithRestResponseAsync( + null, null, null, null, null, blobRange.toHeaderValue(), + null, accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), + context)); + } + + /** + * Gets the collection of page ranges that differ between a specified snapshot and this page blob. + * For more information, see the Azure Docs. + * + * @param blobRange + * {@link BlobRange} + * @param prevSnapshot + * Specifies that the response will contain only pages that were changed between target blob and previous + * snapshot. Changed pages include both updated and cleared pages. The target + * blob may be a snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_diff "Sample code for PageBlobAsyncRawClient.getPageRangesDiff")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono getPageRangesDiff(BlobRange blobRange, String prevSnapshot) { + return this.getPageRangesDiff(blobRange, prevSnapshot, null, null); + } + + /** + * Gets the collection of page ranges that differ between a specified snapshot and this page blob. + * For more information, see the Azure Docs. + * + * @param blobRange + * {@link BlobRange} + * @param prevSnapshot + * Specifies that the response will contain only pages that were changed between target blob and previous + * snapshot. Changed pages include both updated and cleared pages. The target + * blob may be a snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_diff "Sample code for PageBlobAsyncRawClient.getPageRangesDiff")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono getPageRangesDiff(BlobRange blobRange, String prevSnapshot, + BlobAccessConditions accessConditions, Context context) { + blobRange = blobRange == null ? new BlobRange(0) : blobRange; + accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions; + context = context == null ? Context.NONE : context; + + if (prevSnapshot == null) { + throw new IllegalArgumentException("prevSnapshot cannot be null"); + } + + return postProcessResponse(this.azureBlobStorage.pageBlobs().getPageRangesDiffWithRestResponseAsync( + null, null, null, null, null, prevSnapshot, + blobRange.toHeaderValue(), null, accessConditions.leaseAccessConditions(), + accessConditions.modifiedAccessConditions(), context)); + } + + /** + * Resizes the page blob to the specified size (which must be a multiple of 512). + * For more information, see the Azure Docs. + * + * @param size + * Resizes a page blob to the specified size. If the specified value is less than the current size of the + * blob, then all pages above the specified value are cleared. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobAsyncRawClient.resize")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono resize(long size) { + return this.resize(size, null, null); + } + + /** + * Resizes the page blob to the specified size (which must be a multiple of 512). + * For more information, see the Azure Docs. + * + * @param size + * Resizes a page blob to the specified size. If the specified value is less than the current size of the + * blob, then all pages above the specified value are cleared. + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobAsyncRawClient.resize")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono resize(long size, BlobAccessConditions accessConditions, Context context) { + if (size % PageBlobAsyncRawClient.PAGE_BYTES != 0) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new IllegalArgumentException("size must be a multiple of PageBlobAsyncRawClient.PAGE_BYTES."); + } + accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions; + context = context == null ? Context.NONE : context; + + return postProcessResponse(this.azureBlobStorage.pageBlobs().resizeWithRestResponseAsync(null, + null, size, null, null, accessConditions.leaseAccessConditions(), + accessConditions.modifiedAccessConditions(), context)); + } + + /** + * Sets the page blob's sequence number. + * For more information, see the Azure Docs. + * + * @param action + * Indicates how the service should modify the blob's sequence number. + * @param sequenceNumber + * The blob's sequence number. The sequence number is a user-controlled property that you can use to track + * requests and manage concurrency issues. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobAsyncRawClient.updateSequenceNumber")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono updateSequenceNumber(SequenceNumberActionType action, + Long sequenceNumber) { + return this.updateSequenceNumber(action, sequenceNumber, null, null); + } + + /** + * Sets the page blob's sequence number. + * For more information, see the Azure Docs. + * + * @param action + * Indicates how the service should modify the blob's sequence number. + * @param sequenceNumber + * The blob's sequence number. The sequence number is a user-controlled property that you can use to track + * requests and manage concurrency issues. + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobAsyncRawClient.updateSequenceNumber")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono updateSequenceNumber(SequenceNumberActionType action, + Long sequenceNumber, BlobAccessConditions accessConditions, Context context) { + if (sequenceNumber != null && sequenceNumber < 0) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new IllegalArgumentException("SequenceNumber must be greater than or equal to 0."); + } + accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions; + sequenceNumber = action == SequenceNumberActionType.INCREMENT ? null : sequenceNumber; + context = context == null ? Context.NONE : context; + + return postProcessResponse( + this.azureBlobStorage.pageBlobs().updateSequenceNumberWithRestResponseAsync(null, + null, action, null, sequenceNumber, null, + accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions(), context)); + } + + /** + * Begins an operation to start an incremental copy from one page blob's snapshot to this page + * blob. The snapshot is copied such that only the differential changes between the previously copied snapshot are + * transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read + * or copied from as usual. For more information, see + * the Azure Docs here and + * here. + * + * @param source + * The source page blob. + * @param snapshot + * The snapshot on the copy source. + * + * @return Emits the successful response. + */ + public Mono copyIncremental(URL source, String snapshot) { + return this.copyIncremental(source, snapshot, null, null); + } + + /** + * Begins an operation to start an incremental copy from one page blob's snapshot to this page + * blob. The snapshot is copied such that only the differential changes between the previously copied snapshot are + * transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read + * or copied from as usual. For more information, see + * the Azure Docs here and + * here. + * + * @param source + * The source page blob. + * @param snapshot + * The snapshot on the copy source. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + */ + public Mono copyIncremental(URL source, String snapshot, + ModifiedAccessConditions modifiedAccessConditions, Context context) { + context = context == null ? Context.NONE : context; + + UrlBuilder builder = UrlBuilder.parse(source); + builder.setQueryParameter(Constants.SNAPSHOT_QUERY_PARAMETER, snapshot); + try { + source = builder.toURL(); + } catch (MalformedURLException e) { + // We are parsing a valid url and adding a query parameter. If this fails, we can't recover. + throw new Error(e); + } + return postProcessResponse(this.azureBlobStorage.pageBlobs().copyIncrementalWithRestResponseAsync( + null, null, source, null, null, modifiedAccessConditions, context)); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/PageBlobClient.java b/storage/client/src/main/java/com/azure/storage/blob/PageBlobClient.java new file mode 100644 index 0000000000000..216f2758896d3 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/PageBlobClient.java @@ -0,0 +1,566 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.http.HttpPipeline; +import com.azure.core.util.Context; +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; +import com.azure.storage.blob.models.BlobHTTPHeaders; +import com.azure.storage.blob.models.CopyStatusType; +import com.azure.storage.blob.models.ModifiedAccessConditions; +import com.azure.storage.blob.models.PageBlobClearPagesHeaders; +import com.azure.storage.blob.models.PageBlobCreateHeaders; +import com.azure.storage.blob.models.PageBlobResizeHeaders; +import com.azure.storage.blob.models.PageBlobUpdateSequenceNumberHeaders; +import com.azure.storage.blob.models.PageBlobUploadPagesFromURLHeaders; +import com.azure.storage.blob.models.PageBlobUploadPagesHeaders; +import com.azure.storage.blob.models.PageRange; +import com.azure.storage.blob.models.SequenceNumberActionType; +import com.azure.storage.blob.models.SourceModifiedAccessConditions; +import io.netty.buffer.ByteBuf; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.net.URL; +import java.time.Duration; + +/** + * Client to a page blob. It may only be instantiated through a {@link PageBlobClientBuilder}, via + * the method {@link BlobClient#asPageBlobClient()}, or via the method + * {@link ContainerClient#getPageBlobClient(String)}. This class does not hold + * any state about a particular blob, but is instead a convenient way of sending appropriate + * requests to the resource on the service. + * + *

+ * This client contains operations on a blob. Operations on a container are available on {@link ContainerClient}, + * and operations on the service are available on {@link StorageClient}. + * + *

+ * Please refer to the Azure Docs + * for more information. + */ +public final class PageBlobClient extends BlobClient { + + private PageBlobAsyncClient pageBlobAsyncClient; + + /** + * Indicates the number of bytes in a page. + */ + public static final int PAGE_BYTES = 512; + + /** + * Indicates the maximum number of bytes that may be sent in a call to putPage. + */ + public static final int MAX_PUT_PAGES_BYTES = 4 * Constants.MB; + + /** + * Package-private constructor for use by {@link PageBlobClientBuilder}. + * @param azureBlobStorage the API client for blob storage API + */ + PageBlobClient(AzureBlobStorageImpl azureBlobStorage) { + super(azureBlobStorage); + this.pageBlobAsyncClient = new PageBlobAsyncClient(azureBlobStorage); + } + + /** + * @return a new client {@link PageBlobClientBuilder} instance. + */ + public static PageBlobClientBuilder pageBlobClientBuilder() { + return new PageBlobClientBuilder(); + } + + // TODO: Figure out if this method needs to change to public to access method in wrappers + private static String pageRangeToString(PageRange pageRange) { + if (pageRange.start() < 0 || pageRange.end() <= 0) { + throw new IllegalArgumentException("PageRange's start and end values must be greater than or equal to " + + "0 if specified."); + } + if (pageRange.start() % PageBlobClient.PAGE_BYTES != 0) { + throw new IllegalArgumentException("PageRange's start value must be a multiple of 512."); + } + if (pageRange.end() % PageBlobClient.PAGE_BYTES != PageBlobClient.PAGE_BYTES - 1) { + throw new IllegalArgumentException("PageRange's end value must be 1 less than a multiple of 512."); + } + if (pageRange.end() <= pageRange.start()) { + throw new IllegalArgumentException("PageRange's End value must be after the start."); + } + return new StringBuilder("bytes=").append(pageRange.start()).append('-').append(pageRange.end()).toString(); + } + + /** + * Creates a page blob of the specified length. Call PutPage to upload data data to a page blob. + * For more information, see the + * Azure Docs. + * + * @param size + * Specifies the maximum size for the page blob, up to 8 TB. The page blob size must be aligned to a + * 512-byte boundary. + * + * @return + * The information of the created page blob. + */ + public PageBlobCreateHeaders create(long size) { + return this.create(size, null, null, null, null, null, null); + } + + /** + * Creates a page blob of the specified length. Call PutPage to upload data data to a page blob. + * For more information, see the + * Azure Docs. + * + * @param size + * Specifies the maximum size for the page blob, up to 8 TB. The page blob size must be aligned to a + * 512-byte boundary. + * @param sequenceNumber + * A user-controlled value that you can use to track requests. The value of the sequence number must be + * between 0 and 2^63 - 1.The default value is 0. + * @param headers + * {@link BlobHTTPHeaders} + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * The information of the created page blob. + */ + public PageBlobCreateHeaders create(long size, Long sequenceNumber, BlobHTTPHeaders headers, + Metadata metadata, BlobAccessConditions accessConditions, Duration timeout, Context context) { + Mono response = pageBlobAsyncClient.create(size, sequenceNumber, headers, metadata, accessConditions, context); + return timeout == null? + response.block(): + response.block(timeout); + } + + /** + * Writes 1 or more pages to the page blob. The start and end offsets must be a multiple of 512. + * For more information, see the + * Azure Docs. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + * + * @param pageRange + * A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start offset must + * be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges are + * 0-511, 512-1023, etc. + * @param body + * The data to upload. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * + * @return + * The information of the uploaded pages. + */ + public PageBlobUploadPagesHeaders uploadPages(PageRange pageRange, Flux body) { + return this.uploadPages(pageRange, body, null, null, null); + } + + /** + * Writes 1 or more pages to the page blob. The start and end offsets must be a multiple of 512. + * For more information, see the + * Azure Docs. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + * + * @param pageRange + * A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start offset + * must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges + * are 0-511, 512-1023, etc. + * @param body + * The data to upload. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * @param pageBlobAccessConditions + * {@link PageBlobAccessConditions} + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * The information of the uploaded pages. + */ + public PageBlobUploadPagesHeaders uploadPages(PageRange pageRange, Flux body, + PageBlobAccessConditions pageBlobAccessConditions, Duration timeout, Context context) { + Mono response = pageBlobAsyncClient.uploadPages(pageRange, body, pageBlobAccessConditions, context); + return timeout == null? + response.block(): + response.block(timeout); + } + + /** + * Writes 1 or more pages from the source page blob to this page blob. The start and end offsets must be a multiple + * of 512. + * For more information, see the + * Azure Docs. + *

+ * + * @param range + * A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start offset + * must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges + * are 0-511, 512-1023, etc. + * @param sourceURL + * The url to the blob that will be the source of the copy. A source blob in the same storage account can be + * authenticated via Shared Key. However, if the source is a blob in another account, the source blob must + * either be public or must be authenticated via a shared access signature. If the source blob is public, no + * authentication is required to perform the operation. + * @param sourceOffset + * The source offset to copy from. Pass null or 0 to copy from the beginning of source page blob. + * + * @return + * The information of the uploaded pages. + */ + public PageBlobUploadPagesFromURLHeaders uploadPagesFromURL(PageRange range, URL sourceURL, Long sourceOffset) { + return this.uploadPagesFromURL(range, sourceURL, sourceOffset, null, null, + null, null, null); + } + + /** + * Writes 1 or more pages from the source page blob to this page blob. The start and end offsets must be a multiple + * of 512. + * For more information, see the + * Azure Docs. + *

+ * + * @param range + * The destination {@link PageRange} range. Given that pages must be aligned with 512-byte boundaries, the start offset + * must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges + * are 0-511, 512-1023, etc. + * @param sourceURL + * The url to the blob that will be the source of the copy. A source blob in the same storage account can be + * authenticated via Shared Key. However, if the source is a blob in another account, the source blob must + * either be public or must be authenticated via a shared access signature. If the source blob is public, no + * authentication is required to perform the operation. + * @param sourceOffset + * The source offset to copy from. Pass null or 0 to copy from the beginning of source blob. + * @param sourceContentMD5 + * An MD5 hash of the block content from the source blob. If specified, the service will calculate the MD5 + * of the received data and fail the request if it does not match the provided MD5. + * @param destAccessConditions + * {@link PageBlobAccessConditions} + * @param sourceAccessConditions + * {@link SourceModifiedAccessConditions} + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * The information of the uploaded pages. + */ + public PageBlobUploadPagesFromURLHeaders uploadPagesFromURL(PageRange range, URL sourceURL, Long sourceOffset, + byte[] sourceContentMD5, PageBlobAccessConditions destAccessConditions, + SourceModifiedAccessConditions sourceAccessConditions, Duration timeout, Context context) { + + Mono response = pageBlobAsyncClient.uploadPagesFromURL(range, sourceURL, sourceOffset, sourceContentMD5, destAccessConditions, sourceAccessConditions, context); + return timeout == null ? + response.block(): + response.block(timeout); + } + + /** + * Frees the specified pages from the page blob. + * For more information, see the + * Azure Docs. + * + * @param pageRange + * A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start offset + * must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges + * are 0-511, 512-1023, etc. + * + * @return + * The information of the cleared pages. + */ + public PageBlobClearPagesHeaders clearPages(PageRange pageRange) { + return this.clearPages(pageRange, null, null, null); + } + + /** + * Frees the specified pages from the page blob. + * For more information, see the + * Azure Docs. + * + * @param pageRange + * A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start offset + * must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges + * are 0-511, 512-1023, etc. + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * @param pageBlobAccessConditions + * {@link PageBlobAccessConditions} + * + * @return + * The information of the cleared pages. + */ + public PageBlobClearPagesHeaders clearPages(PageRange pageRange, + PageBlobAccessConditions pageBlobAccessConditions, Duration timeout, Context context) { + Mono response = pageBlobAsyncClient.clearPages(pageRange, pageBlobAccessConditions, context); + + return timeout == null ? + response.block(): + response.block(timeout); + } + + /** + * Returns the list of valid page ranges for a page blob or snapshot of a page blob. + * For more information, see the Azure Docs. + * + * @param blobRange + * {@link BlobRange} + * + * @return + * The information of the cleared pages. + */ + public Iterable getPageRanges(BlobRange blobRange) { + return this.getPageRanges(blobRange, null, null, null); + } + + /** + * Returns the list of valid page ranges for a page blob or snapshot of a page blob. + * For more information, see the Azure Docs. + * + * @param blobRange + * {@link BlobRange} + * @param accessConditions + * {@link BlobAccessConditions} + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * All the page ranges. + */ + public Iterable getPageRanges(BlobRange blobRange, + BlobAccessConditions accessConditions, Duration timeout, Context context) { + Flux response = pageBlobAsyncClient.getPageRanges(blobRange, accessConditions, context); + return timeout == null? + response.toIterable(): + response.timeout(timeout).toIterable(); + } + + /** + * Gets the collection of page ranges that differ between a specified snapshot and this page blob. + * For more information, see the Azure Docs. + * + * @param blobRange + * {@link BlobRange} + * @param prevSnapshot + * Specifies that the response will contain only pages that were changed between target blob and previous + * snapshot. Changed pages include both updated and cleared pages. The target + * blob may be a snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. + * + * @return + * All the different page ranges. + */ + public Iterable getPageRangesDiff(BlobRange blobRange, String prevSnapshot) { + return this.getPageRangesDiff(blobRange, prevSnapshot, null, null, null); + } + + /** + * Gets the collection of page ranges that differ between a specified snapshot and this page blob. + * For more information, see the Azure Docs. + * + * @param blobRange + * {@link BlobRange} + * @param prevSnapshot + * Specifies that the response will contain only pages that were changed between target blob and previous + * snapshot. Changed pages include both updated and cleared pages. The target + * blob may be a snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. + * @param accessConditions + * {@link BlobAccessConditions} + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * All the different page ranges. + */ + public Iterable getPageRangesDiff(BlobRange blobRange, String prevSnapshot, + BlobAccessConditions accessConditions, Duration timeout, Context context) { + Flux response = pageBlobAsyncClient.getPageRangesDiff(blobRange, prevSnapshot, accessConditions, context); + return timeout == null? + response.toIterable(): + response.timeout(timeout).toIterable(); + } + + /** + * Resizes the page blob to the specified size (which must be a multiple of 512). + * For more information, see the Azure Docs. + * + * @param size + * Resizes a page blob to the specified size. If the specified value is less than the current size of the + * blob, then all pages above the specified value are cleared. + * + * @return + * The resized page blob. + */ + public PageBlobResizeHeaders resize(long size) { + return this.resize(size, null, null, null); + } + + /** + * Resizes the page blob to the specified size (which must be a multiple of 512). + * For more information, see the Azure Docs. + * + * @param size + * Resizes a page blob to the specified size. If the specified value is less than the current size of the + * blob, then all pages above the specified value are cleared. + * @param accessConditions + * {@link BlobAccessConditions} + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * The resized page blob. + */ + public PageBlobResizeHeaders resize(long size, BlobAccessConditions accessConditions, Duration timeout, Context context) { + Mono response = pageBlobAsyncClient.resize(size, accessConditions, context); + return timeout == null? + response.block(): + response.block(timeout); + } + + /** + * Sets the page blob's sequence number. + * For more information, see the Azure Docs. + * + * @param action + * Indicates how the service should modify the blob's sequence number. + * @param sequenceNumber + * The blob's sequence number. The sequence number is a user-controlled property that you can use to track + * requests and manage concurrency issues. + * + * @return + * The updated page blob. + */ + public PageBlobUpdateSequenceNumberHeaders updateSequenceNumber(SequenceNumberActionType action, + Long sequenceNumber) { + return this.updateSequenceNumber(action, sequenceNumber, null, null,null); + } + + /** + * Sets the page blob's sequence number. + * For more information, see the Azure Docs. + * + * @param action + * Indicates how the service should modify the blob's sequence number. + * @param sequenceNumber + * The blob's sequence number. The sequence number is a user-controlled property that you can use to track + * requests and manage concurrency issues. + * @param accessConditions + * {@link BlobAccessConditions} + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * The updated page blob. + */ + public PageBlobUpdateSequenceNumberHeaders updateSequenceNumber(SequenceNumberActionType action, + Long sequenceNumber, BlobAccessConditions accessConditions, Duration timeout, Context context) { + Mono response = pageBlobAsyncClient.updateSequenceNumber(action, sequenceNumber, accessConditions, context); + return timeout == null? + response.block(): + response.block(timeout); + } + + /** + * Begins an operation to start an incremental copy from one page blob's snapshot to this page + * blob. The snapshot is copied such that only the differential changes between the previously copied snapshot are + * transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read + * or copied from as usual. For more information, see + * the Azure Docs here and + * here. + * + * @param source + * The source page blob. + * @param snapshot + * The snapshot on the copy source. + * + * @return + * The copy status. + */ + public CopyStatusType copyIncremental(URL source, String snapshot) { + return this.copyIncremental(source, snapshot, null, null, null); + } + + /** + * Begins an operation to start an incremental copy from one page blob's snapshot to this page + * blob. The snapshot is copied such that only the differential changes between the previously copied snapshot are + * transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read + * or copied from as usual. For more information, see + * the Azure Docs here and + * here. + * + * @param source + * The source page blob. + * @param snapshot + * The snapshot on the copy source. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return + * The copy status. + */ + public CopyStatusType copyIncremental(URL source, String snapshot, + ModifiedAccessConditions modifiedAccessConditions, Duration timeout, Context context) { + Mono response = pageBlobAsyncClient.copyIncremental(source, snapshot, modifiedAccessConditions, context); + return timeout == null? + response.block(): + response.block(timeout); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/PageBlobClientBuilder.java b/storage/client/src/main/java/com/azure/storage/blob/PageBlobClientBuilder.java new file mode 100644 index 0000000000000..0406db7dcfdb6 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/PageBlobClientBuilder.java @@ -0,0 +1,220 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.configuration.Configuration; +import com.azure.core.http.HttpClient; +import com.azure.core.http.HttpPipeline; +import com.azure.core.http.policy.AddDatePolicy; +import com.azure.core.http.policy.HttpLogDetailLevel; +import com.azure.core.http.policy.HttpLoggingPolicy; +import com.azure.core.http.policy.HttpPipelinePolicy; +import com.azure.core.http.policy.RequestIdPolicy; +import com.azure.core.http.policy.RetryPolicy; +import com.azure.core.http.policy.UserAgentPolicy; +import com.azure.core.implementation.util.ImplUtils; +import com.azure.storage.blob.implementation.AzureBlobStorageBuilder; +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; + +import java.net.MalformedURLException; +import java.net.URL; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * Fluent PageBlobClientBuilder for instantiating a {@link PageBlobClient} or {@link PageBlobAsyncClient}. + * + *

+ * An instance of this builder may only be created from static method {@link PageBlobClient#pageBlobClientBuilder()}. + * The following information must be provided on this builder: + * + *

+ * + *

+ * Once all the configurations are set on this builder, call {@code .buildClient()} to create a + * {@link PageBlobClient} or {@code .buildAsyncClient()} to create a {@link PageBlobAsyncClient}. + */ +public final class PageBlobClientBuilder { + private static final String ACCOUNT_NAME = "AccountName".toLowerCase(); + private static final String ACCOUNT_KEY = "AccountKey".toLowerCase(); + + private final List policies; + + private URL endpoint; + private ICredentials credentials = new AnonymousCredentials(); + private HttpClient httpClient; + private HttpLogDetailLevel logLevel; + private RetryPolicy retryPolicy; + private Configuration configuration; + + public PageBlobClientBuilder() { + retryPolicy = new RetryPolicy(); + logLevel = HttpLogDetailLevel.NONE; + policies = new ArrayList<>(); + } + + /** + * Constructs an instance of PageBlobAsyncClient based on the configurations stored in the appendBlobClientBuilder. + * @return a new client instance + */ + private AzureBlobStorageImpl buildImpl() { + Objects.requireNonNull(endpoint); + + // Closest to API goes first, closest to wire goes last. + final List policies = new ArrayList<>(); + + policies.add(new UserAgentPolicy(BlobConfiguration.NAME, BlobConfiguration.VERSION)); + policies.add(new RequestIdPolicy()); + policies.add(new AddDatePolicy()); + policies.add(credentials); // This needs to be a different credential type. + + policies.add(retryPolicy); + + policies.addAll(this.policies); + policies.add(new HttpLoggingPolicy(logLevel)); + + HttpPipeline pipeline = HttpPipeline.builder() + .policies(policies.toArray(new HttpPipelinePolicy[0])) + .httpClient(httpClient) + .build(); + + return new AzureBlobStorageBuilder() + .url(endpoint.toString()) + .pipeline(pipeline) + .build(); + } + + /** + * @return a {@link PageBlobClient} created from the configurations in this builder. + */ + public PageBlobClient buildClient() { + return new PageBlobClient(buildImpl()); + } + + /** + * @return a {@link PageBlobAsyncClient} created from the configurations in this builder. + */ + public PageBlobAsyncClient buildAsyncClient() { + return new PageBlobAsyncClient(buildImpl()); + } + + /** + * Sets the service endpoint, additionally parses it for information (SAS token, container name) + * @param endpoint URL of the service + * @return the updated PageBlobClientBuilder object + */ + public PageBlobClientBuilder endpoint(String endpoint) { + Objects.requireNonNull(endpoint); + try { + this.endpoint = new URL(endpoint); + } catch (MalformedURLException ex) { + throw new IllegalArgumentException("The Azure Storage Queue endpoint url is malformed."); + } + + return this; + } + + /** + * Sets the credentials used to authorize requests sent to the service + * @param credentials authorization credentials + * @return the updated PageBlobClientBuilder object + */ + public PageBlobClientBuilder credentials(SharedKeyCredentials credentials) { + this.credentials = credentials; + return this; + } + + /** + * Sets the credentials used to authorize requests sent to the service + * @param credentials authorization credentials + * @return the updated PageBlobClientBuilder object + */ + public PageBlobClientBuilder credentials(TokenCredentials credentials) { + this.credentials = credentials; + return this; + } + + /** + * Clears the credentials used to authorize requests sent to the service + * @return the updated PageBlobClientBuilder object + */ + public PageBlobClientBuilder anonymousCredentials() { + this.credentials = new AnonymousCredentials(); + return this; + } + + /** + * Sets the connection string for the service, parses it for authentication information (account name, account key) + * @param connectionString connection string from access keys section + * @return the updated PageBlobClientBuilder object + */ + public PageBlobClientBuilder connectionString(String connectionString) { + Objects.requireNonNull(connectionString); + + Map connectionKVPs = new HashMap<>(); + for (String s : connectionString.split(";")) { + String[] kvp = s.split("=", 2); + connectionKVPs.put(kvp[0].toLowerCase(), kvp[1]); + } + + String accountName = connectionKVPs.get(ACCOUNT_NAME); + String accountKey = connectionKVPs.get(ACCOUNT_KEY); + + if (ImplUtils.isNullOrEmpty(accountName) || ImplUtils.isNullOrEmpty(accountKey)) { + throw new IllegalArgumentException("Connection string must contain 'AccountName' and 'AccountKey'."); + } + + // Use accountName and accountKey to get the SAS token using the credential class. + credentials = new SharedKeyCredentials(accountName, accountKey); + + return this; + } + + /** + * Sets the http client used to send service requests + * @param httpClient http client to send requests + * @return the updated PageBlobClientBuilder object + */ + public PageBlobClientBuilder httpClient(HttpClient httpClient) { + this.httpClient = httpClient; + return this; + } + + /** + * Adds a pipeline policy to apply on each request sent + * @param pipelinePolicy a pipeline policy + * @return the updated PageBlobClientBuilder object + */ + public PageBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { + this.policies.add(pipelinePolicy); + return this; + } + + /** + * Sets the logging level for service requests + * @param logLevel logging level + * @return the updated PageBlobClientBuilder object + */ + public PageBlobClientBuilder httpLogDetailLevel(HttpLogDetailLevel logLevel) { + this.logLevel = logLevel; + return this; + } + + /** + * Sets the configuration object used to retrieve environment configuration values used to buildClient the client with + * when they are not set in the appendBlobClientBuilder, defaults to Configuration.NONE + * @param configuration configuration store + * @return the updated PageBlobClientBuilder object + */ + public PageBlobClientBuilder configuration(Configuration configuration) { + this.configuration = configuration; + return this; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/PageBlobRawClient.java b/storage/client/src/main/java/com/azure/storage/blob/PageBlobRawClient.java new file mode 100644 index 0000000000000..1e26cfcd473a9 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/PageBlobRawClient.java @@ -0,0 +1,579 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.http.HttpPipeline; +import com.azure.core.util.Context; +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; +import com.azure.storage.blob.models.BlobHTTPHeaders; +import com.azure.storage.blob.models.ModifiedAccessConditions; +import com.azure.storage.blob.models.PageBlobsClearPagesResponse; +import com.azure.storage.blob.models.PageBlobsCopyIncrementalResponse; +import com.azure.storage.blob.models.PageBlobsCreateResponse; +import com.azure.storage.blob.models.PageBlobsGetPageRangesDiffResponse; +import com.azure.storage.blob.models.PageBlobsGetPageRangesResponse; +import com.azure.storage.blob.models.PageBlobsResizeResponse; +import com.azure.storage.blob.models.PageBlobsUpdateSequenceNumberResponse; +import com.azure.storage.blob.models.PageBlobsUploadPagesFromURLResponse; +import com.azure.storage.blob.models.PageBlobsUploadPagesResponse; +import com.azure.storage.blob.models.PageRange; +import com.azure.storage.blob.models.SequenceNumberActionType; +import com.azure.storage.blob.models.SourceModifiedAccessConditions; +import io.netty.buffer.ByteBuf; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.net.URL; +import java.time.Duration; + +/** + * Represents a URL to a page blob. It may be obtained by direct construction or via the create method on a + * {@link ContainerAsyncClient} object. This class does not hold any state about a particular blob but is instead a convenient + * way of sending off appropriate requests to the resource on the service. Please refer to the + * Azure Docs + * for more information. + */ +final class PageBlobRawClient extends BlobRawClient { + + private PageBlobAsyncRawClient pageBlobAsyncRawClient; + + /** + * Indicates the number of bytes in a page. + */ + public static final int PAGE_BYTES = 512; + + /** + * Indicates the maximum number of bytes that may be sent in a call to putPage. + */ + public static final int MAX_PUT_PAGES_BYTES = 4 * Constants.MB; + + /** + * Creates a {@code PageBlobAsyncRawClient} object pointing to the account specified by the URL and using the provided + */ + public PageBlobRawClient(AzureBlobStorageImpl azureBlobStorage) { + super(azureBlobStorage); + this.pageBlobAsyncRawClient = new PageBlobAsyncRawClient(azureBlobStorage); + } + + // TODO: Figure out if this method needs to change to public to access method in wrappers + private static String pageRangeToString(PageRange pageRange) { + if (pageRange.start() < 0 || pageRange.end() <= 0) { + throw new IllegalArgumentException("PageRange's start and end values must be greater than or equal to " + + "0 if specified."); + } + if (pageRange.start() % PageBlobRawClient.PAGE_BYTES != 0) { + throw new IllegalArgumentException("PageRange's start value must be a multiple of 512."); + } + if (pageRange.end() % PageBlobRawClient.PAGE_BYTES != PageBlobRawClient.PAGE_BYTES - 1) { + throw new IllegalArgumentException("PageRange's end value must be 1 less than a multiple of 512."); + } + if (pageRange.end() <= pageRange.start()) { + throw new IllegalArgumentException("PageRange's End value must be after the start."); + } + return new StringBuilder("bytes=").append(pageRange.start()).append('-').append(pageRange.end()).toString(); + } + + /** + * Creates a page blob of the specified length. Call PutPage to upload data data to a page blob. + * For more information, see the + * Azure Docs. + * + * @param size + * Specifies the maximum size for the page blob, up to 8 TB. The page blob size must be aligned to a + * 512-byte boundary. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobAsyncRawClient.create")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public PageBlobsCreateResponse create(long size) { + return this.create(size, null, null, null, null, null, null); + } + + /** + * Creates a page blob of the specified length. Call PutPage to upload data data to a page blob. + * For more information, see the + * Azure Docs. + * + * @param size + * Specifies the maximum size for the page blob, up to 8 TB. The page blob size must be aligned to a + * 512-byte boundary. + * @param sequenceNumber + * A user-controlled value that you can use to track requests. The value of the sequence number must be + * between 0 and 2^63 - 1.The default value is 0. + * @param headers + * {@link BlobHTTPHeaders} + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobAsyncRawClient.create")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public PageBlobsCreateResponse create(long size, Long sequenceNumber, BlobHTTPHeaders headers, + Metadata metadata, BlobAccessConditions accessConditions, Duration timeout, Context context) { + Mono response = pageBlobAsyncRawClient.create(size, sequenceNumber, headers, metadata, accessConditions, context); + return timeout == null? + response.block(): + response.block(timeout); + } + + /** + * Writes 1 or more pages to the page blob. The start and end offsets must be a multiple of 512. + * For more information, see the + * Azure Docs. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + * + * @param pageRange + * A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start offset must + * be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges are + * 0-511, 512-1023, etc. + * @param body + * The data to upload. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobAsyncRawClient.uploadPages")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public PageBlobsUploadPagesResponse uploadPages(PageRange pageRange, Flux body) { + return this.uploadPages(pageRange, body, null, null, null); + } + + /** + * Writes 1 or more pages to the page blob. The start and end offsets must be a multiple of 512. + * For more information, see the + * Azure Docs. + *

+ * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flux} must produce the same data each time it is subscribed to. + * + * @param pageRange + * A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start offset + * must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges + * are 0-511, 512-1023, etc. + * @param body + * The data to upload. Note that this {@code Flux} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * @param pageBlobAccessConditions + * {@link PageBlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobAsyncRawClient.uploadPages")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public PageBlobsUploadPagesResponse uploadPages(PageRange pageRange, Flux body, + PageBlobAccessConditions pageBlobAccessConditions, Duration timeout, Context context) { + Mono response = pageBlobAsyncRawClient.uploadPages(pageRange, body, pageBlobAccessConditions, context); + return timeout == null? + response.block(): + response.block(timeout); + } + + /** + * Writes 1 or more pages from the source page blob to this page blob. The start and end offsets must be a multiple + * of 512. + * For more information, see the + * Azure Docs. + *

+ * + * @param range + * A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start offset + * must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges + * are 0-511, 512-1023, etc. + * @param sourceURL + * The url to the blob that will be the source of the copy. A source blob in the same storage account can be + * authenticated via Shared Key. However, if the source is a blob in another account, the source blob must + * either be public or must be authenticated via a shared access signature. If the source blob is public, no + * authentication is required to perform the operation. + * @param sourceOffset + * The source offset to copy from. Pass null or 0 to copy from the beginning of source page blob. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_from_url "Sample code for PageBlobAsyncRawClient.uploadPagesFromURL")] + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public PageBlobsUploadPagesFromURLResponse uploadPagesFromURL(PageRange range, URL sourceURL, Long sourceOffset) { + return this.uploadPagesFromURL(range, sourceURL, sourceOffset, null, null, + null, null, null); + } + + /** + * Writes 1 or more pages from the source page blob to this page blob. The start and end offsets must be a multiple + * of 512. + * For more information, see the + * Azure Docs. + *

+ * + * @param range + * The destination {@link PageRange} range. Given that pages must be aligned with 512-byte boundaries, the start offset + * must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges + * are 0-511, 512-1023, etc. + * @param sourceURL + * The url to the blob that will be the source of the copy. A source blob in the same storage account can be + * authenticated via Shared Key. However, if the source is a blob in another account, the source blob must + * either be public or must be authenticated via a shared access signature. If the source blob is public, no + * authentication is required to perform the operation. + * @param sourceOffset + * The source offset to copy from. Pass null or 0 to copy from the beginning of source blob. + * @param sourceContentMD5 + * An MD5 hash of the block content from the source blob. If specified, the service will calculate the MD5 + * of the received data and fail the request if it does not match the provided MD5. + * @param destAccessConditions + * {@link PageBlobAccessConditions} + * @param sourceAccessConditions + * {@link SourceModifiedAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_from_url "Sample code for PageBlobAsyncRawClient.uploadPagesFromURL")] + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public PageBlobsUploadPagesFromURLResponse uploadPagesFromURL(PageRange range, URL sourceURL, Long sourceOffset, + byte[] sourceContentMD5, PageBlobAccessConditions destAccessConditions, + SourceModifiedAccessConditions sourceAccessConditions, Duration timeout, Context context) { + + Mono response = pageBlobAsyncRawClient.uploadPagesFromURL(range, sourceURL, sourceOffset, sourceContentMD5, destAccessConditions, sourceAccessConditions, context); + return timeout == null? + response.block(): + response.block(timeout); + } + + /** + * Frees the specified pages from the page blob. + * For more information, see the + * Azure Docs. + * + * @param pageRange + * A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start offset + * must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges + * are 0-511, 512-1023, etc. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobAsyncRawClient.clearPages")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public PageBlobsClearPagesResponse clearPages(PageRange pageRange) { + return this.clearPages(pageRange, null, null, null); + } + + /** + * Frees the specified pages from the page blob. + * For more information, see the + * Azure Docs. + * + * @param pageRange + * A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start offset + * must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges + * are 0-511, 512-1023, etc. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * @param pageBlobAccessConditions + * {@link PageBlobAccessConditions} + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobAsyncRawClient.clearPages")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public PageBlobsClearPagesResponse clearPages(PageRange pageRange, + PageBlobAccessConditions pageBlobAccessConditions, Duration timeout, Context context) { + Mono response = pageBlobAsyncRawClient.clearPages(pageRange, pageBlobAccessConditions, context); + return timeout == null? + response.block(): + response.block(timeout); + } + + /** + * Returns the list of valid page ranges for a page blob or snapshot of a page blob. + * For more information, see the Azure Docs. + * + * @param blobRange + * {@link BlobRange} + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobAsyncRawClient.getPageRanges")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public PageBlobsGetPageRangesResponse getPageRanges(BlobRange blobRange) { + return this.getPageRanges(blobRange, null, null, null); + } + + /** + * Returns the list of valid page ranges for a page blob or snapshot of a page blob. + * For more information, see the Azure Docs. + * + * @param blobRange + * {@link BlobRange} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobAsyncRawClient.getPageRanges")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public PageBlobsGetPageRangesResponse getPageRanges(BlobRange blobRange, + BlobAccessConditions accessConditions, Duration timeout, Context context) { + Mono response = pageBlobAsyncRawClient.getPageRanges(blobRange, accessConditions, context); + return timeout == null? + response.block(): + response.block(timeout); + } + + /** + * Gets the collection of page ranges that differ between a specified snapshot and this page blob. + * For more information, see the Azure Docs. + * + * @param blobRange + * {@link BlobRange} + * @param prevSnapshot + * Specifies that the response will contain only pages that were changed between target blob and previous + * snapshot. Changed pages include both updated and cleared pages. The target + * blob may be a snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_diff "Sample code for PageBlobAsyncRawClient.getPageRangesDiff")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public PageBlobsGetPageRangesDiffResponse getPageRangesDiff(BlobRange blobRange, String prevSnapshot) { + return this.getPageRangesDiff(blobRange, prevSnapshot, null, null, null); + } + + /** + * Gets the collection of page ranges that differ between a specified snapshot and this page blob. + * For more information, see the Azure Docs. + * + * @param blobRange + * {@link BlobRange} + * @param prevSnapshot + * Specifies that the response will contain only pages that were changed between target blob and previous + * snapshot. Changed pages include both updated and cleared pages. The target + * blob may be a snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_diff "Sample code for PageBlobAsyncRawClient.getPageRangesDiff")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public PageBlobsGetPageRangesDiffResponse getPageRangesDiff(BlobRange blobRange, String prevSnapshot, + BlobAccessConditions accessConditions, Duration timeout, Context context) { + Mono response =pageBlobAsyncRawClient.getPageRangesDiff(blobRange, prevSnapshot, accessConditions, context); + return timeout == null? + response.block(): + response.block(timeout); + } + + /** + * Resizes the page blob to the specified size (which must be a multiple of 512). + * For more information, see the Azure Docs. + * + * @param size + * Resizes a page blob to the specified size. If the specified value is less than the current size of the + * blob, then all pages above the specified value are cleared. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobAsyncRawClient.resize")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public PageBlobsResizeResponse resize(long size) { + return this.resize(size, null, null, null); + } + + /** + * Resizes the page blob to the specified size (which must be a multiple of 512). + * For more information, see the Azure Docs. + * + * @param size + * Resizes a page blob to the specified size. If the specified value is less than the current size of the + * blob, then all pages above the specified value are cleared. + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobAsyncRawClient.resize")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public PageBlobsResizeResponse resize(long size, BlobAccessConditions accessConditions, Duration timeout, Context context) { + Mono response = pageBlobAsyncRawClient.resize(size, accessConditions, context); + return timeout == null? + response.block(): + response.block(timeout); + } + + /** + * Sets the page blob's sequence number. + * For more information, see the Azure Docs. + * + * @param action + * Indicates how the service should modify the blob's sequence number. + * @param sequenceNumber + * The blob's sequence number. The sequence number is a user-controlled property that you can use to track + * requests and manage concurrency issues. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobAsyncRawClient.updateSequenceNumber")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public PageBlobsUpdateSequenceNumberResponse updateSequenceNumber(SequenceNumberActionType action, + Long sequenceNumber) { + return this.updateSequenceNumber(action, sequenceNumber, null, null, null); + } + + /** + * Sets the page blob's sequence number. + * For more information, see the Azure Docs. + * + * @param action + * Indicates how the service should modify the blob's sequence number. + * @param sequenceNumber + * The blob's sequence number. The sequence number is a user-controlled property that you can use to track + * requests and manage concurrency issues. + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobAsyncRawClient.updateSequenceNumber")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public PageBlobsUpdateSequenceNumberResponse updateSequenceNumber(SequenceNumberActionType action, + Long sequenceNumber, BlobAccessConditions accessConditions, Duration timeout, Context context) { + Mono response = pageBlobAsyncRawClient.updateSequenceNumber(action, sequenceNumber, accessConditions, context); + return timeout == null? + response.block(): + response.block(timeout); + } + + /** + * Begins an operation to start an incremental copy from one page blob's snapshot to this page + * blob. The snapshot is copied such that only the differential changes between the previously copied snapshot are + * transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read + * or copied from as usual. For more information, see + * the Azure Docs here and + * here. + * + * @param source + * The source page blob. + * @param snapshot + * The snapshot on the copy source. + * + * @return Emits the successful response. + */ + public PageBlobsCopyIncrementalResponse copyIncremental(URL source, String snapshot) { + return this.copyIncremental(source, snapshot, null, null, null); + } + + /** + * Begins an operation to start an incremental copy from one page blob's snapshot to this page + * blob. The snapshot is copied such that only the differential changes between the previously copied snapshot are + * transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read + * or copied from as usual. For more information, see + * the Azure Docs here and + * here. + * + * @param source + * The source page blob. + * @param snapshot + * The snapshot on the copy source. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + */ + public PageBlobsCopyIncrementalResponse copyIncremental(URL source, String snapshot, + ModifiedAccessConditions modifiedAccessConditions, Duration timeout, Context context) { + Mono response = pageBlobAsyncRawClient.copyIncremental(source, snapshot, modifiedAccessConditions, context); + return timeout == null? + response.block(): + response.block(timeout); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/PipelineOptions.java b/storage/client/src/main/java/com/azure/storage/blob/PipelineOptions.java new file mode 100644 index 0000000000000..89b7d56933abe --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/PipelineOptions.java @@ -0,0 +1,88 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.http.HttpClient; + +/** + * This type encapsulates all the possible configuration for the default pipeline. It may be passed to the + * createPipeline method on {@link StorageURL}. All the options fields have default values if nothing is passed, and + * no logger will be used if it is not set. An HttpClient must be set, however. + */ +public final class PipelineOptions { + /* + PipelineOptions is mutable, but its fields refer to immutable objects. The createPipeline method can pass the + fields to other methods, but the PipelineOptions object itself can only be used for the duration of this call; it + must not be passed to anything with a longer lifetime. + */ + + private HttpClient client; + + private RequestRetryOptions requestRetryOptions = new RequestRetryOptions(); + + private LoggingOptions loggingOptions = new LoggingOptions(); + + private TelemetryOptions telemetryOptions = new TelemetryOptions(); + + + /** + * Specifies which HttpClient to use to send the requests. + */ + public HttpClient client() { + return client; + } + + /** + * Specifies which HttpClient to use to send the requests. + */ + public PipelineOptions withClient(HttpClient client) { + this.client = client; + return this; + } + + /** + * Configures the retry policy's behavior. + */ + public RequestRetryOptions requestRetryOptions() { + return requestRetryOptions; + } + + /** + * Configures the retry policy's behavior. + */ + public PipelineOptions withRequestRetryOptions(RequestRetryOptions requestRetryOptions) { + this.requestRetryOptions = requestRetryOptions; + return this; + } + + /** + * Configures the built-in request logging policy. + */ + public LoggingOptions loggingOptions() { + return loggingOptions; + } + + /** + * Configures the built-in request logging policy. + */ + public PipelineOptions withLoggingOptions(LoggingOptions loggingOptions) { + this.loggingOptions = loggingOptions; + return this; + } + + /** + * Configures the built-in telemetry policy behavior. + */ + public TelemetryOptions telemetryOptions() { + return telemetryOptions; + } + + /** + * Configures the built-in telemetry policy behavior. + */ + public PipelineOptions withTelemetryOptions(TelemetryOptions telemetryOptions) { + this.telemetryOptions = telemetryOptions; + return this; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/ProgressReporter.java b/storage/client/src/main/java/com/azure/storage/blob/ProgressReporter.java new file mode 100644 index 0000000000000..fc882d4c30aa4 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/ProgressReporter.java @@ -0,0 +1,167 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.nio.ByteBuffer; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.Lock; + +/** + * {@code ProgressReporterImpl} offers a convenient way to add progress tracking to a given Flowable. + */ +final class ProgressReporter { + + private abstract static class ProgressReporterImpl implements IProgressReceiver { + long blockProgress; + + final IProgressReceiver progressReceiver; + + ProgressReporterImpl(IProgressReceiver progressReceiver) { + this.blockProgress = 0; + this.progressReceiver = progressReceiver; + } + + @Override + public void reportProgress(long bytesTransferred) { + this.blockProgress += bytesTransferred; + } + + void rewindProgress() { + this.blockProgress = 0; + } + + Flux addProgressReporting(Flux data) { + return Mono.just(this) + .flatMapMany(progressReporter -> { + /* + Each time there is a new subscription, we will rewind the progress. This is desirable specifically + for retries, which resubscribe on each try. The first time this flowable is subscribed to, the + rewind will be a noop as there will have been no progress made. Subsequent rewinds will work as + expected. + */ + progressReporter.rewindProgress(); + /* + Every time we emit some data, report it to the Tracker, which will pass it on to the end user. + */ + return data.doOnNext(buffer -> + progressReporter.reportProgress(buffer.remaining())); + }); + } + } + + /** + * This type is used to keep track of the total amount of data transferred for a single request. This is the type + * we will use when the customer uses the factory to add progress reporting to their Flowable. We need this + * additional type because we can't keep local state directly as lambdas require captured local variables to be + * effectively final. + */ + private static class SequentialProgressReporter extends ProgressReporterImpl { + SequentialProgressReporter(IProgressReceiver progressReceiver) { + super(progressReceiver); + } + + @Override + public void reportProgress(long bytesTransferred) { + super.reportProgress(bytesTransferred); + this.progressReceiver.reportProgress(this.blockProgress); + } + } + + /** + * This type is used to keep track of the total amount of data transferred as a part of a parallel upload in order + * to coordinate progress reporting to the end user. We need this additional type because we can't keep local state + * directly as lambdas require captured local variables to be effectively final. + */ + private static class ParallelProgressReporter extends ProgressReporterImpl { + /* + This lock will be instantiated by the operation initiating the whole transfer to coordinate each + ProgressReporterImpl. + */ + private final Lock transferLock; + + /* + We need an AtomicLong to be able to update the value referenced. Because we are already synchronizing with the + lock, we don't incur any additional performance hit here by the synchronization. + */ + private AtomicLong totalProgress; + + ParallelProgressReporter(IProgressReceiver progressReceiver, Lock lock, AtomicLong totalProgress) { + super(progressReceiver); + this.transferLock = lock; + this.totalProgress = totalProgress; + } + + @Override + public void reportProgress(long bytesTransferred) { + super.reportProgress(bytesTransferred); + + /* + It is typically a bad idea to lock around customer code (which the progressReceiver is) because they could + never release the lock. However, we have decided that it is sufficiently difficult for them to make their + progressReporting code threadsafe that we will take that burden and the ensuing risks. Although it is the + case that only one thread is allowed to be in onNext at once, however there are multiple independent + requests happening at once to stage/download separate chunks, so we still need to lock either way. + */ + transferLock.lock(); + this.progressReceiver.reportProgress(this.totalProgress.addAndGet(bytesTransferred)); + transferLock.unlock(); + } + + /* + This is used in the case of retries to rewind the amount of progress reported so as not to over-report at the + end. + */ + @Override + public void rewindProgress() { + /* + Blocks do not interfere with each other's block progress and there is no way that, for a single block, one + thread will be trying to add to the progress while the other is trying to zero it. The updates are strictly + sequential. Avoiding using the lock is ideal. + */ + this.totalProgress.addAndGet(-1 * this.blockProgress); + super.rewindProgress(); + } + + } + + /** + * Adds progress reporting functionality to the given {@code Flux}. Each subscription (and therefore each + * retry) will rewind the progress reported so as not to over-report. The data reported will be the total amount + * of data emitted so far, or the "current position" of the Flowable. + * + * @param data + * The data whose transfer progress is to be tracked. + * @param progressReceiver + * {@link IProgressReceiver} + * + * @return A {@code Flux} that emits the same data as the source but calls a callback to report the total amount + * of data emitted so far. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=progress "Sample code for ProgressReporterFactor.addProgressReporting")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public static Flux addProgressReporting(Flux data, + IProgressReceiver progressReceiver) { + if (progressReceiver == null) { + return data; + } else { + ProgressReporterImpl tracker = new SequentialProgressReporter(progressReceiver); + return tracker.addProgressReporting(data); + } + } + + static Flux addParallelProgressReporting(Flux data, + IProgressReceiver progressReceiver, Lock lock, AtomicLong totalProgress) { + if (progressReceiver == null) { + return data; + } else { + ParallelProgressReporter tracker = new ParallelProgressReporter(progressReceiver, lock, totalProgress); + return tracker.addProgressReporting(data); + } + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/ReliableDownloadOptions.java b/storage/client/src/main/java/com/azure/storage/blob/ReliableDownloadOptions.java new file mode 100644 index 0000000000000..1b816ad3547d7 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/ReliableDownloadOptions.java @@ -0,0 +1,36 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +/** + * {@code ReliableDownloadOptions} contains properties which help the {@code Flux} returned from + * {@link DownloadAsyncResponse#body(ReliableDownloadOptions)} determine when to retry. + */ +public final class ReliableDownloadOptions { + + /* + We use "retry" here because by the time the user passes this type, the initial request, or try, has already been + issued and returned. This is in contrast to the retry policy options, which includes the initial try in its count, + thus the difference in verbiage. + */ + private int maxRetryRequests = 0; + + /** + * Specifies the maximum number of additional HTTP Get requests that will be made while reading the data from a + * response body. + */ + public int maxRetryRequests() { + return maxRetryRequests; + } + + /** + * Specifies the maximum number of additional HTTP Get requests that will be made while reading the data from a + * response body. + */ + public ReliableDownloadOptions withMaxRetryRequests(int maxRetryRequests) { + Utility.assertInBounds("options.maxRetryRequests", maxRetryRequests, 0, Integer.MAX_VALUE); + this.maxRetryRequests = maxRetryRequests; + return this; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/RequestIDPolicy.java b/storage/client/src/main/java/com/azure/storage/blob/RequestIDPolicy.java new file mode 100644 index 0000000000000..87c3bf631b2a6 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/RequestIDPolicy.java @@ -0,0 +1,27 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.http.HttpPipelineCallContext; +import com.azure.core.http.HttpPipelineNextPolicy; +import com.azure.core.http.HttpResponse; +import com.azure.core.http.policy.HttpPipelinePolicy; +import reactor.core.publisher.Mono; + +import java.util.UUID; + +/** + * This is a factory which creates policies in an {@link com.azure.core.http.HttpPipeline} for setting a unique request ID in the + * x-ms-client-request-id header as is required for all requests to the service. In most cases, it is sufficient to + * allow the default pipeline to add this factory automatically and assume that it works. The factory and policy must + * only be used directly when creating a custom pipeline. + */ +final class RequestIDPolicy implements HttpPipelinePolicy { + + @Override + public Mono process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { + context.httpRequest().headers().put(Constants.HeaderConstants.CLIENT_REQUEST_ID_HEADER, UUID.randomUUID().toString()); + return next.process(); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/RequestRetryFactory.java b/storage/client/src/main/java/com/azure/storage/blob/RequestRetryFactory.java new file mode 100644 index 0000000000000..8fe35ef426228 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/RequestRetryFactory.java @@ -0,0 +1,224 @@ +//// Copyright (c) Microsoft Corporation. All rights reserved. +//// Licensed under the MIT License. +// +//package com.azure.storage.blob; +// +// +//import java.io.IOException; +//import java.net.MalformedURLException; +//import java.nio.ByteBuffer; +//import java.util.concurrent.ThreadLocalRandom; +//import java.util.concurrent.TimeUnit; +//import java.util.concurrent.TimeoutException; +// +///** +// * This is a factory which creates policies in an {@link HttpPipeline} for retrying a given HTTP request. The request +// * that is retried will be identical each time it is reissued. In most cases, it is sufficient to configure a {@link +// * RequestRetryOptions} object and set those as a field on a {@link PipelineOptions} object to configure a default +// * pipeline. Retries will try against a secondary if one is specified and the type of operation/error indicates that the +// * secondary can handle the request. Exponential and fixed backoff are supported. The factory and policy must only be +// * used directly when creating a custom pipeline. +// */ +//public final class RequestRetryFactory implements RequestPolicyFactory { +// +// private final RequestRetryOptions requestRetryOptions; +// +// /** +// * Creates a factory capable of generating RequestRetry policies for the {@link HttpPipeline}. +// * +// * @param requestRetryOptions +// * {@link RequestRetryOptions} +// */ +// public RequestRetryFactory(RequestRetryOptions requestRetryOptions) { +// this.requestRetryOptions = requestRetryOptions == null ? new RequestRetryOptions() : requestRetryOptions; +// } +// +// @Override +// public RequestPolicy create(RequestPolicy next, RequestPolicyOptions options) { +// return new RequestRetryPolicy(next, this.requestRetryOptions); +// } +// +// private static final class RequestRetryPolicy implements RequestPolicy { +// +// private final RequestPolicy nextPolicy; +// +// private final RequestRetryOptions requestRetryOptions; +// +// private RequestRetryPolicy(RequestPolicy nextPolicy, RequestRetryOptions requestRetryOptions) { +// this.nextPolicy = nextPolicy; +// this.requestRetryOptions = requestRetryOptions; +// } +// +// @Override +// public Single sendAsync(HttpRequest httpRequest) { +// boolean considerSecondary = (httpRequest.httpMethod().equals(HttpMethod.GET) +// || httpRequest.httpMethod().equals(HttpMethod.HEAD)) +// && (this.requestRetryOptions.secondaryHost() != null); +// +// return this.attemptAsync(httpRequest, 1, considerSecondary, 1); +// } +// +// // This is to log for debugging purposes only. Comment/uncomment as necessary for releasing/debugging. +// private void logf(String s, Object... args) { +// //System.out.println(String.format(s, args)); +// } +// +// /** +// * This method actually attempts to send the request and determines if we should attempt again and, if so, how +// * long to wait before sending out the next request. +// *

+// * Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2) When to retry: connection failure +// * or an HTTP status code of 500 or greater, except 501 and 505 If using a secondary: Odd tries go against +// * primary; even tries go against the secondary For a primary wait ((2 ^ primaryTries - 1) * delay * random(0.8, +// * 1.2) If secondary gets a 404, don't fail, retry but future retries are only against the primary When retrying +// * against a secondary, ignore the retry count and wait (.1 second * random(0.8, 1.2)) +// * +// * @param httpRequest +// * The request to try. +// * @param primaryTry +// * This indicates how man tries we've attempted against the primary DC. +// * @param considerSecondary +// * Before each try, we'll select either the primary or secondary URL if appropriate. +// * @param attempt +// * This indicates the total number of attempts to send the request. +// * +// * @return A single containing either the successful response or an error that was not retryable because either +// * the maxTries was exceeded or retries will not mitigate the issue. +// */ +// private Single attemptAsync(final HttpRequest httpRequest, final int primaryTry, +// final boolean considerSecondary, +// final int attempt) { +// logf("\n=====> Try=%d\n", attempt); +// +// // Determine which endpoint to try. It's primary if there is no secondary or if it is an odd number attempt. +// final boolean tryingPrimary = !considerSecondary || (attempt % 2 != 0); +// +// // Select the correct host and delay. +// long delayMs; +// if (tryingPrimary) { +// // The first attempt returns 0 delay. +// delayMs = this.requestRetryOptions.calculateDelayInMs(primaryTry); +// logf("Primary try=%d, Delay=%d\n", primaryTry, delayMs); +// } else { +// // Delay with some jitter before trying the secondary. +// delayMs = (long) ((ThreadLocalRandom.current().nextFloat() / 2 + 0.8) * 1000); // Add jitter +// logf("Secondary try=%d, Delay=%d\n", attempt - primaryTry, delayMs); +// } +// +// /* +// Clone the original request to ensure that each try starts with the original (unmutated) request. We cannot +// simply call httpRequest.buffer() because although the body will start emitting from the beginning of the +// stream, the buffers that were emitted will have already been consumed (their position set to their limit), +// so it is not a true reset. By adding the map function, we ensure that anything which consumes the +// ByteBuffers downstream will only actually consume a duplicate so the original is preserved. This only +// duplicates the ByteBuffer object, not the underlying data. +// */ +// HttpHeaders bufferedHeaders = new HttpHeaders(httpRequest.headers()); +// Flowable bufferedBody = httpRequest.body() == null +// ? null : httpRequest.body().map(ByteBuffer::duplicate); +// final HttpRequest requestCopy = new HttpRequest(httpRequest.callerMethod(), httpRequest.httpMethod(), +// httpRequest.url(), bufferedHeaders, bufferedBody, httpRequest.responseDecoder()); +// if (!tryingPrimary) { +// UrlBuilder builder = UrlBuilder.parse(requestCopy.url()); +// builder.withHost(this.requestRetryOptions.secondaryHost()); +// try { +// requestCopy.withUrl(builder.toURL()); +// } catch (MalformedURLException e) { +// return Single.error(e); +// } +// } +// requestCopy.withContext(httpRequest.context()); +// +// // Deadline stuff +// +// /* +// We want to send the request with a given timeout, but we don't want to kickoff that timeout-bound operation +// until after the retry backoff delay, so we call delaySubscription. +// */ +// return this.nextPolicy.sendAsync(requestCopy) +// .timeout(this.requestRetryOptions.tryTimeout(), TimeUnit.SECONDS) +// .delaySubscription(delayMs, TimeUnit.MILLISECONDS) +// .flatMap(response -> { +// boolean newConsiderSecondary = considerSecondary; +// String action; +// int statusCode = response.statusCode(); +// +// /* +// If attempt was against the secondary & it returned a StatusNotFound (404), then the +// resource was not found. This may be due to replication delay. So, in this case, +// we'll never try the secondary again for this operation. +// */ +// if (!tryingPrimary && statusCode == 404) { +// newConsiderSecondary = false; +// action = "Retry: Secondary URL returned 404"; +// } else if (statusCode == 503 || statusCode == 500) { +// action = "Retry: Temporary error or server timeout"; +// } else { +// action = "NoRetry: Successful HTTP request"; +// } +// +// logf("Action=%s\n", action); +// if (action.charAt(0) == 'R' && attempt < requestRetryOptions.maxTries()) { +// /* +// We increment primaryTry if we are about to try the primary again (which is when we +// consider the secondary and tried the secondary this time (tryingPrimary==false) or +// we do not consider the secondary at all (considerSecondary==false)). This will +// ensure primaryTry is correct when passed to calculate the delay. +// */ +// int newPrimaryTry = !tryingPrimary || !considerSecondary +// ? primaryTry + 1 : primaryTry; +// return attemptAsync(httpRequest, newPrimaryTry, newConsiderSecondary, +// attempt + 1); +// } +// return Single.just(response); +// }) +// .onErrorResumeNext(throwable -> { +// /* +// It is likely that many users will not realize that their Flowable must be replayable and +// get an error upon retries when the provided data length does not match the length of the exact +// data. We cannot enforce the desired Flowable behavior, so we provide a hint when this is likely +// the root cause. +// */ +// if (throwable instanceof UnexpectedLengthException && attempt > 1) { +// return Single.error(new IllegalStateException("The request failed because the " +// + "size of the contents of the provided Flowable did not match the provided " +// + "data size upon attempting to retry. This is likely caused by the Flowable " +// + "not being replayable. To support retries, all Flowables must produce the " +// + "same data for each subscriber. Please ensure this behavior.", throwable)); +// } +// +// /* +// IOException is a catch-all for IO related errors. Technically it includes many types which may +// not be network exceptions, but we should not hit those unless there is a bug in our logic. In +// either case, it is better to optimistically retry instead of failing too soon. +// A Timeout Exception is a client-side timeout coming from Rx. +// */ +// String action; +// if (throwable instanceof IOException) { +// action = "Retry: Network error"; +// } else if (throwable instanceof TimeoutException) { +// action = "Retry: Client timeout"; +// } else { +// action = "NoRetry: Unknown error"; +// } +// +// +// +// logf("Action=%s\n", action); +// if (action.charAt(0) == 'R' && attempt < requestRetryOptions.maxTries()) { +// /* +// We increment primaryTry if we are about to try the primary again (which is when we +// consider the secondary and tried the secondary this time (tryingPrimary==false) or +// we do not consider the secondary at all (considerSecondary==false)). This will +// ensure primaryTry is correct when passed to calculate the delay. +// */ +// int newPrimaryTry = !tryingPrimary || !considerSecondary +// ? primaryTry + 1 : primaryTry; +// return attemptAsync(httpRequest, newPrimaryTry, considerSecondary, +// attempt + 1); +// } +// return Single.error(throwable); +// }); +// } +// } +//} diff --git a/storage/client/src/main/java/com/azure/storage/blob/RequestRetryOptions.java b/storage/client/src/main/java/com/azure/storage/blob/RequestRetryOptions.java new file mode 100644 index 0000000000000..66219da36e836 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/RequestRetryOptions.java @@ -0,0 +1,165 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import java.util.concurrent.TimeUnit; + +/** + * Options for configuring the {@link RequestRetryPolicy}. Please refer to the Factory for more information. Note + * that there is no option for overall operation timeout. This is because Rx object have a timeout field which provides + * this functionality. + */ +final class RequestRetryOptions { + + private final int maxTries; + private final int tryTimeout; + private final long retryDelayInMs; + private final long maxRetryDelayInMs; + /** + * A {@link RetryPolicyType} telling the pipeline what kind of retry policy to use. + */ + private RetryPolicyType retryPolicyType; + private String secondaryHost; + + /** + * Constructor with default retry values: Exponential backoff, maxTries=4, tryTimeout=30, retryDelayInMs=4000, + * maxRetryDelayInMs=120000, secondaryHost=null. + */ + public RequestRetryOptions() { + this(RetryPolicyType.EXPONENTIAL, null, + null, null, null, null); + } + + /** + * Configures how the {@link com.microsoft.rest.v2.http.HttpPipeline} should retry requests. + * + * @param retryPolicyType + * A {@link RetryPolicyType} specifying the type of retry pattern to use. A value of {@code null} accepts + * the default. + * @param maxTries + * Specifies the maximum number of attempts an operation will be tried before producing an error. A value of + * {@code null} means that you accept our default policy. A value of 1 means 1 try and no retries. + * @param tryTimeout + * Indicates the maximum time allowed for any single try of an HTTP request. A value of {@code null} means + * that you accept our default. NOTE: When transferring large amounts of data, the default TryTimeout will + * probably not be sufficient. You should override this value based on the bandwidth available to the host + * machine and proximity to the Storage service. A good starting point may be something like (60 seconds per + * MB of anticipated-payload-size). + * @param retryDelayInMs + * Specifies the amount of delay to use before retrying an operation. A value of {@code null} means you + * accept the default value. The delay increases (exponentially or linearly) with each retry up to a maximum + * specified by MaxRetryDelay. If you specify {@code null}, then you must also specify {@code null} for + * MaxRetryDelay. + * @param maxRetryDelayInMs + * Specifies the maximum delay allowed before retrying an operation. A value of {@code null} means you + * accept the default value. If you specify {@code null}, then you must also specify {@code null} for + * RetryDelay. + * @param secondaryHost + * If a secondaryHost is specified, retries will be tried against this host. If secondaryHost is + * {@code null} (the default) then operations are not retried against another host. NOTE: Before setting + * this field, make sure you understand the issues around reading stale and potentially-inconsistent data at + * this webpage + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=pipeline_options "Sample code for RequestRetryOptions constructor")] \n + * For more samples, please see the [Samples file] (https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public RequestRetryOptions(RetryPolicyType retryPolicyType, Integer maxTries, Integer tryTimeout, + Long retryDelayInMs, Long maxRetryDelayInMs, String secondaryHost) { + this.retryPolicyType = retryPolicyType == null ? RetryPolicyType.EXPONENTIAL : retryPolicyType; + if (maxTries != null) { + Utility.assertInBounds("maxRetries", maxTries, 1, Integer.MAX_VALUE); + this.maxTries = maxTries; + } else { + this.maxTries = 4; + } + + if (tryTimeout != null) { + Utility.assertInBounds("tryTimeout", tryTimeout, 1, Integer.MAX_VALUE); + this.tryTimeout = tryTimeout; + } else { + this.tryTimeout = 60; + } + + if ((retryDelayInMs == null && maxRetryDelayInMs != null) + || (retryDelayInMs != null && maxRetryDelayInMs == null)) { + throw new IllegalArgumentException("Both retryDelay and maxRetryDelay must be null or neither can be null"); + } + + if (retryDelayInMs != null && maxRetryDelayInMs != null) { + Utility.assertInBounds("maxRetryDelayInMs", maxRetryDelayInMs, 1, Long.MAX_VALUE); + Utility.assertInBounds("retryDelayInMs", retryDelayInMs, 1, maxRetryDelayInMs); + this.maxRetryDelayInMs = maxRetryDelayInMs; + this.retryDelayInMs = retryDelayInMs; + } else { + switch (this.retryPolicyType) { + case EXPONENTIAL: + this.retryDelayInMs = TimeUnit.SECONDS.toMillis(4); + break; + case FIXED: + this.retryDelayInMs = TimeUnit.SECONDS.toMillis(30); + break; + default: + throw new IllegalArgumentException("Unrecognize retry policy type."); + } + this.maxRetryDelayInMs = TimeUnit.SECONDS.toMillis(120); + } + + this.secondaryHost = secondaryHost; + } + + int maxTries() { + return this.maxTries; + } + + int tryTimeout() { + return this.tryTimeout; + } + + String secondaryHost() { + return this.secondaryHost; + } + + long retryDelayInMs() { + return retryDelayInMs; + } + + long maxRetryDelayInMs() { + return maxRetryDelayInMs; + } + + /** + * Calculates how long to delay before sending the next request. + * + * @param tryCount + * An {@code int} indicating which try we are on. + * + * @return A {@code long} value of how many milliseconds to delay. + */ + long calculateDelayInMs(int tryCount) { + long delay = 0; + switch (this.retryPolicyType) { + case EXPONENTIAL: + delay = (pow(2L, tryCount - 1) - 1L) * this.retryDelayInMs; + break; + + case FIXED: + delay = this.retryDelayInMs; + break; + default: + throw new IllegalArgumentException("Invalid retry policy type."); + } + + return Math.min(delay, this.maxRetryDelayInMs); + } + + private long pow(long number, int exponent) { + long result = 1; + for (int i = 0; i < exponent; i++) { + result *= number; + } + + return result; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/RetryPolicyType.java b/storage/client/src/main/java/com/azure/storage/blob/RetryPolicyType.java new file mode 100644 index 0000000000000..e91fb8f7f7176 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/RetryPolicyType.java @@ -0,0 +1,19 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +/** + * This type holds possible options for retry backoff algorithms. They may be used with {@link RequestRetryOptions}. + */ +enum RetryPolicyType { + /** + * Tells the pipeline to use an exponential back-off retry policy. + */ + EXPONENTIAL, + + /** + * Tells the pipeline to use a fixed back-off retry policy. + */ + FIXED +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/SASProtocol.java b/storage/client/src/main/java/com/azure/storage/blob/SASProtocol.java new file mode 100644 index 0000000000000..86ca4d94da4cc --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/SASProtocol.java @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import java.util.Locale; + +/** + * Specifies the set of possible permissions for a shared access signature protocol. Values of this type can be used + * to set the fields on the {@link AccountSASSignatureValues} and {@link ServiceSASSignatureValues} types. + */ +enum SASProtocol { + /** + * Permission to use SAS only through https granted. + */ + HTTPS_ONLY(Constants.HTTPS), + + /** + * Permission to use SAS only through https or http granted. + */ + HTTPS_HTTP(Constants.HTTPS_HTTP); + + private final String protocols; + + SASProtocol(String p) { + this.protocols = p; + } + + /** + * Parses a {@code String} into a {@code SASProtocl} value if possible. + * + * @param str + * The value to try to parse. + * + * @return A {@code SASProtocol} value that represents the string if possible. + */ + public static SASProtocol parse(String str) { + if (str.equals(Constants.HTTPS)) { + return SASProtocol.HTTPS_ONLY; + } else if (str.equals(Constants.HTTPS_HTTP)) { + return SASProtocol.HTTPS_HTTP; + } + throw new IllegalArgumentException(String.format(Locale.ROOT, + "%s could not be parsed into a SASProtocl value.", str)); + } + + @Override + public String toString() { + return this.protocols; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/SASQueryParameters.java b/storage/client/src/main/java/com/azure/storage/blob/SASQueryParameters.java new file mode 100644 index 0000000000000..25748c73c507c --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/SASQueryParameters.java @@ -0,0 +1,641 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.storage.blob.models.UserDelegationKey; + +import java.net.UnknownHostException; +import java.time.OffsetDateTime; +import java.util.Map; + +import static com.azure.storage.blob.Utility.safeURLEncode; + +/** + * Represents the components that make up an Azure Storage SAS' query parameters. This type is not constructed directly + * by the user; it is only generated by the {@link AccountSASSignatureValues} and {@link ServiceSASSignatureValues} + * types. Once generated, it can be set on a {@link BlobURLParts} object to be constructed as part of a URL or it can + * be encoded into a {@code String} and appended to a URL directly (though caution should be taken here in case there + * are existing query parameters, which might affect the appropriate means of appending these query parameters). + * NOTE: Instances of this class are immutable to ensure thread safety. + */ +final class SASQueryParameters { + + private final String version; + + private final String services; + + private final String resourceTypes; + + private final SASProtocol protocol; + + private final OffsetDateTime startTime; + + private final OffsetDateTime expiryTime; + + private final IPRange ipRange; + + private final String identifier; + + private final String keyOid; + + private final String keyTid; + + private final OffsetDateTime keyStart; + + private final OffsetDateTime keyExpiry; + + private final String keyService; + + private final String keyVersion; + + private final String resource; + + private final String permissions; + + private final String signature; + + private final String cacheControl; + + private final String contentDisposition; + + private final String contentEncoding; + + private final String contentLanguage; + + private final String contentType; + + /** + * Creates a new {@link SASQueryParameters} object. + * + * @param queryParamsMap + * All query parameters for the request as key-value pairs + * @param removeSASParametersFromMap + * When {@code true}, the SAS query parameters will be removed from queryParamsMap + */ + SASQueryParameters(Map queryParamsMap, boolean removeSASParametersFromMap) + throws UnknownHostException { + + String[] queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_SERVICE_VERSION); + if (queryValue != null) { + this.version = queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_SERVICE_VERSION); + } + } else { + this.version = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_SERVICES); + if (queryValue != null) { + this.services = queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_SERVICES); + } + } else { + this.services = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_RESOURCES_TYPES); + if (queryValue != null) { + this.resourceTypes = queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_RESOURCES_TYPES); + } + } else { + this.resourceTypes = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_PROTOCOL); + if (queryValue != null) { + this.protocol = SASProtocol.parse(queryValue[0]); + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_PROTOCOL); + } + } else { + this.protocol = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_START_TIME); + if (queryValue != null) { + this.startTime = Utility.parseDate(queryValue[0]); + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_START_TIME); + } + } else { + this.startTime = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_EXPIRY_TIME); + if (queryValue != null) { + this.expiryTime = Utility.parseDate(queryValue[0]); + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_EXPIRY_TIME); + } + } else { + this.expiryTime = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_IP_RANGE); + if (queryValue != null) { + this.ipRange = IPRange.parse(queryValue[0]); + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_IP_RANGE); + } + } else { + this.ipRange = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_SIGNED_IDENTIFIER); + if (queryValue != null) { + this.identifier = queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_SIGNED_IDENTIFIER); + } + } else { + this.identifier = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_SIGNED_OBJECT_ID); + if (queryValue != null) { + this.keyOid = queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_SIGNED_OBJECT_ID); + } + } else { + this.keyOid = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_SIGNED_TENANT_ID); + if (queryValue != null) { + this.keyTid = queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_SIGNED_TENANT_ID); + } + } else { + this.keyTid = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_SIGNED_KEY_START); + if (queryValue != null) { + this.keyStart = Utility.parseDate(queryValue[0]); + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_SIGNED_KEY_START); + } + } else { + this.keyStart = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY); + if (queryValue != null) { + this.keyExpiry = Utility.parseDate(queryValue[0]); + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY); + } + } else { + this.keyExpiry = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE); + if (queryValue != null) { + this.keyService = queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE); + } + } else { + this.keyService = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_SIGNED_KEY_VERSION); + if (queryValue != null) { + this.keyVersion = queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_SIGNED_KEY_VERSION); + } + } else { + this.keyVersion = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_SIGNED_RESOURCE); + if (queryValue != null) { + this.resource = queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_SIGNED_RESOURCE); + } + } else { + this.resource = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_SIGNED_PERMISSIONS); + if (queryValue != null) { + this.permissions = queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_SIGNED_PERMISSIONS); + } + } else { + this.permissions = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_SIGNATURE); + if (queryValue != null) { + this.signature = queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_SIGNATURE); + } + } else { + this.signature = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_CACHE_CONTROL); + if (queryValue != null) { + this.cacheControl = queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_CACHE_CONTROL); + } + } else { + this.cacheControl = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_CONTENT_DISPOSITION); + if (queryValue != null) { + this.contentDisposition = queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_CONTENT_DISPOSITION); + } + } else { + this.contentDisposition = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_CONTENT_ENCODING); + if (queryValue != null) { + this.contentEncoding = queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_CONTENT_ENCODING); + } + } else { + this.contentEncoding = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_CONTENT_LANGUAGE); + if (queryValue != null) { + this.contentLanguage = queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_CONTENT_LANGUAGE); + } + } else { + this.contentLanguage = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_CONTENT_TYPE); + if (queryValue != null) { + this.contentType = queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_CONTENT_TYPE); + } + } else { + this.contentType = null; + } + } + + /** + * Creates a new {@link SASQueryParameters} object. These objects are only created internally by + * *SASSignatureValues classes. + * + * @param version + * A {@code String} representing the storage version. + * @param services + * A {@code String} representing the storage services being accessed (only for Account SAS). + * @param resourceTypes + * A {@code String} representing the storage resource types being accessed (only for Account SAS). + * @param protocol + * A {@code String} representing the allowed HTTP protocol(s) or {@code null}. + * @param startTime + * A {@code java.util.Date} representing the start time for this SAS token or {@code null}. + * @param expiryTime + * A {@code java.util.Date} representing the expiry time for this SAS token. + * @param ipRange + * A {@link IPRange} representing the range of valid IP addresses for this SAS token or {@code null}. + * @param identifier + * A {@code String} representing the signed identifier (only for Service SAS) or {@code null}. + * @param resource + * A {@code String} representing the storage container or blob (only for Service SAS). + * @param permissions + * A {@code String} representing the storage permissions or {@code null}. + * @param signature + * A {@code String} representing the signature for the SAS token. + */ + SASQueryParameters(String version, String services, String resourceTypes, SASProtocol protocol, + OffsetDateTime startTime, OffsetDateTime expiryTime, IPRange ipRange, String identifier, + String resource, String permissions, String signature, String cacheControl, String contentDisposition, + String contentEncoding, String contentLanguage, String contentType, UserDelegationKey key) { + + this.version = version; + this.services = services; + this.resourceTypes = resourceTypes; + this.protocol = protocol; + this.startTime = startTime; + this.expiryTime = expiryTime; + this.ipRange = ipRange; + this.identifier = identifier; + this.resource = resource; + this.permissions = permissions; + this.signature = signature; + this.cacheControl = cacheControl; + this.contentDisposition = contentDisposition; + this.contentEncoding = contentEncoding; + this.contentLanguage = contentLanguage; + this.contentType = contentType; + + if (key != null) { + this.keyOid = key.signedOid(); + this.keyTid = key.signedTid(); + this.keyStart = key.signedStart(); + this.keyExpiry = key.signedExpiry(); + this.keyService = key.signedService(); + this.keyVersion = key.signedVersion(); + } else { + this.keyOid = null; + this.keyTid = null; + this.keyStart = null; + this.keyExpiry = null; + this.keyService = null; + this.keyVersion = null; + } + } + + /** + * @return The storage version + */ + public String version() { + return version; + } + + /** + * @return The storage services being accessed (only for Account SAS). Please refer to {@link AccountSASService} for + * more details. + */ + public String services() { + return services; + } + + /** + * @return The storage resource types being accessed (only for Account SAS). Please refer to + * {@link AccountSASResourceType} for more details. + */ + public String resourceTypes() { + return resourceTypes; + } + + /** + * @return The allowed HTTP protocol(s) or {@code null}. Please refer to {@link SASProtocol} for more details. + */ + public SASProtocol protocol() { + return protocol; + } + + /** + * @return The start time for this SAS token or {@code null}. + */ + public OffsetDateTime startTime() { + return startTime; + } + + /** + * @return The expiry time for this SAS token. + */ + public OffsetDateTime expiryTime() { + return expiryTime; + } + + /** + * @return {@link IPRange} + */ + public IPRange ipRange() { + return ipRange; + } + + /** + * @return The signed identifier (only for {@link ServiceSASSignatureValues}) or {@code null}. Please see + * here + * for more information. + */ + public String identifier() { + return identifier; + } + + /** + * @return The storage container or blob (only for {@link ServiceSASSignatureValues}). + */ + public String resource() { + return resource; + } + + /** + * @return Please refer to {@link AccountSASPermission}, {@link BlobSASPermission}, or {@link ContainerSASPermission} + * for more details. + */ + public String permissions() { + return permissions; + } + + /** + * @return The signature for the SAS token. + */ + public String signature() { + return signature; + } + + /** + * @return The Cache-Control header value when a client accesses the resource with this sas token. + */ + public String cacheControl() { + return cacheControl; + } + + /** + * @return The Content-Disposition header value when a client accesses the resource with this sas token. + */ + public String contentDisposition() { + return contentDisposition; + } + + /** + * @return The Content-Encoding header value when a client accesses the resource with this sas token. + */ + public String contentEncoding() { + return contentEncoding; + } + + /** + * @return The Content-Language header value when a client accesses the resource with this sas token. + */ + public String contentLanguage() { + return contentLanguage; + } + + /** + * @return The Content-Type header value when a client accesses the resource with this sas token. + */ + public String contentType() { + return contentType; + } + + public String keyOid() { + return keyOid; + } + + public String keyTid() { + return keyTid; + } + + public OffsetDateTime keyStart() { + return keyStart; + } + + public OffsetDateTime keyExpiry() { + return keyExpiry; + } + + public String keyService() { + return keyService; + } + + public String keyVersion() { + return keyVersion; + } + + UserDelegationKey userDelegationKey() { + return new UserDelegationKey() + .signedExpiry(this.keyExpiry) + .signedOid(this.keyOid) + .signedService(this.keyService) + .signedStart(this.keyStart) + .signedTid(this.keyTid) + .signedVersion(this.keyVersion); + } + + private void tryAppendQueryParameter(StringBuilder sb, String param, Object value) { + if (value != null) { + if (sb.length() == 0) { + sb.append('?'); + } else { + sb.append('&'); + } + sb.append(safeURLEncode(param)).append('=').append(safeURLEncode(value.toString())); + } + } + + /** + * Encodes all SAS query parameters into a string that can be appended to a URL. + * + * @return A {@code String} representing all SAS query parameters. + */ + public String encode() { + /* + We should be url-encoding each key and each value, but because we know all the keys and values will encode to + themselves, we cheat except for the signature value. + */ + String[] params = { + Constants.UrlConstants.SAS_SERVICE_VERSION, + Constants.UrlConstants.SAS_SERVICES, + Constants.UrlConstants.SAS_RESOURCES_TYPES, + Constants.UrlConstants.SAS_PROTOCOL, + Constants.UrlConstants.SAS_START_TIME, + Constants.UrlConstants.SAS_EXPIRY_TIME, + Constants.UrlConstants.SAS_IP_RANGE, + Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, + Constants.UrlConstants.SAS_SIGNED_OBJECT_ID, + Constants.UrlConstants.SAS_SIGNED_TENANT_ID, + Constants.UrlConstants.SAS_SIGNED_KEY_START, + Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY, + Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE, + Constants.UrlConstants.SAS_SIGNED_KEY_VERSION, + Constants.UrlConstants.SAS_SIGNED_RESOURCE, + Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, + Constants.UrlConstants.SAS_SIGNATURE, + Constants.UrlConstants.SAS_CACHE_CONTROL, + Constants.UrlConstants.SAS_CONTENT_DISPOSITION, + Constants.UrlConstants.SAS_CONTENT_ENCODING, + Constants.UrlConstants.SAS_CONTENT_LANGUAGE, + Constants.UrlConstants.SAS_CONTENT_TYPE + }; + StringBuilder sb = new StringBuilder(); + for (String param : params) { + switch (param) { + case Constants.UrlConstants.SAS_SERVICE_VERSION: + tryAppendQueryParameter(sb, param, this.version); + break; + case Constants.UrlConstants.SAS_SERVICES: + tryAppendQueryParameter(sb, param, this.services); + break; + case Constants.UrlConstants.SAS_RESOURCES_TYPES: + tryAppendQueryParameter(sb, param, this.resourceTypes); + break; + case Constants.UrlConstants.SAS_PROTOCOL: + tryAppendQueryParameter(sb, param, this.protocol); + break; + case Constants.UrlConstants.SAS_START_TIME: + tryAppendQueryParameter(sb, param, + this.startTime == null ? null : Utility.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime)); + break; + case Constants.UrlConstants.SAS_EXPIRY_TIME: + tryAppendQueryParameter(sb, param, + this.expiryTime == null ? null : Utility.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime)); + break; + case Constants.UrlConstants.SAS_IP_RANGE: + tryAppendQueryParameter(sb, param, this.ipRange); + break; + case Constants.UrlConstants.SAS_SIGNED_IDENTIFIER: + tryAppendQueryParameter(sb, param, this.identifier); + break; + case Constants.UrlConstants.SAS_SIGNED_OBJECT_ID: + tryAppendQueryParameter(sb, param, this.keyOid); + break; + case Constants.UrlConstants.SAS_SIGNED_TENANT_ID: + tryAppendQueryParameter(sb, param, this.keyTid); + break; + case Constants.UrlConstants.SAS_SIGNED_KEY_START: + tryAppendQueryParameter(sb, param, + this.keyStart == null ? null : Utility.ISO_8601_UTC_DATE_FORMATTER.format(this.keyStart)); + break; + case Constants.UrlConstants.SAS_SIGNED_KEY_EXPIRY: + tryAppendQueryParameter(sb, param, + this.keyExpiry == null ? null : Utility.ISO_8601_UTC_DATE_FORMATTER.format(this.keyExpiry)); + break; + case Constants.UrlConstants.SAS_SIGNED_KEY_SERVICE: + tryAppendQueryParameter(sb, param, this.keyService); + break; + case Constants.UrlConstants.SAS_SIGNED_KEY_VERSION: + tryAppendQueryParameter(sb, param, this.keyVersion); + break; + case Constants.UrlConstants.SAS_SIGNED_RESOURCE: + tryAppendQueryParameter(sb, param, this.resource); + break; + case Constants.UrlConstants.SAS_SIGNED_PERMISSIONS: + tryAppendQueryParameter(sb, param, this.permissions); + break; + case Constants.UrlConstants.SAS_SIGNATURE: + tryAppendQueryParameter(sb, param, this.signature); + break; + case Constants.UrlConstants.SAS_CACHE_CONTROL: + tryAppendQueryParameter(sb, param, this.cacheControl); + break; + case Constants.UrlConstants.SAS_CONTENT_DISPOSITION: + tryAppendQueryParameter(sb, param, this.contentDisposition); + break; + case Constants.UrlConstants.SAS_CONTENT_ENCODING: + tryAppendQueryParameter(sb, param, this.contentEncoding); + break; + case Constants.UrlConstants.SAS_CONTENT_LANGUAGE: + tryAppendQueryParameter(sb, param, this.contentLanguage); + break; + case Constants.UrlConstants.SAS_CONTENT_TYPE: + tryAppendQueryParameter(sb, param, this.contentType); + break; + default: + throw new IllegalArgumentException("Invalid URL constant."); + } + } + return sb.toString(); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/SR.java b/storage/client/src/main/java/com/azure/storage/blob/SR.java new file mode 100644 index 0000000000000..6806fbc87c799 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/SR.java @@ -0,0 +1,119 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +/** + * RESERVED FOR INTERNAL USE. Provides a standard set of errors that could be thrown from the client library. + */ +final class SR { + // TODO: Do we want to keep any of what's left? + public static final String ACCOUNT_NAME_NULL_OR_EMPTY = "The account name is null or empty."; + public static final String ACCOUNT_NAME_MISMATCH = "The account name does not match the existing account name on the credentials."; + public static final String ARGUMENT_NULL_OR_EMPTY = "The argument must not be null or an empty string. Argument name: %s."; + public static final String ARGUMENT_OUT_OF_RANGE_ERROR = "The argument is out of range. Argument name: %s, Value passed: %s."; + public static final String BLOB_OVER_MAX_BLOCK_LIMIT = "The total blocks for this upload exceeds the maximum allowable limit."; + public static final String BLOB_DATA_CORRUPTED = "Blob data corrupted (integrity check failed), Expected value is %s, retrieved %s"; + public static final String BLOB_ENDPOINT_NOT_CONFIGURED = "No blob endpoint configured."; + public static final String BLOB_HASH_MISMATCH = "Blob hash mismatch (integrity check failed), Expected value is %s, retrieved %s."; + public static final String BLOB_MD5_NOT_SUPPORTED_FOR_PAGE_BLOBS = "Blob level MD5 is not supported for page blobs."; + public static final String CANNOT_CREATE_SAS_FOR_GIVEN_CREDENTIALS = "Cannot create Shared Access Signature as the credentials does not have account name information. Please check that the credentials provided support creating Shared Access Signature."; + public static final String CANNOT_CREATE_SAS_WITHOUT_ACCOUNT_KEY = "Cannot create Shared Access Signature unless the Account Key credentials are used by the ServiceClient."; + public static final String CANNOT_TRANSFORM_NON_HTTPS_URI_WITH_HTTPS_ONLY_CREDENTIALS = "Cannot use HTTP with credentials that only support HTTPS."; + public static final String CONTAINER = "container"; + public static final String CONTENT_LENGTH_MISMATCH = "An incorrect number of bytes was read from the connection. The connection may have been closed."; + public static final String CREATING_NETWORK_STREAM = "Creating a NetworkInputStream and expecting to read %s bytes."; + public static final String CREDENTIALS_CANNOT_SIGN_REQUEST = "CloudBlobClient, CloudQueueClient and CloudTableClient require credentials that can sign a request."; + public static final String DEFAULT_SERVICE_VERSION_ONLY_SET_FOR_BLOB_SERVICE = "DefaultServiceVersion can only be set for the Blob service."; + public static final String DELETE_SNAPSHOT_NOT_VALID_ERROR = "The option '%s' must be 'None' to delete a specific snapshot specified by '%s'."; + public static final String ENUMERATION_ERROR = "An error occurred while enumerating the result, check the original exception for details."; + public static final String ENDPOINT_INFORMATION_UNAVAILABLE = "Endpoint information not available for Account using Shared Access Credentials."; + public static final String ETAG_INVALID_FOR_DELETE = "Delete requires a valid ETag (which may be the '*' wildcard)."; + public static final String ETAG_INVALID_FOR_MERGE = "Merge requires a valid ETag (which may be the '*' wildcard)."; + public static final String ETAG_INVALID_FOR_UPDATE = "Replace requires a valid ETag (which may be the '*' wildcard)."; + public static final String ENUM_COULD_NOT_BE_PARSED_INVALID_VALUE = "%s could not be parsed from '%s' due to invalid value %s."; + public static final String INCORRECT_STREAM_LENGTH = "An incorrect stream length was specified, resulting in an authentication failure. Please specify correct length, or -1."; + public static final String INPUT_STREAM_SHOULD_BE_MARKABLE = "Input stream must be markable."; + public static final String INVALID_ACCOUNT_NAME = "Invalid account name."; + public static final String INVALID_ACL_ACCESS_TYPE = "Invalid acl public access type returned '%s'. Expected blob or container."; + public static final String INVALID_BLOB_TYPE = "Incorrect Blob type, please use the correct Blob type to access a blob on the server. Expected %s, actual %s."; + public static final String INVALID_BLOCK_ID = "Invalid blockID, blockID must be a valid Base64 String."; + public static final String INVALID_BLOCK_SIZE = "Block data should not exceed BlockBlobURL.MAX_STAGE_BLOCK_BYTES"; + public static final String INVALID_CONDITIONAL_HEADERS = "The conditionals specified for this operation did not match server."; + public static final String INVALID_CONNECTION_STRING = "Invalid connection string."; + public static final String INVALID_CONNECTION_STRING_DEV_STORE_NOT_TRUE = "Invalid connection string, the UseDevelopmentStorage key must always be paired with 'true'. Remove the flag entirely otherwise."; + public static final String INVALID_CONTENT_LENGTH = "ContentLength must be set to -1 or positive Long value."; + public static final String INVALID_CONTENT_TYPE = "An incorrect Content-Type was returned from the server."; + public static final String INVALID_CORS_RULE = "A CORS rule must contain at least one allowed origin and allowed method, and MaxAgeInSeconds cannot have a value less than zero."; + public static final String INVALID_DATE_STRING = "Invalid Date String: %s."; + public static final String INVALID_EDMTYPE_VALUE = "Invalid value '%s' for EdmType."; + public static final String INVALID_FILE_LENGTH = "File length must be greater than or equal to 0 bytes."; + public static final String INVALID_GEO_REPLICATION_STATUS = "Null or Invalid geo-replication status in response: %s."; + public static final String INVALID_IP_ADDRESS = "Error when parsing IPv4 address: IP address '%s' is invalid."; + public static final String INVALID_KEY = "Storage Key is not a valid base64 encoded string."; + public static final String INVALID_LISTING_DETAILS = "Invalid blob listing details specified."; + public static final String INVALID_LOGGING_LEVEL = "Invalid logging operations specified."; + public static final String INVALID_MAX_WRITE_SIZE = "Max write size is 4MB. Please specify a smaller range."; + public static final String INVALID_MESSAGE_LENGTH = "The message size cannot be larger than %s bytes."; + public static final String INVALID_MIME_RESPONSE = "Invalid MIME response received."; + public static final String INVALID_NUMBER_OF_BYTES_IN_THE_BUFFER = "Page data must be a multiple of 512 bytes. Buffer currently contains %d bytes."; + public static final String INVALID_OPERATION_FOR_A_SNAPSHOT = "Cannot perform this operation on a blob representing a snapshot."; + public static final String INVALID_PAGE_BLOB_LENGTH = "Page blob length must be multiple of 512."; + public static final String INVALID_PAGE_START_OFFSET = "Page start offset must be multiple of 512."; + public static final String INVALID_RANGE_CONTENT_MD5_HEADER = "Cannot specify x-ms-range-get-content-md5 header on ranges larger than 4 MB. Either use a BlobReadStream via openRead, or disable TransactionalMD5 via the BlobRequestOptions."; + public static final String INVALID_RESOURCE_NAME = "Invalid %s name. Check MSDN for more information about valid naming."; + public static final String INVALID_RESOURCE_NAME_LENGTH = "Invalid %s name length. The name must be between %s and %s characters long."; + public static final String INVALID_RESOURCE_RESERVED_NAME = "Invalid %s name. This name is reserved."; + public static final String INVALID_RESPONSE_RECEIVED = "The response received is invalid or improperly formatted."; + public static final String INVALID_STORAGE_PROTOCOL_VERSION = "Storage protocol version prior to 2009-09-19 do not support shared key authentication."; + public static final String INVALID_STORAGE_SERVICE = "Invalid storage service specified."; + public static final String INVALID_STREAM_LENGTH = "Invalid stream length; stream must be between 0 and %s MB in length."; + public static final String ITERATOR_EMPTY = "There are no more elements in this enumeration."; + public static final String KEY_AND_RESOLVER_MISSING = "Key and Resolver are not initialized. Decryption requires either of them to be initialized."; + public static final String LEASE_CONDITION_ON_SOURCE = "A lease condition cannot be specified on the source of a copy."; + public static final String LOG_STREAM_END_ERROR = "Error parsing log record: unexpected end of stream."; + public static final String LOG_STREAM_DELIMITER_ERROR = "Error parsing log record: unexpected delimiter encountered."; + public static final String LOG_STREAM_QUOTE_ERROR = "Error parsing log record: unexpected quote character encountered."; + public static final String LOG_VERSION_UNSUPPORTED = "A storage log version of %s is unsupported."; + public static final String MARK_EXPIRED = "Stream mark expired."; + public static final String MAXIMUM_EXECUTION_TIMEOUT_EXCEPTION = "The client could not finish the operation within specified maximum execution timeout."; + public static final String MISSING_CREDENTIALS = "No credentials provided."; + public static final String MISSING_MANDATORY_DATE_HEADER = "Canonicalization did not find a non-empty x-ms-date header in the request. Please use a request with a valid x-ms-date header in RFC 123 format."; + public static final String MISSING_MANDATORY_PARAMETER_FOR_SAS = "Missing mandatory parameters for valid Shared Access Signature."; + public static final String MISSING_MD5 = "ContentMD5 header is missing in the response."; + public static final String MISSING_NULLARY_CONSTRUCTOR = "Class type must contain contain a nullary constructor."; + public static final String MULTIPLE_CREDENTIALS_PROVIDED = "Cannot provide credentials as part of the address and as constructor parameter. Either pass in the address or use a different constructor."; + public static final String PARAMETER_NOT_IN_RANGE = "The value of the parameter '%s' should be between %s and %s."; + public static final String PARAMETER_SHOULD_BE_GREATER = "The value of the parameter '%s' should be greater than %s."; + public static final String PARAMETER_SHOULD_BE_GREATER_OR_EQUAL = "The value of the parameter '%s' should be greater than or equal to %s."; + public static final String PATH_STYLE_URI_MISSING_ACCOUNT_INFORMATION = "Missing account name information inside path style URI. Path style URIs should be of the form http:///"; + public static final String PRIMARY_ONLY_COMMAND = "This operation can only be executed against the primary storage location."; + public static final String PROPERTY_CANNOT_BE_SERIALIZED_AS_GIVEN_EDMTYPE = "Property %s with Edm Type %s cannot be de-serialized."; + public static final String PRECONDITION_FAILURE_IGNORED = "Pre-condition failure on a retry is being ignored since the request should have succeeded in the first attempt."; + public static final String RELATIVE_ADDRESS_NOT_PERMITTED = "Address %s is a relative address. Only absolute addresses are permitted."; + public static final String RESOURCE_NAME_EMPTY = "Invalid %s name. The name may not be null, empty, or whitespace only."; + public static final String RESPONSE_RECEIVED_IS_INVALID = "The response received is invalid or improperly formatted."; + public static final String SCHEME_NULL_OR_EMPTY = "The protocol to use is null. Please specify whether to use http or https."; + public static final String SECONDARY_ONLY_COMMAND = "This operation can only be executed against the secondary storage location."; + public static final String SNAPSHOT_LISTING_ERROR = "Listing snapshots is only supported in flat mode (no delimiter). Consider setting useFlatBlobListing to true."; + public static final String SNAPSHOT_QUERY_OPTION_ALREADY_DEFINED = "Snapshot query parameter is already defined in the blob URI. Either pass in a snapshotTime parameter or use a full URL with a snapshot query parameter."; + public static final String STORAGE_CREDENTIALS_NULL_OR_ANONYMOUS = "StorageCredentials cannot be null or anonymous for this service."; + public static final String STORAGE_CLIENT_OR_SAS_REQUIRED = "Either a SAS token or a service client must be specified."; + public static final String STORAGE_URI_MISSING_LOCATION = "The URI for the target storage location is not specified. Please consider changing the request's location mode."; + public static final String STORAGE_URI_MUST_MATCH = "Primary and secondary location URIs in a StorageUri must point to the same resource."; + public static final String STORAGE_URI_NOT_NULL = "Primary and secondary location URIs in a StorageUri must not both be null."; + public static final String STREAM_CLOSED = "Stream is already closed."; + public static final String STREAM_SKIP_FAILED = "The supplied stream has failed to skip to the correct position after successive attempts. Please ensure there are bytes available and try your upload again."; + public static final String STREAM_LENGTH_GREATER_THAN_4MB = "Invalid stream length, length must be less than or equal to 4 MB in size."; + public static final String STREAM_LENGTH_GREATER_THAN_100MB = "Invalid stream length, length must be less than or equal to 100 MB in size."; + public static final String STREAM_LENGTH_NEGATIVE = "Invalid stream length, specify -1 for unknown length stream, or a positive number of bytes."; + public static final String STRING_NOT_VALID = "The String is not a valid Base64-encoded string."; + public static final String TAKE_COUNT_ZERO_OR_NEGATIVE = "Take count must be positive and greater than 0."; + public static final String TOO_MANY_PATH_SEGMENTS = "The count of URL path segments (strings between '/' characters) as part of the blob name cannot exceed 254."; + public static final String TOO_MANY_SHARED_ACCESS_POLICY_IDENTIFIERS = "Too many %d shared access policy identifiers provided. Server does not support setting more than %d on a single container, queue, or table."; + public static final String TOO_MANY_SHARED_ACCESS_POLICY_IDS = "Too many %d shared access policy identifiers provided. Server does not support setting more than %d on a single container."; + public static final String UNEXPECTED_CONTINUATION_TYPE = "The continuation type passed in is unexpected. Please verify that the correct continuation type is passed in. Expected {%s}, found {%s}."; + public static final String UNEXPECTED_FIELD_NAME = "Unexpected field name. Expected: '%s'. Actual: '%s'."; + public static final String UNEXPECTED_STATUS_CODE_RECEIVED = "Unexpected http status code received."; + public static final String UNEXPECTED_STREAM_READ_ERROR = "Unexpected error. Stream returned unexpected number of bytes."; +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/ServiceSASSignatureValues.java b/storage/client/src/main/java/com/azure/storage/blob/ServiceSASSignatureValues.java new file mode 100644 index 0000000000000..26ab4f8a4db7f --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/ServiceSASSignatureValues.java @@ -0,0 +1,467 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.storage.blob.models.UserDelegationKey; + +import java.security.InvalidKeyException; +import java.time.OffsetDateTime; + +/** + * ServiceSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage service. Once + * all the values here are set appropriately, call generateSASQueryParameters to obtain a representation of the SAS + * which can actually be applied to blob urls. Note: that both this class and {@link SASQueryParameters} exist because + * the former is mutable and a logical representation while the latter is immutable and used to generate actual REST + * requests. + *

+ * Please see here + * for more conceptual information on SAS. + *

+ * Please see here for + * more details on each value, including which are required. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_sas "Sample code for ServiceSASSignatureValues")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ +final class ServiceSASSignatureValues { + + private String version = Constants.HeaderConstants.TARGET_STORAGE_VERSION; + + private SASProtocol protocol; + + private OffsetDateTime startTime; + + private OffsetDateTime expiryTime; + + private String permissions; + + private IPRange ipRange; + + private String containerName; + + private String blobName; + + private String snapshotId; + + private String identifier; + + private String cacheControl; + + private String contentDisposition; + + private String contentEncoding; + + private String contentLanguage; + + private String contentType; + + /** + * Creates an object with empty values for all fields. + */ + public ServiceSASSignatureValues() { + } + + /** + * The version of the service this SAS will target. If not specified, it will default to the version targeted by the + * library. + */ + public String version() { + return version; + } + + /** + * The version of the service this SAS will target. If not specified, it will default to the version targeted by the + * library. + */ + public ServiceSASSignatureValues withVersion(String version) { + this.version = version; + return this; + } + + /** + * {@link SASProtocol} + */ + public SASProtocol protocol() { + return protocol; + } + + /** + * {@link SASProtocol} + */ + public ServiceSASSignatureValues withProtocol(SASProtocol protocol) { + this.protocol = protocol; + return this; + } + + /** + * When the SAS will take effect. + */ + public OffsetDateTime startTime() { + return startTime; + } + + /** + * When the SAS will take effect. + */ + public ServiceSASSignatureValues withStartTime(OffsetDateTime startTime) { + this.startTime = startTime; + return this; + } + + /** + * The time after which the SAS will no longer work. + */ + public OffsetDateTime expiryTime() { + return expiryTime; + } + + /** + * The time after which the SAS will no longer work. + */ + public ServiceSASSignatureValues withExpiryTime(OffsetDateTime expiryTime) { + this.expiryTime = expiryTime; + return this; + } + + /** + * Please refer to either {@link ContainerSASPermission} or {@link BlobSASPermission} depending on the resource + * being accessed for help constructing the permissions string. + */ + public String permissions() { + return permissions; + } + + /** + * Please refer to either {@link ContainerSASPermission} or {@link BlobSASPermission} depending on the resource + * being accessed for help constructing the permissions string. + */ + public ServiceSASSignatureValues withPermissions(String permissions) { + this.permissions = permissions; + return this; + } + + /** + * {@link IPRange} + */ + public IPRange ipRange() { + return ipRange; + } + + /** + * {@link IPRange} + */ + public ServiceSASSignatureValues withIpRange(IPRange ipRange) { + this.ipRange = ipRange; + return this; + } + + /** + * The name of the container the SAS user may access. + */ + public String containerName() { + return containerName; + } + + /** + * The name of the container the SAS user may access. + */ + public ServiceSASSignatureValues withContainerName(String containerName) { + this.containerName = containerName; + return this; + } + + /** + * The name of the blob the SAS user may access. + */ + public String blobName() { + return blobName; + } + + /** + * The name of the blob the SAS user may access. + */ + public ServiceSASSignatureValues withBlobName(String blobName) { + this.blobName = blobName; + return this; + } + + /** + * The specific snapshot the SAS user may access. + */ + public String snapshotId() { + return snapshotId; + } + + /** + * The specific snapshot the SAS user may access. + */ + public ServiceSASSignatureValues withSnapshotId(String snapshotId) { + this.snapshotId = snapshotId; + return this; + } + + /** + * The name of the access policy on the container this SAS references if any. Please see + * here + * for more information. + */ + public String identifier() { + return identifier; + } + + /** + * The name of the access policy on the container this SAS references if any. Please see + * here + * for more information. + */ + public ServiceSASSignatureValues withIdentifier(String identifier) { + this.identifier = identifier; + return this; + } + + /** + * The cache-control header for the SAS. + */ + public String cacheControl() { + return cacheControl; + } + + /** + * The cache-control header for the SAS. + */ + public ServiceSASSignatureValues withCacheControl(String cacheControl) { + this.cacheControl = cacheControl; + return this; + } + + /** + * The content-disposition header for the SAS. + */ + public String contentDisposition() { + return contentDisposition; + } + + /** + * The content-disposition header for the SAS. + */ + public ServiceSASSignatureValues withContentDisposition(String contentDisposition) { + this.contentDisposition = contentDisposition; + return this; + } + + /** + * The content-encoding header for the SAS. + */ + public String contentEncoding() { + return contentEncoding; + } + + /** + * The content-encoding header for the SAS. + */ + public ServiceSASSignatureValues withContentEncoding(String contentEncoding) { + this.contentEncoding = contentEncoding; + return this; + } + + /** + * The content-language header for the SAS. + */ + public String contentLanguage() { + return contentLanguage; + } + + /** + * The content-language header for the SAS. + */ + public ServiceSASSignatureValues withContentLanguage(String contentLanguage) { + this.contentLanguage = contentLanguage; + return this; + } + + /** + * The content-type header for the SAS. + */ + public String contentType() { + return contentType; + } + + /** + * The content-type header for the SAS. + */ + public ServiceSASSignatureValues withContentType(String contentType) { + this.contentType = contentType; + return this; + } + + /** + * Uses an account's shared key credential to sign these signature values to produce the proper SAS query + * parameters. + * + * @param sharedKeyCredentials + * A {@link SharedKeyCredentials} object used to sign the SAS values. + * + * @return {@link SASQueryParameters} + */ + public SASQueryParameters generateSASQueryParameters(SharedKeyCredentials sharedKeyCredentials) { + Utility.assertNotNull("sharedKeyCredentials", sharedKeyCredentials); + assertGenerateOK(); + + String resource = getResource(); + String verifiedPermissions = getVerifiedPermissions(); + + // Signature is generated on the un-url-encoded values. + final String stringToSign = stringToSign(verifiedPermissions, resource, sharedKeyCredentials); + + String signature = null; + try { + signature = sharedKeyCredentials.computeHmac256(stringToSign); + } catch (InvalidKeyException e) { + throw new Error(e); // The key should have been validated by now. If it is no longer valid here, we fail. + } + + return new SASQueryParameters(this.version, null, null, + this.protocol, this.startTime, this.expiryTime, this.ipRange, this.identifier, resource, + this.permissions, signature, this.cacheControl, this.contentDisposition, this.contentEncoding, + this.contentLanguage, this.contentType, null /* delegate */); + } + + /** + * Uses a user delegation key to sign these signature values to produce the proper SAS query parameters. + * + * @param delegationKey + * A {@link UserDelegationKey} object used to sign the SAS values. + * + * @param accountName + * Name of the account holding the resource this SAS is authorizing. + * + * @return {@link SASQueryParameters} + */ + public SASQueryParameters generateSASQueryParameters(UserDelegationKey delegationKey, String accountName) { + Utility.assertNotNull("delegationKey", delegationKey); + Utility.assertNotNull("accountName", accountName); + assertGenerateOK(); + + String resource = getResource(); + String verifiedPermissions = getVerifiedPermissions(); + + // Signature is generated on the un-url-encoded values. + final String stringToSign = stringToSign(verifiedPermissions, resource, delegationKey, accountName); + + String signature = null; + try { + signature = Utility.delegateComputeHmac256(delegationKey, stringToSign); + } catch (InvalidKeyException e) { + throw new Error(e); // The key should have been validated by now. If it is no longer valid here, we fail. + } + + return new SASQueryParameters(this.version, null, null, + this.protocol, this.startTime, this.expiryTime, this.ipRange, null /* identifier */, resource, + this.permissions, signature, this.cacheControl, this.contentDisposition, this.contentEncoding, + this.contentLanguage, this.contentType, delegationKey); + } + + /** + * Common assertions for generateSASQueryParameters overloads. + */ + private void assertGenerateOK() { + Utility.assertNotNull("version", this.version); + Utility.assertNotNull("containerName", this.containerName); + if (blobName == null && snapshotId != null) { + throw new IllegalArgumentException("Cannot set a snapshotId without a blobName."); + } + } + + /** + * Gets the resource string for SAS tokens based on object state. + */ + private String getResource() { + String resource = "c"; + if (!Utility.isNullOrEmpty(this.blobName)) { + resource = snapshotId != null && !snapshotId.isEmpty() ? "bs" : "b"; + } + + return resource; + } + + /** + * Gets the verified permissions string for SAS tokens based on object state. + */ + private String getVerifiedPermissions() { + String verifiedPermissions = null; + // Calling parse and toString guarantees the proper ordering and throws on invalid characters. + if (Utility.isNullOrEmpty(this.blobName)) { + if (this.permissions != null) { + verifiedPermissions = ContainerSASPermission.parse(this.permissions).toString(); + } + } else { + if (this.permissions != null) { + verifiedPermissions = BlobSASPermission.parse(this.permissions).toString(); + } + } + + return verifiedPermissions; + } + + private String getCanonicalName(String accountName) { + // Container: "/blob/account/containername" + // Blob: "/blob/account/containername/blobname" + StringBuilder canonicalName = new StringBuilder("/blob"); + canonicalName.append('/').append(accountName).append('/').append(this.containerName); + + if (!Utility.isNullOrEmpty(this.blobName)) { + canonicalName.append("/").append(this.blobName); + } + + return canonicalName.toString(); + } + + private String stringToSign(final String verifiedPermissions, final String resource, + final SharedKeyCredentials sharedKeyCredentials) { + return String.join("\n", + verifiedPermissions == null ? "" : verifiedPermissions, + this.startTime == null ? "" : Utility.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime), + this.expiryTime == null ? "" : Utility.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime), + getCanonicalName(sharedKeyCredentials.getAccountName()), + this.identifier == null ? "" : this.identifier, + this.ipRange == null ? (new IPRange()).toString() : this.ipRange.toString(), + this.protocol == null ? "" : protocol.toString(), + this.version == null ? "" : this.version, + resource == null ? "" : resource, + this.snapshotId == null ? "" : this.snapshotId, + this.cacheControl == null ? "" : this.cacheControl, + this.contentDisposition == null ? "" : this.contentDisposition, + this.contentEncoding == null ? "" : this.contentEncoding, + this.contentLanguage == null ? "" : this.contentLanguage, + this.contentType == null ? "" : this.contentType + ); + } + + private String stringToSign(final String verifiedPermissions, final String resource, + final UserDelegationKey key, final String accountName) { + return String.join("\n", + verifiedPermissions == null ? "" : verifiedPermissions, + this.startTime == null ? "" : Utility.ISO_8601_UTC_DATE_FORMATTER.format(this.startTime), + this.expiryTime == null ? "" : Utility.ISO_8601_UTC_DATE_FORMATTER.format(this.expiryTime), + getCanonicalName(accountName), + key.signedOid() == null ? "" : key.signedOid(), + key.signedTid() == null ? "" : key.signedTid(), + key.signedStart() == null ? "" : Utility.ISO_8601_UTC_DATE_FORMATTER.format(key.signedStart()), + key.signedExpiry() == null ? "" : Utility.ISO_8601_UTC_DATE_FORMATTER.format(key.signedExpiry()), + key.signedService() == null ? "" : key.signedService(), + key.signedVersion() == null ? "" : key.signedVersion(), + this.ipRange == null ? new IPRange().toString() : this.ipRange.toString(), + this.protocol == null ? "" : this.protocol.toString(), + this.version == null ? "" : this.version, + resource == null ? "" : resource, + this.snapshotId == null ? "" : this.snapshotId, + this.cacheControl == null ? "" : this.cacheControl, + this.contentDisposition == null ? "" : this.contentDisposition, + this.contentEncoding == null ? "" : this.contentEncoding, + this.contentLanguage == null ? "" : this.contentLanguage, + this.contentType == null ? "" : this.contentType + ); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/SetResponseFieldPolicy.java b/storage/client/src/main/java/com/azure/storage/blob/SetResponseFieldPolicy.java new file mode 100644 index 0000000000000..855e6d8222b1c --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/SetResponseFieldPolicy.java @@ -0,0 +1,26 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.http.HttpPipelineCallContext; +import com.azure.core.http.HttpPipelineNextPolicy; +import com.azure.core.http.HttpResponse; +import com.azure.core.http.policy.HttpPipelinePolicy; +import reactor.core.publisher.Mono; + +/** + * This is a factory which creates policies in an {@link com.azure.core.http.HttpPipeline} for setting the request property on the response + * object. This is necessary because of a bug in autorest which fails to set this property. In most cases, it is + * sufficient to allow the default pipeline to add this factory automatically and assume that it works. The factory and + * policy must only be used directly when creating a custom pipeline. + */ +final class SetResponseFieldPolicy implements HttpPipelinePolicy { + + @Override + public Mono process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { + return next.process() + .map(response -> + response.withRequest(context.httpRequest())); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/SharedKeyCredentials.java b/storage/client/src/main/java/com/azure/storage/blob/SharedKeyCredentials.java new file mode 100644 index 0000000000000..46dd1c46d37fd --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/SharedKeyCredentials.java @@ -0,0 +1,249 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.http.*; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.QueryStringDecoder; +import reactor.core.publisher.Mono; + +import javax.crypto.Mac; +import javax.crypto.spec.SecretKeySpec; +import java.io.UnsupportedEncodingException; +import java.net.URL; +import java.security.InvalidKeyException; +import java.security.NoSuchAlgorithmException; +import java.time.OffsetDateTime; +import java.util.ArrayList; +import java.util.Base64; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; + +/** + * SharedKeyCredentials are a means of signing and authenticating storage requests. The key can be obtained from the + * Azure portal. This factory will create policies which take care of all the details of creating strings to sign, + * signing them, and setting the Authentication header. While this is a common way of authenticating with the service, + * recommended practice is using {@link TokenCredentials}. Pass this as the credentials in the construction of a new + * {@link HttpPipeline} via the {@link StorageURL} type. + */ +public final class SharedKeyCredentials implements ICredentials { + + private final String accountName; + + private final byte[] accountKey; + + /** + * Initializes a new instance of SharedKeyCredentials contains an account's name and its primary or secondary + * accountKey. + * + * @param accountName The account name associated with the request. + * @param accountKey The account access key used to authenticate the request. + */ + public SharedKeyCredentials(String accountName, String accountKey) { + this.accountName = accountName; + this.accountKey = Base64.getDecoder().decode(accountKey); + } + + /** + * Gets the account name associated with the request. + * + * @return The account name. + */ + public String getAccountName() { + return accountName; + } + + /** + * Constructs a canonicalized string for signing a request. + * + * @param request The request to canonicalize. + * @return A canonicalized string. + */ + private String buildStringToSign(final HttpRequest request) { + final HttpHeaders httpHeaders = request.headers(); + String contentLength = getStandardHeaderValue(httpHeaders, Constants.HeaderConstants.CONTENT_LENGTH); + contentLength = contentLength.equals("0") ? Constants.EMPTY_STRING : contentLength; + + return String.join("\n", + request.httpMethod().toString(), + getStandardHeaderValue(httpHeaders, Constants.HeaderConstants.CONTENT_ENCODING), + getStandardHeaderValue(httpHeaders, Constants.HeaderConstants.CONTENT_LANGUAGE), + contentLength, + getStandardHeaderValue(httpHeaders, Constants.HeaderConstants.CONTENT_MD5), + getStandardHeaderValue(httpHeaders, Constants.HeaderConstants.CONTENT_TYPE), + // x-ms-date header exists, so don't sign date header + Constants.EMPTY_STRING, + getStandardHeaderValue(httpHeaders, Constants.HeaderConstants.IF_MODIFIED_SINCE), + getStandardHeaderValue(httpHeaders, Constants.HeaderConstants.IF_MATCH), + getStandardHeaderValue(httpHeaders, Constants.HeaderConstants.IF_NONE_MATCH), + getStandardHeaderValue(httpHeaders, Constants.HeaderConstants.IF_UNMODIFIED_SINCE), + getStandardHeaderValue(httpHeaders, Constants.HeaderConstants.RANGE), + getAdditionalXmsHeaders(httpHeaders), + getCanonicalizedResource(request.url()) + ); + } + + private String getAdditionalXmsHeaders(final HttpHeaders headers) { + // Add only headers that begin with 'x-ms-' + final List xmsHeaderNameArray = StreamSupport.stream(headers.spliterator(), false) + .filter(header -> header.value() != null) + .map(header -> header.name().toLowerCase(Locale.ROOT)) + .filter(lowerCaseHeader -> lowerCaseHeader.startsWith(Constants.PREFIX_FOR_STORAGE_HEADER)) + .collect(Collectors.toList()); // ArrayList under hood + + // TODO the stream filter solves an issue where we are adding null value headers. We should not add them in the first place, this filter is a perf hit, especially on large metadata collections + +// for (HttpHeader header : headers) { +// String lowerCaseHeader = header.name().toLowerCase(Locale.ROOT); +// if (lowerCaseHeader.startsWith(Constants.PREFIX_FOR_STORAGE_HEADER)) { +// xmsHeaderNameArray.add(lowerCaseHeader); +// } +// } + + if (xmsHeaderNameArray.isEmpty()) { + return Constants.EMPTY_STRING; + } + + Collections.sort(xmsHeaderNameArray); + + final StringBuilder canonicalizedHeaders = new StringBuilder(); + for (final String key : xmsHeaderNameArray) { + if (canonicalizedHeaders.length() > 0) { + canonicalizedHeaders.append('\n'); + } + + canonicalizedHeaders.append(key); + canonicalizedHeaders.append(':'); + canonicalizedHeaders.append(headers.value(key)); + } + + return canonicalizedHeaders.toString(); + } + + /** + * Canonicalized the resource to sign. + * + * @param requestURL A {@code java.net.URL} of the request. + * @return The canonicalized resource to sign. + */ + private String getCanonicalizedResource(URL requestURL) { + + // Resource path + final StringBuilder canonicalizedResource = new StringBuilder("/"); + canonicalizedResource.append(this.accountName); + + // Note that AbsolutePath starts with a '/'. + if (requestURL.getPath().length() > 0) { + canonicalizedResource.append(requestURL.getPath()); + } else { + canonicalizedResource.append('/'); + } + + // check for no query params and return + if (requestURL.getQuery() == null) { + return canonicalizedResource.toString(); + } + + // The URL object's query field doesn't include the '?'. The QueryStringDecoder expects it. + QueryStringDecoder queryDecoder = new QueryStringDecoder("?" + requestURL.getQuery()); + Map> queryParams = queryDecoder.parameters(); + + ArrayList queryParamNames = new ArrayList<>(queryParams.keySet()); + Collections.sort(queryParamNames); + + for (String queryParamName : queryParamNames) { + final List queryParamValues = queryParams.get(queryParamName); + Collections.sort(queryParamValues); + String queryParamValuesStr = String.join(",", queryParamValues.toArray(new String[]{})); + canonicalizedResource.append("\n").append(queryParamName.toLowerCase(Locale.ROOT)).append(":") + .append(queryParamValuesStr); + } + + // append to main string appendBlobClientBuilder the join of completed params with new line + return canonicalizedResource.toString(); + } + + /** + * Returns the standard header value from the specified connection request, or an empty string if no header value + * has been specified for the request. + * + * @param httpHeaders A {@code HttpHeaders} object that represents the headers for the request. + * @param headerName A {@code String} that represents the name of the header being requested. + * @return A {@code String} that represents the header value, or {@code null} if there is no corresponding + * header value for {@code headerName}. + */ + private String getStandardHeaderValue(final HttpHeaders httpHeaders, final String headerName) { + final String headerValue = httpHeaders.value(headerName); + + return headerValue == null ? Constants.EMPTY_STRING : headerValue; + } + + /** + * Computes a signature for the specified string using the HMAC-SHA256 algorithm. + * Package-private because it is used to generate SAS signatures. + * + * @param stringToSign The UTF-8-encoded string to sign. + * @return A {@code String} that contains the HMAC-SHA256-encoded signature. + * @throws InvalidKeyException If the accountKey is not a valid Base64-encoded string. + */ + String computeHmac256(final String stringToSign) throws InvalidKeyException { + try { + /* + We must get a new instance of the Mac calculator for each signature calculated because the instances are + not threadsafe and there is some suggestion online that they may not even be safe for reuse, so we use a + new one each time to be sure. + */ + Mac hmacSha256 = Mac.getInstance("HmacSHA256"); + hmacSha256.init(new SecretKeySpec(this.accountKey, "HmacSHA256")); + byte[] utf8Bytes = stringToSign.getBytes(Constants.UTF8_CHARSET); + return Base64.getEncoder().encodeToString(hmacSha256.doFinal(utf8Bytes)); + } catch (final UnsupportedEncodingException | NoSuchAlgorithmException e) { + throw new Error(e); + } + } + + + /** + * Sign the request. + * + * @param context + * The call context. + * @param next + * The next policy to process. + * + * @return A {@link Mono} representing the HTTP response that will arrive asynchronously. + */ + @Override + public Mono process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { + if (context.httpRequest().headers().value(Constants.HeaderConstants.DATE) == null) { + context.httpRequest().headers().put(Constants.HeaderConstants.DATE, + Utility.RFC_1123_GMT_DATE_FORMATTER.format(OffsetDateTime.now())); + } + final String stringToSign = this.buildStringToSign(context.httpRequest()); + try { + final String computedBase64Signature = this.computeHmac256(stringToSign); + context.httpRequest().headers().put(Constants.HeaderConstants.AUTHORIZATION, + "SharedKey " + this.accountName + ":" + computedBase64Signature); + } catch (Exception e) { + return Mono.error(e); + } + + Mono response = next.process(); + return response.doOnSuccess(response1 -> { + if (response1.statusCode() == HttpResponseStatus.FORBIDDEN.code()) { + // TODO temporarily disabled for user study deadline. should renable later + /*if (options.shouldLog(HttpPipelineLogLevel.ERROR)) { + options.log(HttpPipelineLogLevel.ERROR, + "===== HTTP Forbidden status, String-to-Sign:%n'%s'%n==================%n", + stringToSign); + }*/ + } + }); + } +} + diff --git a/storage/client/src/main/java/com/azure/storage/blob/StorageAccountInfo.java b/storage/client/src/main/java/com/azure/storage/blob/StorageAccountInfo.java new file mode 100644 index 0000000000000..4fea499e69e49 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/StorageAccountInfo.java @@ -0,0 +1,35 @@ +package com.azure.storage.blob; + +import com.azure.storage.blob.models.*; + +public class StorageAccountInfo { + + private SkuName skuName; + + private AccountKind accountKind; + + + StorageAccountInfo(BlobGetAccountInfoHeaders generatedResponseHeaders) { + this.skuName = generatedResponseHeaders.skuName(); + this.accountKind = generatedResponseHeaders.accountKind(); + } + + StorageAccountInfo(ContainerGetAccountInfoHeaders generatedResponseHeaders) { + this.skuName = generatedResponseHeaders.skuName(); + this.accountKind = generatedResponseHeaders.accountKind(); + } + + StorageAccountInfo(ServiceGetAccountInfoHeaders generatedResponseHeaders) { + this.skuName = generatedResponseHeaders.skuName(); + this.accountKind = generatedResponseHeaders.accountKind(); + } + + + public SkuName skuName() { + return skuName; + } + + public AccountKind accountKind() { + return accountKind; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/StorageAsyncClient.java b/storage/client/src/main/java/com/azure/storage/blob/StorageAsyncClient.java new file mode 100644 index 0000000000000..eaee79e1a1220 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/StorageAsyncClient.java @@ -0,0 +1,317 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.http.HttpPipeline; +import com.azure.core.http.rest.ResponseBase; +import com.azure.core.util.Context; +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; +import com.azure.storage.blob.models.ContainerItem; +import com.azure.storage.blob.models.ServicesListContainersSegmentResponse; +import com.azure.storage.blob.models.StorageServiceProperties; +import com.azure.storage.blob.models.StorageServiceStats; +import com.azure.storage.blob.models.UserDelegationKey; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.net.MalformedURLException; +import java.net.URL; +import java.time.OffsetDateTime; + +/** + * Client to a storage account. It may only be instantiated through a {@link StorageClientBuilder}. + * This class does not hold any state about a particular storage account but is + * instead a convenient way of sending off appropriate requests to the resource on the service. + * It may also be used to construct URLs to blobs and containers. + * + *

+ * This client contains operations on a blob. Operations on a container are available on {@link ContainerAsyncClient} + * through {@link #getContainerAsyncClient(String)}, and operations on a blob are available on {@link BlobAsyncClient}. + * + *

+ * Please see here for more + * information on containers. + * + *

+ * Note this client is an async client that returns reactive responses from Spring Reactor Core + * project (https://projectreactor.io/). Calling the methods in this client will NOT + * start the actual network operation, until {@code .subscribe()} is called on the reactive response. + * You can simply convert one of these responses to a {@link java.util.concurrent.CompletableFuture} + * object through {@link Mono#toFuture()}. + */ +public final class StorageAsyncClient { + + StorageAsyncRawClient storageAsyncRawClient; + private StorageClientBuilder builder; + + /** + * Package-private constructor for use by {@link StorageClientBuilder}. + * @param azureBlobStorage the API client for blob storage API + */ + StorageAsyncClient(AzureBlobStorageImpl azureBlobStorage) { + this.storageAsyncRawClient = new StorageAsyncRawClient(azureBlobStorage); + } + + /** + * Static method for getting a new builder for this class. + * + * @return + * A new {@link StorageClientBuilder} instance. + */ + public static StorageClientBuilder storageClientBuilder() { + return new StorageClientBuilder(); + } + + /** + * Package-private constructor for use by {@link StorageClientBuilder}. + * @param builder the storage account client builder + */ + StorageAsyncClient(StorageClientBuilder builder) { + this.builder = builder; + this.storageAsyncRawClient = new StorageAsyncRawClient(builder.buildImpl()); + } + + /** + * Initializes a {@link ContainerAsyncClient} object pointing to the specified container. This method does not create a + * container. It simply constructs the URL to the container and offers access to methods relevant to containers. + * + * @param containerName + * The name of the container to point to. + * @return + * A {@link ContainerAsyncClient} object pointing to the specified container + */ + public ContainerAsyncClient getContainerAsyncClient(String containerName) { + try { + return new ContainerAsyncClient(this.builder.copyAsContainerBuilder().endpoint(Utility.appendToURLPath(new URL(builder.endpoint()), containerName).toString())); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + } + + /** + * Returns a reactive Publisher emitting all the containers in this account lazily as needed. For more information, see + * the Azure Docs. + * + * @return + * A reactive response emitting the list of containers. + */ + public Flux listContainers() { + return this.listContainers(new ListContainersOptions(), null); + } + + /** + * Returns a reactive Publisher emitting all the containers in this account lazily as needed. For more information, see + * the Azure Docs. + * + * @param options + * A {@link ListContainersOptions} which specifies what data should be returned by the service. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response emitting the list of containers. + */ + public Flux listContainers(ListContainersOptions options, Context context) { + return storageAsyncRawClient + .listContainersSegment(null, options, context) + .flatMapMany(response -> listContainersHelper(response.value().marker(), options, context, response)); + } + + private Flux listContainersHelper(String marker, ListContainersOptions options, Context context, + ServicesListContainersSegmentResponse response){ + Flux result = Flux.fromIterable(response.value().containerItems()); + if (response.value().nextMarker() != null) { + // Recursively add the continuation items to the observable. + result = result.concatWith(storageAsyncRawClient.listContainersSegment(marker, options, + context) + .flatMapMany((r) -> + listContainersHelper(response.value().nextMarker(), options, context, r))); + } + + return result; + } + + /** + * Gets the properties of a storage account’s Blob service. For more information, see the + * Azure Docs. + * + * @return + * A reactive response containing the storage account properties. + */ + public Mono getProperties() { + return this.getProperties(null); + } + + /** + * Gets the properties of a storage account’s Blob service. For more information, see the + * Azure Docs. + * + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response containing the storage account properties. + */ + public Mono getProperties(Context context) { + return storageAsyncRawClient + .getProperties(context) + .map(ResponseBase::value); + } + + /** + * Sets properties for a storage account's Blob service endpoint. For more information, see the + * Azure Docs. + * Note that setting the default service version has no effect when using this client because this client explicitly + * sets the version header on each request, overriding the default. + * + * @param properties + * Configures the service. + * + * @return + * A reactive response containing the storage account properties. + */ + public Mono setProperties(StorageServiceProperties properties) { + return this.setProperties(properties, null); + } + + /** + * Sets properties for a storage account's Blob service endpoint. For more information, see the + * Azure Docs. + * Note that setting the default service version has no effect when using this client because this client explicitly + * sets the version header on each request, overriding the default. + * + * @param properties + * Configures the service. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response containing the storage account properties. + */ + public Mono setProperties(StorageServiceProperties properties, Context context) { + return storageAsyncRawClient + .setProperties(properties, context) + .then(); + } + + /** + * Gets a user delegation key for use with this account's blob storage. + * Note: This method call is only valid when using {@link TokenCredentials} in this object's {@link HttpPipeline}. + * + * @param start + * Start time for the key's validity. Null indicates immediate start. + * @param expiry + * Expiration of the key's validity. + * + * @return + * A reactive response containing the user delegation key. + */ + public Mono getUserDelegationKey(OffsetDateTime start, OffsetDateTime expiry) { + return this.getUserDelegationKey(start, expiry, null); + } + + /** + * Gets a user delegation key for use with this account's blob storage. + * Note: This method call is only valid when using {@link TokenCredentials} in this object's {@link HttpPipeline}. + * + * @param start + * Start time for the key's validity. Null indicates immediate start. + * @param expiry + * Expiration of the key's validity. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response containing the user delegation key. + */ + public Mono getUserDelegationKey(OffsetDateTime start, OffsetDateTime expiry, + Context context) { + return storageAsyncRawClient + .getUserDelegationKey(start, expiry, context) + .map(ResponseBase::value); + } + + /** + * Retrieves statistics related to replication for the Blob service. It is only available on the secondary + * location endpoint when read-access geo-redundant replication is enabled for the storage account. For more + * information, see the + * Azure Docs. + * + * @return + * A reactive response containing the storage account statistics. + */ + public Mono getStatistics() { + return this.getStatistics(null); + } + + /** + * Retrieves statistics related to replication for the Blob service. It is only available on the secondary + * location endpoint when read-access geo-redundant replication is enabled for the storage account. For more + * information, see the + * Azure Docs. + * + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response containing the storage account statistics. + */ + public Mono getStatistics(Context context) { + return storageAsyncRawClient + .getStatistics(context) + .map(ResponseBase::value); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the + * Azure Docs. + * + * @return + * A reactive response containing the storage account info. + */ + public Mono getAccountInfo() { + return this.getAccountInfo(null); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the + * Azure Docs. + * + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * A reactive response containing the storage account info. + */ + public Mono getAccountInfo(Context context) { + return storageAsyncRawClient + .getAccountInfo(context) + .map(ResponseBase::deserializedHeaders) + .map(StorageAccountInfo::new); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/StorageAsyncRawClient.java b/storage/client/src/main/java/com/azure/storage/blob/StorageAsyncRawClient.java new file mode 100644 index 0000000000000..27dc3a57c4e70 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/StorageAsyncRawClient.java @@ -0,0 +1,327 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.http.HttpPipeline; +import com.azure.core.util.Context; +import com.azure.storage.blob.implementation.AzureBlobStorageBuilder; +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; +import com.azure.storage.blob.models.KeyInfo; +import com.azure.storage.blob.models.ServicesGetAccountInfoResponse; +import com.azure.storage.blob.models.ServicesGetPropertiesResponse; +import com.azure.storage.blob.models.ServicesGetStatisticsResponse; +import com.azure.storage.blob.models.ServicesGetUserDelegationKeyResponse; +import com.azure.storage.blob.models.ServicesListContainersSegmentResponse; +import com.azure.storage.blob.models.ServicesSetPropertiesResponse; +import com.azure.storage.blob.models.StorageServiceProperties; +import reactor.core.publisher.Mono; + +import java.net.MalformedURLException; +import java.net.URL; +import java.time.OffsetDateTime; + +import static com.azure.storage.blob.Utility.postProcessResponse; + +/** + * Represents a URL to a storage service. This class does not hold any state about a particular storage account but is + * instead a convenient way of sending off appropriate requests to the resource on the service. + * It may also be used to construct URLs to blobs and containers. + * Please see here for more + * information on containers. + */ +final class StorageAsyncRawClient { + + AzureBlobStorageImpl azureBlobStorage; + + /** + * Creates a {@code ServiceURL} object pointing to the account specified by the URL and using the provided pipeline + * to make HTTP requests. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_url "Sample code for ServiceURL constructor")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public StorageAsyncRawClient(AzureBlobStorageImpl azureBlobStorage) { + this.azureBlobStorage = azureBlobStorage; + } + + /** + * Returns a Mono segment of containers starting from the specified Marker. + * Use an empty marker to start enumeration from the beginning. Container names are returned in lexicographic order. + * After getting a segment, process it, and then call ListContainers again (passing the the previously-returned + * Marker) to get the next segment. For more information, see + * the Azure Docs. + * + * @param marker + * Identifies the portion of the list to be returned with the next list operation. + * This value is returned in the response of a previous list operation as the + * ListContainersSegmentResponse.body().nextMarker(). Set to null to list the first segment. + * @param options + * A {@link ListContainersOptions} which specifies what data should be returned by the service. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_list "Sample code for ServiceURL.listContainersSegment")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_list_helper "Helper code for ServiceURL.listContainersSegment")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono listContainersSegment(String marker, + ListContainersOptions options) { + return this.listContainersSegment(marker, options, null); + } + + /** + * Returns a Mono segment of containers starting from the specified Marker. + * Use an empty marker to start enumeration from the beginning. Container names are returned in lexicographic order. + * After getting a segment, process it, and then call ListContainers again (passing the the previously-returned + * Marker) to get the next segment. For more information, see + * the Azure Docs. + * + * @param marker + * Identifies the portion of the list to be returned with the next list operation. + * This value is returned in the response of a previous list operation as the + * ListContainersSegmentResponse.body().nextMarker(). Set to null to list the first segment. + * @param options + * A {@link ListContainersOptions} which specifies what data should be returned by the service. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_list "Sample code for ServiceURL.listContainersSegment")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_list_helper "Helper code for ServiceURL.listContainersSegment")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono listContainersSegment(String marker, + ListContainersOptions options, Context context) { + options = options == null ? new ListContainersOptions() : options; + context = context == null ? Context.NONE : context; + + return postProcessResponse( + this.azureBlobStorage.services().listContainersSegmentWithRestResponseAsync( + options.prefix(), marker, options.maxResults(), options.details().toIncludeType(), null, + null, context)); + } + + /** + * Gets the properties of a storage account’s Blob service. For more information, see the + * Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_getsetprops "Sample code for ServiceURL.getProperties")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono getProperties() { + return this.getProperties(null); + } + + /** + * Gets the properties of a storage account’s Blob service. For more information, see the + * Azure Docs. + * + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_getsetprops "Sample code for ServiceURL.getProperties")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono getProperties(Context context) { + context = context == null ? Context.NONE : context; + + return postProcessResponse( + this.azureBlobStorage.services().getPropertiesWithRestResponseAsync(null, null, context)); + } + + /** + * Sets properties for a storage account's Blob service endpoint. For more information, see the + * Azure Docs. + * Note that setting the default service version has no effect when using this client because this client explicitly + * sets the version header on each request, overriding the default. + * + * @param properties + * Configures the service. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_getsetprops "Sample code for ServiceURL.setProperties")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono setProperties(StorageServiceProperties properties) { + return this.setProperties(properties, null); + } + + /** + * Sets properties for a storage account's Blob service endpoint. For more information, see the + * Azure Docs. + * Note that setting the default service version has no effect when using this client because this client explicitly + * sets the version header on each request, overriding the default. + * + * @param properties + * Configures the service. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_getsetprops "Sample code for ServiceURL.setProperties")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono setProperties(StorageServiceProperties properties, Context context) { + context = context == null ? Context.NONE : context; + + return postProcessResponse( + this.azureBlobStorage.services().setPropertiesWithRestResponseAsync(properties, null, null, context)); + } + + /** + * Gets a user delegation key for use with this account's blob storage. + * Note: This method call is only valid when using {@link TokenCredentials} in this object's {@link HttpPipeline}. + * + * @param start + * Start time for the key's validity. Null indicates immediate start. + * @param expiry + * Expiration of the key's validity. + * + * @return Emits the successful response. + */ + public Mono getUserDelegationKey(OffsetDateTime start, OffsetDateTime expiry) { + return this.getUserDelegationKey(start, expiry, null); + } + + /** + * Gets a user delegation key for use with this account's blob storage. + * Note: This method call is only valid when using {@link TokenCredentials} in this object's {@link HttpPipeline}. + * + * @param start + * Start time for the key's validity. Null indicates immediate start. + * @param expiry + * Expiration of the key's validity. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + */ + public Mono getUserDelegationKey(OffsetDateTime start, OffsetDateTime expiry, + Context context) { + Utility.assertNotNull("expiry", expiry); + if (start != null && !start.isBefore(expiry)) { + throw new IllegalArgumentException("`start` must be null or a datetime before `expiry`."); + } + + context = context == null ? Context.NONE : context; + + return postProcessResponse( + this.azureBlobStorage.services().getUserDelegationKeyWithRestResponseAsync( + new KeyInfo() + .start(start == null ? "" : Utility.ISO_8601_UTC_DATE_FORMATTER.format(start)) + .expiry(Utility.ISO_8601_UTC_DATE_FORMATTER.format(expiry)), + null, null, context) + ); + } + + /** + * Retrieves statistics related to replication for the Blob service. It is only available on the secondary + * location endpoint when read-access geo-redundant replication is enabled for the storage account. For more + * information, see the + * Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_stats "Sample code for ServiceURL.getStats")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono getStatistics() { + return this.getStatistics(null); + } + + /** + * Retrieves statistics related to replication for the Blob service. It is only available on the secondary + * location endpoint when read-access geo-redundant replication is enabled for the storage account. For more + * information, see the + * Azure Docs. + * + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_stats "Sample code for ServiceURL.getStats")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono getStatistics(Context context) { + context = context == null ? Context.NONE : context; + + return postProcessResponse( + this.azureBlobStorage.services().getStatisticsWithRestResponseAsync(null, null, context)); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the + * Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=account_info "Sample code for ServiceURL.getAccountInfo")] \n + * For more samples, please see the [Samples file] (https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono getAccountInfo() { + return this.getAccountInfo(null); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the + * Azure Docs. + * + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.azure.core.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=account_info "Sample code for ServiceURL.getAccountInfo")] \n + * For more samples, please see the [Samples file] (https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Mono getAccountInfo(Context context) { + context = context == null ? Context.NONE : context; + + return postProcessResponse( + this.azureBlobStorage.services().getAccountInfoWithRestResponseAsync(context)); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/StorageClient.java b/storage/client/src/main/java/com/azure/storage/blob/StorageClient.java new file mode 100644 index 0000000000000..11694ac2c4017 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/StorageClient.java @@ -0,0 +1,317 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.http.HttpPipeline; +import com.azure.core.util.Context; +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; +import com.azure.storage.blob.models.ContainerItem; +import com.azure.storage.blob.models.StorageServiceProperties; +import com.azure.storage.blob.models.StorageServiceStats; +import com.azure.storage.blob.models.UserDelegationKey; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.net.MalformedURLException; +import java.net.URL; +import java.time.Duration; +import java.time.OffsetDateTime; + +/** + * Client to a storage account. It may only be instantiated through a {@link StorageClientBuilder}. + * This class does not hold any state about a particular storage account but is + * instead a convenient way of sending off appropriate requests to the resource on the service. + * It may also be used to construct URLs to blobs and containers. + * + *

+ * This client contains operations on a blob. Operations on a container are available on {@link ContainerClient} + * through {@link #getContainerClient(String)}, and operations on a blob are available on {@link BlobClient}. + * + *

+ * Please see here for more + * information on containers. + */ +public final class StorageClient { + + private StorageAsyncClient storageAsyncClient; + private StorageClientBuilder builder; + + /** + * Package-private constructor for use by {@link StorageClientBuilder}. + * @param azureBlobStorage the API client for blob storage API + */ + StorageClient(AzureBlobStorageImpl azureBlobStorage) { + this.storageAsyncClient = new StorageAsyncClient(azureBlobStorage); + } + + /** + * Static method for getting a new builder for this class. + * + * @return + * A new {@link StorageClientBuilder} instance. + */ + public static StorageClientBuilder storageClientBuilder() { + return new StorageClientBuilder(); + } + + /** + * Package-private constructor for use by {@link StorageClientBuilder}. + * @param builder the storage account client builder + */ + StorageClient(StorageClientBuilder builder) { + this.builder = builder; + this.storageAsyncClient = new StorageAsyncClient(builder); + } + + /** + * Initializes a {@link ContainerClient} object pointing to the specified container. This method does not create a + * container. It simply constructs the URL to the container and offers access to methods relevant to containers. + * + * @param containerName + * The name of the container to point to. + * @return + * A {@link ContainerClient} object pointing to the specified container + */ + public ContainerClient getContainerClient(String containerName) { + try { + return new ContainerClient(this.builder.copyAsContainerBuilder().endpoint(Utility.appendToURLPath(new URL(builder.endpoint()), containerName).toString())); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + } + + /** + * Returns a lazy loaded list of containers in this account. The returned {@link Iterable} can be iterated + * through while new items are automatically retrieved as needed. For more information, see + * the Azure Docs. + * + * @return + * The list of containers. + */ + public Iterable listContainers() { + return this.listContainers(new ListContainersOptions(), null); + } + + /** + * Returns a lazy loaded list of containers in this account. The returned {@link Iterable} can be iterated + * through while new items are automatically retrieved as needed. For more information, see + * the Azure Docs. + * + * @param options + * A {@link ListContainersOptions} which specifies what data should be returned by the service. + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * + * @return + * The list of containers. + */ + public Iterable listContainers(ListContainersOptions options, Duration timeout) { + Flux response = storageAsyncClient.listContainers(options, null); + + return timeout == null ? + response.toIterable(): + response.timeout(timeout).toIterable(); + } + + /** + * Gets the properties of a storage account’s Blob service. For more information, see the + * Azure Docs. + * + * @return + * The storage account properties. + */ + public StorageServiceProperties getProperties() { + return this.getProperties(null, null); + } + + /** + * Gets the properties of a storage account’s Blob service. For more information, see the + * Azure Docs. + * + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * The storage account properties. + */ + public StorageServiceProperties getProperties(Duration timeout, Context context) { + + Mono response = storageAsyncClient.getProperties(context); + + return timeout == null ? + response.block(): + response.block(timeout); + } + + /** + * Sets properties for a storage account's Blob service endpoint. For more information, see the + * Azure Docs. + * Note that setting the default service version has no effect when using this client because this client explicitly + * sets the version header on each request, overriding the default. + * + * @param properties + * Configures the service. + * + * @return + * The storage account properties. + */ + public void setProperties(StorageServiceProperties properties) { + this.setProperties(properties, null, null); + } + + /** + * Sets properties for a storage account's Blob service endpoint. For more information, see the + * Azure Docs. + * Note that setting the default service version has no effect when using this client because this client explicitly + * sets the version header on each request, overriding the default. + * + * @param properties + * Configures the service. + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * The storage account properties. + */ + public void setProperties(StorageServiceProperties properties, Duration timeout, Context context) { + Mono response = storageAsyncClient.setProperties(properties, context); + + if (timeout == null) { + response.block(); + } else { + response.block(timeout); + } + } + + /** + * Gets a user delegation key for use with this account's blob storage. + * Note: This method call is only valid when using {@link TokenCredentials} in this object's {@link HttpPipeline}. + * + * @param start + * Start time for the key's validity. Null indicates immediate start. + * @param expiry + * Expiration of the key's validity. + * + * @return + * The user delegation key. + */ + public UserDelegationKey getUserDelegationKey(OffsetDateTime start, OffsetDateTime expiry) { + return this.getUserDelegationKey(start, expiry, null, null); + } + + /** + * Gets a user delegation key for use with this account's blob storage. + * Note: This method call is only valid when using {@link TokenCredentials} in this object's {@link HttpPipeline}. + * + * @param start + * Start time for the key's validity. Null indicates immediate start. + * @param expiry + * Expiration of the key's validity. + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * The user delegation key. + */ + public UserDelegationKey getUserDelegationKey(OffsetDateTime start, OffsetDateTime expiry, + Duration timeout, Context context) { + Mono response = storageAsyncClient.getUserDelegationKey(start, expiry, context); + + return timeout == null ? + response.block(): + response.block(timeout); + } + + /** + * Retrieves statistics related to replication for the Blob service. It is only available on the secondary + * location endpoint when read-access geo-redundant replication is enabled for the storage account. For more + * information, see the + * Azure Docs. + * + * @return + * The storage account statistics. + */ + public StorageServiceStats getStatistics() { + return this.getStatistics(null, null); + } + + /** + * Retrieves statistics related to replication for the Blob service. It is only available on the secondary + * location endpoint when read-access geo-redundant replication is enabled for the storage account. For more + * information, see the + * Azure Docs. + * + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * The storage account statistics. + */ + public StorageServiceStats getStatistics(Duration timeout, Context context) { + Mono response = storageAsyncClient.getStatistics(context); + + return timeout == null ? + response.block(): + response.block(timeout); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the + * Azure Docs. + * + * @return + * The storage account info. + */ + public StorageAccountInfo getAccountInfo() { + return this.getAccountInfo(null, null); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the + * Azure Docs. + * + * @param timeout + * An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return + * The storage account info. + */ + public StorageAccountInfo getAccountInfo(Duration timeout, Context context) { + Mono response = storageAsyncClient.getAccountInfo(context); + + return timeout == null ? + response.block(): + response.block(timeout); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/StorageClientBuilder.java b/storage/client/src/main/java/com/azure/storage/blob/StorageClientBuilder.java new file mode 100644 index 0000000000000..eebdd673a412b --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/StorageClientBuilder.java @@ -0,0 +1,243 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.configuration.Configuration; +import com.azure.core.http.HttpClient; +import com.azure.core.http.HttpPipeline; +import com.azure.core.http.policy.AddDatePolicy; +import com.azure.core.http.policy.HttpLogDetailLevel; +import com.azure.core.http.policy.HttpLoggingPolicy; +import com.azure.core.http.policy.HttpPipelinePolicy; +import com.azure.core.http.policy.RequestIdPolicy; +import com.azure.core.http.policy.RetryPolicy; +import com.azure.core.http.policy.UserAgentPolicy; +import com.azure.core.implementation.util.ImplUtils; +import com.azure.storage.blob.implementation.AzureBlobStorageBuilder; +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; + +import java.net.MalformedURLException; +import java.net.URL; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * Fluent StorageClientBuilder for instantiating a {@link StorageClient} or {@link StorageAsyncClient}. + * + *

+ * An instance of this builder may only be created from static method {@link StorageClient#storageClientBuilder()}. + * The following information must be provided on this builder: + * + *

+ * + *

+ * Once all the configurations are set on this builder, call {@code .buildClient()} to create a + * {@link StorageClient} or {@code .buildAsyncClient()} to create a {@link StorageAsyncClient}. + */ +public final class StorageClientBuilder { + private static final String ACCOUNT_NAME = "AccountName".toLowerCase(); + private static final String ACCOUNT_KEY = "AccountKey".toLowerCase(); + + private final List policies; + + private URL endpoint; + private ICredentials credentials = new AnonymousCredentials(); + private HttpClient httpClient; + private HttpLogDetailLevel logLevel; + private RetryPolicy retryPolicy; + private Configuration configuration; + + public StorageClientBuilder() { + retryPolicy = new RetryPolicy(); + logLevel = HttpLogDetailLevel.NONE; + policies = new ArrayList<>(); + } + + private StorageClientBuilder(List policies, URL endpoint, ICredentials credentials, + HttpClient httpClient, HttpLogDetailLevel logLevel, RetryPolicy retryPolicy, Configuration configuration) { + this.policies = policies; + this.endpoint = endpoint; + this.credentials = credentials; + this.httpClient = httpClient; + this.logLevel = logLevel; + this.retryPolicy = retryPolicy; + this.configuration = configuration; + } + + StorageClientBuilder copyBuilder() { + return new StorageClientBuilder(this.policies, this.endpoint, this.credentials, this.httpClient, this.logLevel, this.retryPolicy, this.configuration); + } + + ContainerClientBuilder copyAsContainerBuilder() { + return new ContainerClientBuilder(this.policies, this.endpoint, this.credentials, this.httpClient, this.logLevel, this.retryPolicy, this.configuration); + } + + /** + * Constructs an instance of ContainerAsyncClient based on the configurations stored in the appendBlobClientBuilder. + * @return a new client instance + */ + AzureBlobStorageImpl buildImpl() { + Objects.requireNonNull(endpoint); + + // Closest to API goes first, closest to wire goes last. + final List policies = new ArrayList<>(); + + policies.add(new UserAgentPolicy(BlobConfiguration.NAME, BlobConfiguration.VERSION)); + policies.add(new RequestIdPolicy()); + policies.add(new AddDatePolicy()); + policies.add(credentials); // This needs to be a different credential type. + + policies.add(retryPolicy); + + policies.addAll(this.policies); + policies.add(new HttpLoggingPolicy(logLevel)); + + HttpPipeline pipeline = HttpPipeline.builder() + .policies(policies.toArray(new HttpPipelinePolicy[0])) + .httpClient(httpClient) + .build(); + + return new AzureBlobStorageBuilder() + .url(endpoint.toString()) + .pipeline(pipeline) + .build(); + } + + /** + * @return a {@link StorageClient} created from the configurations in this builder. + */ + public StorageClient buildClient() { + return new StorageClient(this); + } + + /** + * @return a {@link StorageAsyncClient} created from the configurations in this builder. + */ + public StorageAsyncClient buildAsyncClient() { + return new StorageAsyncClient(this); + } + + /** + * Sets the blob service endpoint, additionally parses it for information (SAS token, queue name) + * @param endpoint URL of the service + * @return the updated StorageClientBuilder object + */ + public StorageClientBuilder endpoint(String endpoint) { + Objects.requireNonNull(endpoint); + try { + this.endpoint = new URL(endpoint); + } catch (MalformedURLException ex) { + throw new IllegalArgumentException("The Azure Storage Queue endpoint url is malformed."); + } + + return this; + } + + String endpoint() { + return this.endpoint.toString(); + } + + /** + * Sets the credentials used to authorize requests sent to the service + * @param credentials authorization credentials + * @return the updated ContainerClientBuilder object + */ + public StorageClientBuilder credentials(SharedKeyCredentials credentials) { + this.credentials = credentials; + return this; + } + + /** + * Sets the credentials used to authorize requests sent to the service + * @param credentials authorization credentials + * @return the updated StorageClientBuilder object + */ + public StorageClientBuilder credentials(TokenCredentials credentials) { + this.credentials = credentials; + return this; + } + + /** + * Clears the credentials used to authorize requests sent to the service + * @return the updated StorageClientBuilder object + */ + public StorageClientBuilder anonymousCredentials() { + this.credentials = new AnonymousCredentials(); + return this; + } + + /** + * Sets the connection string for the service, parses it for authentication information (account name, account key) + * @param connectionString connection string from access keys section + * @return the updated StorageClientBuilder object + */ + public StorageClientBuilder connectionString(String connectionString) { + Objects.requireNonNull(connectionString); + + Map connectionKVPs = new HashMap<>(); + for (String s : connectionString.split(";")) { + String[] kvp = s.split("=", 2); + connectionKVPs.put(kvp[0].toLowerCase(), kvp[1]); + } + + String accountName = connectionKVPs.get(ACCOUNT_NAME); + String accountKey = connectionKVPs.get(ACCOUNT_KEY); + + if (ImplUtils.isNullOrEmpty(accountName) || ImplUtils.isNullOrEmpty(accountKey)) { + throw new IllegalArgumentException("Connection string must contain 'AccountName' and 'AccountKey'."); + } + + // Use accountName and accountKey to get the SAS token using the credential class. + credentials = new SharedKeyCredentials(accountName, accountKey); + + return this; + } + + /** + * Sets the http client used to send service requests + * @param httpClient http client to send requests + * @return the updated StorageClientBuilder object + */ + public StorageClientBuilder httpClient(HttpClient httpClient) { + this.httpClient = httpClient; + return this; + } + + /** + * Adds a pipeline policy to apply on each request sent + * @param pipelinePolicy a pipeline policy + * @return the updated StorageClientBuilder object + */ + public StorageClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { + this.policies.add(pipelinePolicy); + return this; + } + + /** + * Sets the logging level for service requests + * @param logLevel logging level + * @return the updated StorageClientBuilder object + */ + public StorageClientBuilder httpLogDetailLevel(HttpLogDetailLevel logLevel) { + this.logLevel = logLevel; + return this; + } + + /** + * Sets the configuration object used to retrieve environment configuration values used to buildClient the client with + * when they are not set in the appendBlobClientBuilder, defaults to Configuration.NONE + * @param configuration configuration store + * @return the updated StorageClientBuilder object + */ + public StorageClientBuilder configuration(Configuration configuration) { + this.configuration = configuration; + return this; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/StorageException.java b/storage/client/src/main/java/com/azure/storage/blob/StorageException.java new file mode 100644 index 0000000000000..6d794a2a604da --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/StorageException.java @@ -0,0 +1,53 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.exception.HttpResponseException; +import com.azure.storage.blob.models.StorageErrorCode; +import com.azure.storage.blob.models.StorageErrorException; +import reactor.core.publisher.Mono; + +/** + * A {@code StorageException} is thrown whenever Azure Storage successfully returns an error code that is not 200-level. + * Users can inspect the status code and error code to determine the cause of the error response. The exception message + * may also contain more detailed information depending on the type of error. The user may also inspect the raw HTTP + * response or call toString to get the full payload of the error response if present. + * Note that even some expected "errors" will be thrown as a {@code StorageException}. For example, some users may + * perform a getProperties request on an entity to determine whether it exists or not. If it does not exists, an + * exception will be thrown even though this may be considered an expected indication of absence in this case. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=exception "Sample code for StorageExceptions")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ +public final class StorageException extends HttpResponseException { + + private final String message; + + StorageException(StorageErrorException e, String responseBody) { + super(e.getMessage(), e.response(), e); + this.message = responseBody; + } + + /** + * @return The error code returned by the service. + */ + public StorageErrorCode errorCode() { + return StorageErrorCode.fromString(super.response().headers().value(Constants.HeaderConstants.ERROR_CODE)); + } + + /** + * @return The message returned by the service. + */ + public String message() { + return this.message; + } + + /** + * @return The status code on the response. + */ + public int statusCode() { + return super.response().statusCode(); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/StorageRawClient.java b/storage/client/src/main/java/com/azure/storage/blob/StorageRawClient.java new file mode 100644 index 0000000000000..2fae8cc690b64 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/StorageRawClient.java @@ -0,0 +1,309 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.http.HttpPipeline; +import com.azure.core.util.Context; +import com.azure.storage.blob.implementation.AzureBlobStorageImpl; +import com.azure.storage.blob.models.ServicesGetAccountInfoResponse; +import com.azure.storage.blob.models.ServicesGetPropertiesResponse; +import com.azure.storage.blob.models.ServicesGetStatisticsResponse; +import com.azure.storage.blob.models.ServicesGetUserDelegationKeyResponse; +import com.azure.storage.blob.models.ServicesListContainersSegmentResponse; +import com.azure.storage.blob.models.ServicesSetPropertiesResponse; +import com.azure.storage.blob.models.StorageServiceProperties; +import reactor.core.publisher.Mono; + +import java.time.Duration; +import java.time.OffsetDateTime; + +/** + * Represents a URL to a storage service. This class does not hold any state about a particular storage account but is + * instead a convenient way of sending off appropriate requests to the resource on the service. + * It may also be used to construct URLs to blobs and containers. + * Please see here for more + * information on containers. + */ +final class StorageRawClient { + + StorageAsyncRawClient storageAsyncRawClient; + + /** + * Creates a {@code ServiceURL} object pointing to the account specified by the URL and using the provided pipeline + * to make HTTP requests. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_url "Sample code for ServiceURL constructor")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public StorageRawClient(AzureBlobStorageImpl azureBlobStorage) { + this.storageAsyncRawClient = new StorageAsyncRawClient(azureBlobStorage); + } + + /** + * Returns a Mono segment of containers starting from the specified Marker. + * Use an empty marker to start enumeration from the beginning. Container names are returned in lexicographic order. + * After getting a segment, process it, and then call ListContainers again (passing the the previously-returned + * Marker) to get the next segment. For more information, see + * the Azure Docs. + * + * @param marker + * Identifies the portion of the list to be returned with the next list operation. + * This value is returned in the response of a previous list operation as the + * ListContainersSegmentResponse.body().nextMarker(). Set to null to list the first segment. + * @param options + * A {@link ListContainersOptions} which specifies what data should be returned by the service. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_list "Sample code for ServiceURL.listContainersSegment")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_list_helper "Helper code for ServiceURL.listContainersSegment")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ServicesListContainersSegmentResponse listContainersSegment(String marker, + ListContainersOptions options) { + return this.listContainersSegment(marker, options,null, null); + } + + /** + * Returns a Mono segment of containers starting from the specified Marker. + * Use an empty marker to start enumeration from the beginning. Container names are returned in lexicographic order. + * After getting a segment, process it, and then call ListContainers again (passing the the previously-returned + * Marker) to get the next segment. For more information, see + * the Azure Docs. + * + * @param marker + * Identifies the portion of the list to be returned with the next list operation. + * This value is returned in the response of a previous list operation as the + * ListContainersSegmentResponse.body().nextMarker(). Set to null to list the first segment. + * @param options + * A {@link ListContainersOptions} which specifies what data should be returned by the service. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_list "Sample code for ServiceURL.listContainersSegment")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_list_helper "Helper code for ServiceURL.listContainersSegment")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ServicesListContainersSegmentResponse listContainersSegment(String marker, + ListContainersOptions options, Duration timeout, Context context) { + Mono response = storageAsyncRawClient.listContainersSegment(marker, options, context); + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Gets the properties of a storage account’s Blob service. For more information, see the + * Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_getsetprops "Sample code for ServiceURL.getProperties")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ServicesGetPropertiesResponse getProperties() { + return this.getProperties(null, null); + } + + /** + * Gets the properties of a storage account’s Blob service. For more information, see the + * Azure Docs. + * + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_getsetprops "Sample code for ServiceURL.getProperties")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ServicesGetPropertiesResponse getProperties(Duration timeout, Context context) { + Mono response = storageAsyncRawClient.getProperties(context); + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Sets properties for a storage account's Blob service endpoint. For more information, see the + * Azure Docs. + * Note that setting the default service version has no effect when using this client because this client explicitly + * sets the version header on each request, overriding the default. + * + * @param properties + * Configures the service. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_getsetprops "Sample code for ServiceURL.setProperties")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ServicesSetPropertiesResponse setProperties(StorageServiceProperties properties) { + return this.setProperties(properties, null, null); + } + + /** + * Sets properties for a storage account's Blob service endpoint. For more information, see the + * Azure Docs. + * Note that setting the default service version has no effect when using this client because this client explicitly + * sets the version header on each request, overriding the default. + * + * @param properties + * Configures the service. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_getsetprops "Sample code for ServiceURL.setProperties")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ServicesSetPropertiesResponse setProperties(StorageServiceProperties properties, Duration timeout, Context context) { + Mono response = storageAsyncRawClient.setProperties(properties, context); + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Gets a user delegation key for use with this account's blob storage. + * Note: This method call is only valid when using {@link TokenCredentials} in this object's {@link HttpPipeline}. + * + * @param start + * Start time for the key's validity. Null indicates immediate start. + * @param expiry + * Expiration of the key's validity. + * + * @return Emits the successful response. + */ + public ServicesGetUserDelegationKeyResponse getUserDelegationKey(OffsetDateTime start, OffsetDateTime expiry) { + return this.getUserDelegationKey(start, expiry, null, null); + } + + /** + * Gets a user delegation key for use with this account's blob storage. + * Note: This method call is only valid when using {@link TokenCredentials} in this object's {@link HttpPipeline}. + * + * @param start + * Start time for the key's validity. Null indicates immediate start. + * @param expiry + * Expiration of the key's validity. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + */ + public ServicesGetUserDelegationKeyResponse getUserDelegationKey(OffsetDateTime start, OffsetDateTime expiry, + Duration timeout, Context context) { + Mono response = storageAsyncRawClient.getUserDelegationKey(start, expiry, context); + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Retrieves statistics related to replication for the Blob service. It is only available on the secondary + * location endpoint when read-access geo-redundant replication is enabled for the storage account. For more + * information, see the + * Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_stats "Sample code for ServiceURL.getStats")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ServicesGetStatisticsResponse getStatistics() { + return this.getStatistics(null, null); + } + + /** + * Retrieves statistics related to replication for the Blob service. It is only available on the secondary + * location endpoint when read-access geo-redundant replication is enabled for the storage account. For more + * information, see the + * Azure Docs. + * + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_stats "Sample code for ServiceURL.getStats")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ServicesGetStatisticsResponse getStatistics(Duration timeout, Context context) { + Mono response = storageAsyncRawClient.getStatistics(context); + return timeout == null + ? response.block() + : response.block(timeout); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the + * Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=account_info "Sample code for ServiceURL.getAccountInfo")] \n + * For more samples, please see the [Samples file] (https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ServicesGetAccountInfoResponse getAccountInfo() { + return this.getAccountInfo(null, null); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the + * Azure Docs. + * + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=account_info "Sample code for ServiceURL.getAccountInfo")] \n + * For more samples, please see the [Samples file] (https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ServicesGetAccountInfoResponse getAccountInfo(Duration timeout, Context context) { + Mono response = storageAsyncRawClient.getAccountInfo(context); + return timeout == null + ? response.block() + : response.block(timeout); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/StorageURL.java b/storage/client/src/main/java/com/azure/storage/blob/StorageURL.java new file mode 100644 index 0000000000000..89213e91d2287 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/StorageURL.java @@ -0,0 +1,154 @@ +//// Copyright (c) Microsoft Corporation. All rights reserved. +//// Licensed under the MIT License. +// +//package com.azure.storage.blob; +// +//import com.azure.core.http.HttpPipeline; +//import com.azure.core.implementation.http.UrlBuilder; +//import com.microsoft.rest.v2.http.HttpPipeline; +//import com.microsoft.rest.v2.http.HttpPipelineOptions; +//import com.microsoft.rest.v2.http.UrlBuilder; +//import com.microsoft.rest.v2.policy.DecodingPolicyFactory; +//import com.microsoft.rest.v2.policy.RequestPolicyFactory; +// +//import java.net.MalformedURLException; +//import java.net.URL; +//import java.util.ArrayList; +// +///** +// * Represents a URL to a Azure storage object. Typically this class is only needed to generate a new pipeline. In most +// * cases, one of the other URL types will be more useful. +// */ +//public abstract class StorageURL { +// +// protected final GeneratedStorageClient storageClient; +// +// protected StorageURL(URL url, HttpPipeline pipeline) { +// if (url == null) { +// throw new IllegalArgumentException("url cannot be null."); +// } +// if (pipeline == null) { +// throw new IllegalArgumentException("Pipeline cannot be null. Create a pipeline by calling" +// + " StorageURL.createPipeline."); +// } +// +// this.storageClient = new GeneratedStorageClient(pipeline) +// .withVersion(Constants.HeaderConstants.TARGET_STORAGE_VERSION); +// this.storageClient.withUrl(url.toString()); +// } +// +// +// +// /** +// * Creates an pipeline to process the HTTP requests and Responses. +// * +// * @apiNote +// * ## Sample Code \n +// * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_url "Sample code for StorageURL.createPipeline")] \n +// * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/New-Storage-SDK-V10-Preview/src/test/java/com/microsoft/azure/storage/Samples.java) +// * +// * @return The pipeline. +// */ +// public static HttpPipeline createPipeline() { +// return createPipeline(new AnonymousCredentials(), new PipelineOptions()); +// } +// +// /** +// * Creates an pipeline to process the HTTP requests and Responses. +// * +// * @apiNote +// * ## Sample Code \n +// * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_url "Sample code for StorageURL.createPipeline")] \n +// * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/New-Storage-SDK-V10-Preview/src/test/java/com/microsoft/azure/storage/Samples.java) +// * +// * @param credentials +// * The credentials the pipeline will use to authenticate the requests. +// * +// * @return The pipeline. +// */ +// public static HttpPipeline createPipeline(ICredentials credentials) { +// return createPipeline(credentials, new PipelineOptions()); +// } +// +// /** +// * Creates an pipeline to process the HTTP requests and Responses. +// * +// * @apiNote +// * ## Sample Code \n +// * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_url "Sample code for StorageURL.createPipeline")] \n +// * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/New-Storage-SDK-V10-Preview/src/test/java/com/microsoft/azure/storage/Samples.java) +// * +// * @param pipelineOptions +// * Configurations for each policy in the pipeline. +// * @return The pipeline. +// */ +// public static HttpPipeline createPipeline(PipelineOptions pipelineOptions) { +// return createPipeline(new AnonymousCredentials(), pipelineOptions); +// } +// +// /** +// * Creates an pipeline to process the HTTP requests and Responses. +// * +// * @apiNote +// * ## Sample Code \n +// * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_url "Sample code for StorageURL.createPipeline")] \n +// * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/New-Storage-SDK-V10-Preview/src/test/java/com/microsoft/azure/storage/Samples.java) +// * +// * @param credentials +// * The credentials the pipeline will use to authenticate the requests. +// * @param pipelineOptions +// * Configurations for each policy in the pipeline. +// * +// * @return The pipeline. +// * +// * @apiNote ## Sample Code \n +// * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_url "Sample code for StorageURL.createPipeline")] \n +// * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) +// */ +// public static HttpPipeline createPipeline(ICredentials credentials, PipelineOptions pipelineOptions) { +// /* +// PipelineOptions is mutable, but its fields refer to immutable objects. This method can pass the fields to other +// methods, but the PipelineOptions object itself can only be used for the duration of this call; it must not be +// passed to anything with a longer lifetime. +// */ +// if (credentials == null) { +// throw new IllegalArgumentException( +// "Credentials cannot be null. For anonymous access use Anonymous Credentials."); +// } +// if (pipelineOptions == null) { +// throw new IllegalArgumentException("pipelineOptions cannot be null. You must at least specify a client."); +// } +// +// // Closest to API goes first, closest to wire goes last. +// ArrayList factories = new ArrayList<>(); +// factories.add(new TelemetryPolicy(pipelineOptions.telemetryOptions())); +// factories.add(new RequestIDPolicy()); +// factories.add(new RequestRetryPolicy(pipelineOptions.requestRetryOptions())); +// if (!(credentials instanceof AnonymousCredentials)) { +// factories.add(credentials); +// } +// factories.add(new SetResponseFieldPolicy()); +// factories.add(new DecodingPolicyFactory()); +// factories.add(new LoggingPolicy(pipelineOptions.loggingOptions())); +// +// return HttpPipeline.build(new HttpPipelineOptions().withHttpClient(pipelineOptions.client()) +// .withLogger(pipelineOptions.logger()), +// factories.toArray(new RequestPolicyFactory[factories.size()])); +// } +// +// @Override +// public String toString() { +// return this.storageClient.url(); +// } +// +// /** +// * @return The underlying url to the resource. +// */ +// public URL toURL() { +// try { +// return new URL(this.storageClient.url()); +// } catch (MalformedURLException e) { +// throw new RuntimeException(e); +// } +// } +//} diff --git a/storage/client/src/main/java/com/azure/storage/blob/TelemetryOptions.java b/storage/client/src/main/java/com/azure/storage/blob/TelemetryOptions.java new file mode 100644 index 0000000000000..36f18a99a231c --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/TelemetryOptions.java @@ -0,0 +1,32 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +/** + * Options for configuring the {@link TelemetryPolicy}. Please refer to the Factory for more information. + */ +final class TelemetryOptions { + + private final String userAgentPrefix; + + public TelemetryOptions() { + this(Constants.EMPTY_STRING); + } + + /** + * @param userAgentPrefix + * A string prepended to each request's User-Agent and sent to the service. The service records. + * the user-agent in logs for diagnostics and tracking of client requests. + */ + public TelemetryOptions(String userAgentPrefix) { + this.userAgentPrefix = userAgentPrefix; + } + + /** + * @return The user agent prefix. + */ + public String userAgentPrefix() { + return this.userAgentPrefix; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/TelemetryPolicy.java b/storage/client/src/main/java/com/azure/storage/blob/TelemetryPolicy.java new file mode 100644 index 0000000000000..754e1f67c3b5e --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/TelemetryPolicy.java @@ -0,0 +1,48 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.http.HttpPipelineCallContext; +import com.azure.core.http.HttpPipelineNextPolicy; +import com.azure.core.http.HttpResponse; +import com.azure.core.http.policy.HttpPipelinePolicy; +import reactor.core.publisher.Mono; + +import java.util.Locale; + +/** + * This is a factory which creates policies in an {@link com.azure.core.http.HttpPipeline} for adding telemetry to a + * given HTTP request. In most cases, it is sufficient to configure a {@link TelemetryOptions} object and set those as + * a field on a {@link PipelineOptions} object to configure a default pipeline. The factory and policy must only be used + * directly when creating a custom pipeline. + */ +final class TelemetryPolicy implements HttpPipelinePolicy { + + private final String userAgent; + + /** + * Creates a factory that can create telemetry policy objects which add telemetry information to the outgoing + * HTTP requests. + * + * @param telemetryOptions + * {@link TelemetryOptions} + */ + public TelemetryPolicy(TelemetryOptions telemetryOptions) { + telemetryOptions = telemetryOptions == null ? new TelemetryOptions() : telemetryOptions; + String userAgentPrefix = telemetryOptions.userAgentPrefix() == null + ? Constants.EMPTY_STRING : telemetryOptions.userAgentPrefix(); + this.userAgent = userAgentPrefix + ' ' + + Constants.HeaderConstants.USER_AGENT_PREFIX + '/' + Constants.HeaderConstants.USER_AGENT_VERSION + + String.format(Locale.ROOT, " (JavaJRE %s; %s %s)", + System.getProperty("java.version"), + System.getProperty("os.name").replaceAll(" ", ""), + System.getProperty("os.version")); + } + + @Override + public Mono process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { + context.httpRequest().headers().put(Constants.HeaderConstants.USER_AGENT, userAgent); + return next.process(); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/TokenCredentials.java b/storage/client/src/main/java/com/azure/storage/blob/TokenCredentials.java new file mode 100644 index 0000000000000..5b7f39ac9b117 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/TokenCredentials.java @@ -0,0 +1,66 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.http.HttpPipelineCallContext; +import com.azure.core.http.HttpPipelineNextPolicy; +import com.azure.core.http.HttpResponse; +import reactor.core.publisher.Mono; + +import java.util.concurrent.atomic.AtomicReference; + +/** + * TokenCredentials are a means of authenticating requests to Azure Storage via OAuth user tokens. This is the preferred + * way of authenticating with Azure Storage. + */ +public final class TokenCredentials implements ICredentials { + + /* + This is an atomic reference because it must be thread safe as all parts of the pipeline must be. It however cannot + be final as most factory fields are because in order to actually be useful, the token has to be renewed every few + hours, which requires updating the value here. + */ + private AtomicReference token; + + /** + * Creates a token credential for use with role-based access control (RBAC) access to Azure Storage resources. + * + * @param token + * A {@code String} of the token to use for authentication. + */ + public TokenCredentials(String token) { + this.token = new AtomicReference<>(token); + } + + /** + * Retrieve the value of the token used by this factory. + * + * @return A {@code String} with the token's value. + */ + public String getToken() { + return this.token.get(); + } + + /** + * Update the token to a new value. + * + * @param token + * A {@code String} containing the new token's value. + */ + public void withToken(String token) { + this.token.set(token); + } + + + @Override + public Mono process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { + if (!context.httpRequest().url().getProtocol().equals(Constants.HTTPS)) { + throw new Error("Token credentials require a URL using the https protocol scheme"); + } + context.httpRequest().withHeader(Constants.HeaderConstants.AUTHORIZATION, + "Bearer " + this.getToken()); + return next.process(); + } + +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/TransferManager.java b/storage/client/src/main/java/com/azure/storage/blob/TransferManager.java new file mode 100644 index 0000000000000..65b43c91eca58 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/TransferManager.java @@ -0,0 +1,481 @@ +//// Copyright (c) Microsoft Corporation. All rights reserved. +//// Licensed under the MIT License. +// +//package com.azure.storage.blob; +// +//import java.io.IOException; +//import java.nio.ByteBuffer; +//import java.nio.channels.AsynchronousFileChannel; +//import java.util.ArrayList; +//import java.util.Base64; +//import java.util.List; +//import java.util.UUID; +//import java.util.concurrent.atomic.AtomicLong; +//import java.util.concurrent.locks.Lock; +//import java.util.concurrent.locks.ReentrantLock; +// +//import static java.lang.StrictMath.toIntExact; +//import static java.nio.charset.StandardCharsets.UTF_8; +// +///** +// * This class contains a collection of methods (and structures associated with those methods) which perform higher-level +// * operations. Whereas operations on the URL types guarantee a single REST request and make no assumptions on desired +// * behavior, these methods will often compose several requests to provide a convenient way of performing more complex +// * operations. Further, we will make our own assumptions and optimizations for common cases that may not be ideal for +// * rarer cases. +// */ +//public final class TransferManager { +// +// /** +// * The default size of a download chunk for download large blobs. +// */ +// public static final int BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE = 4 * Constants.MB; +// +// /** +// * Uploads the contents of a file to a block blob in parallel, breaking it into block-size chunks if necessary. +// * +// * @param file +// * The file to upload. +// * @param blockBlobURL +// * Points to the blob to which the data should be uploaded. +// * @param blockLength +// * If the data must be broken up into blocks, this value determines what size those blocks will be. This +// * will affect the total number of service requests made as each REST request uploads exactly one block in +// * full. This value will be ignored if the data can be uploaded in a single put-blob operation. Must be +// * between 1 and {@link BlockBlobURL#MAX_STAGE_BLOCK_BYTES}. Note as well that +// * {@code fileLength/blockLength} must be less than or equal to {@link BlockBlobURL#MAX_BLOCKS}. +// * @param maxSingleShotSize +// * If the size of the data is less than or equal to this value, it will be uploaded in a single put +// * rather than broken up into chunks. If the data is uploaded in a single shot, the block size will be +// * ignored. Some constraints to consider are that more requests cost more, but several small or mid-sized +// * requests may sometimes perform better. Must be greater than 0. May be null to accept default behavior. +// * @param options +// * {@link TransferManagerUploadToBlockBlobOptions} +// * +// * @return Emits the successful response. +// * +// * @apiNote ## Sample Code \n +// * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=tm_file "Sample code for TransferManager.uploadFileToBlockBlob")] \n +// * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) +// */ +// public static Single uploadFileToBlockBlob(final AsynchronousFileChannel file, +// final BlockBlobURL blockBlobURL, final int blockLength, Integer maxSingleShotSize, +// final TransferManagerUploadToBlockBlobOptions options) throws IOException { +// Utility.assertNotNull("file", file); +// Utility.assertNotNull("blockBlobURL", blockBlobURL); +// Utility.assertInBounds("blockLength", blockLength, 1, BlockBlobURL.MAX_STAGE_BLOCK_BYTES); +// if (maxSingleShotSize != null) { +// Utility.assertInBounds("maxSingleShotSize", maxSingleShotSize, 0, BlockBlobURL.MAX_UPLOAD_BLOB_BYTES); +// } else { +// maxSingleShotSize = BlockBlobURL.MAX_UPLOAD_BLOB_BYTES; +// } +// TransferManagerUploadToBlockBlobOptions optionsReal = options == null +// ? new TransferManagerUploadToBlockBlobOptions() : options; +// +// // See ProgressReporter for an explanation on why this lock is necessary and why we use AtomicLong. +// AtomicLong totalProgress = new AtomicLong(0); +// Lock progressLock = new ReentrantLock(); +// +// // If the size of the file can fit in a single upload, do it this way. +// if (file.size() < maxSingleShotSize) { +// Flowable data = FlowableUtil.readFile(file); +// +// data = ProgressReporter.addProgressReporting(data, optionsReal.progressReceiver()); +// +// return blockBlobURL.upload(data, file.size(), optionsReal.httpHeaders(), +// optionsReal.metadata(), optionsReal.accessConditions(), null) +// // Transform the specific RestResponse into a CommonRestResponse. +// .map(CommonRestResponse::createFromPutBlobResponse); +// } +// +// // Calculate and validate the number of blocks. +// int numBlocks = calculateNumBlocks(file.size(), blockLength); +// if (numBlocks > BlockBlobURL.MAX_BLOCKS) { +// throw new IllegalArgumentException(SR.BLOB_OVER_MAX_BLOCK_LIMIT); +// } +// +// return Observable.range(0, numBlocks) +// /* +// For each block, make a call to stageBlock as follows. concatMap ensures that the items emitted +// by this Observable are in the same sequence as they are begun, which will be important for composing +// the list of Ids later. Eager ensures parallelism but may require some internal buffering. +// */ +// .concatMapEager(i -> { +// // The max number of bytes for a block is currently 100MB, so the final result must be an int. +// int count = (int) Math.min((long) blockLength, (file.size() - i * (long) blockLength)); +// // i * blockLength could be a long, so we need a cast to prevent overflow. +// Flowable data = FlowableUtil.readFile(file, i * (long) blockLength, count); +// +// // Report progress as necessary. +// data = ProgressReporter.addParallelProgressReporting(data, optionsReal.progressReceiver(), +// progressLock, totalProgress); +// +// final String blockId = Base64.getEncoder().encodeToString( +// UUID.randomUUID().toString().getBytes(UTF_8)); +// +// /* +// Make a call to stageBlock. Instead of emitting the response, which we don't care about other +// than that it was successful, emit the blockId for this request. These will be collected below. +// Turn that into an Observable which emits one item to comply with the signature of +// concatMapEager. +// */ +// return blockBlobURL.stageBlock(blockId, data, +// count, optionsReal.accessConditions().leaseAccessConditions(), null) +// .map(x -> blockId).toObservable(); +// +// /* +// Specify the number of concurrent subscribers to this map. This determines how many concurrent +// rest calls are made. This is so because maxConcurrency is the number of internal subscribers +// available to subscribe to the Observables emitted by the source. A subscriber is not released +// for a new subscription until its Observable calls onComplete, which here means that the call to +// stageBlock is finished. Prefetch is a hint that each of the Observables emitted by the source +// will emit only one value, which is true here because we have converted from a Single. +// */ +// }, optionsReal.parallelism(), 1) +// /* +// collectInto will gather each of the emitted blockIds into a list. Because we used concatMap, the Ids +// will be emitted according to their block number, which means the list generated here will be +// properly ordered. This also converts into a Single. +// */ +// .collectInto(new ArrayList(numBlocks), ArrayList::add) +// /* +// collectInto will not emit the list until its source calls onComplete. This means that by the time we +// call stageBlock list, all of the stageBlock calls will have finished. By flatMapping the list, we +// can "map" it into a call to commitBlockList. +// */ +// .flatMap(ids -> +// blockBlobURL.commitBlockList(ids, optionsReal.httpHeaders(), optionsReal.metadata(), +// optionsReal.accessConditions(), null)) +// +// // Finally, we must turn the specific response type into a CommonRestResponse by mapping. +// .map(CommonRestResponse::createFromPutBlockListResponse); +// } +// +// private static int calculateNumBlocks(long dataSize, long blockLength) { +// // Can successfully cast to an int because MaxBlockSize is an int, which this expression must be less than. +// int numBlocks = toIntExact(dataSize / blockLength); +// // Include an extra block for trailing data. +// if (dataSize % blockLength != 0) { +// numBlocks++; +// } +// return numBlocks; +// } +// +// /** +// * Downloads a file directly into a file, splitting the download into chunks and parallelizing as necessary. +// * +// * @param file +// * The destination file to which the blob will be written. +// * @param blobURL +// * The URL to the blob to download. +// * @param range +// * {@link BlobRange} +// * @param options +// * {@link TransferManagerDownloadFromBlobOptions} +// * +// * @return A {@code Completable} that will signal when the download is complete. +// * +// * @apiNote ## Sample Code \n +// * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=tm_file "Sample code for TransferManager.downloadBlobToFile")] \n +// * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) +// */ +// public static Single downloadBlobToFile(AsynchronousFileChannel file, BlobURL blobURL, +// BlobRange range, TransferManagerDownloadFromBlobOptions options) { +// BlobRange rangeReal = range == null ? new BlobRange() : range; +// TransferManagerDownloadFromBlobOptions optionsReal = options == null ? new TransferManagerDownloadFromBlobOptions() : options; +// Utility.assertNotNull("blobURL", blobURL); +// Utility.assertNotNull("file", file); +// +// // See ProgressReporter for an explanation on why this lock is necessary and why we use AtomicLong. +// Lock progressLock = new ReentrantLock(); +// AtomicLong totalProgress = new AtomicLong(0); +// +// // Get the size of the data and etag if not specified by the user. +// Single setupSingle = getSetupSingle(blobURL, rangeReal, optionsReal); +// +// return setupSingle.flatMap(helper -> { +// long newCount = helper.newCount; +// BlobAccessConditions realConditions = helper.realConditions; +// +// int numChunks = calculateNumBlocks(newCount, optionsReal.chunkSize()); +// +// // In case it is an empty blob, this ensures we still actually perform a download operation. +// numChunks = numChunks == 0 ? 1 : numChunks; +// +// DownloadAsyncResponse initialResponse = helper.initialResponse; +// return Flowable.range(0, numChunks) +// .flatMapSingle(chunkNum -> { +// // The first chunk was retrieved during setup. +// if (chunkNum == 0) { +// return writeBodyToFile(initialResponse, file, 0, optionsReal, progressLock, totalProgress); +// } +// +// // Calculate whether we need a full chunk or something smaller because we are at the end. +// long chunkSizeActual = Math.min(optionsReal.chunkSize(), +// newCount - (chunkNum * optionsReal.chunkSize())); +// BlobRange chunkRange = new BlobRange().withOffset( +// rangeReal.offset() + (chunkNum * optionsReal.chunkSize())) +// .withCount(chunkSizeActual); +// +// // Make the download call. +// return blobURL.download(chunkRange, realConditions, false, null) +// .flatMap(response -> +// writeBodyToFile(response, file, chunkNum, optionsReal, progressLock, +// totalProgress)); +// }, false, optionsReal.parallelism()) +// // All the headers will be the same, so we just pick the last one. +// .lastOrError(); +// }); +// } +// +// /* +// Construct a Single which will emit the total count for calculating the number of chunks, access conditions +// containing the etag to lock on, and the response from downloading the first chunk. +// */ +// private static Single getSetupSingle(BlobURL blobURL, BlobRange r, +// TransferManagerDownloadFromBlobOptions o) { +// // We will scope our initial download to either be one chunk or the total size. +// long initialChunkSize = r.count() != null && r.count() < o.chunkSize() ? r.count() : o.chunkSize(); +// +// return blobURL.download(new BlobRange().withOffset(r.offset()).withCount(initialChunkSize), +// o.accessConditions(), false, null) +// .map(response -> { +// /* +// Either the etag was set and it matches because the download succeed, so this is a no-op, or there +// was no etag, so we set it here. +// */ +// BlobAccessConditions newConditions = setEtag(o.accessConditions(), response.headers().eTag()); +// +// /* +// If the user either didn't specify a count or they specified a count greater than the size of the +// remaining data, take the size of the remaining data. This is to prevent the case where the count +// is much much larger than the size of the blob and we could try to download at an invalid offset. +// */ +// long newCount; +// // Extract the total length of the blob from the contentRange header. e.g. "bytes 1-6/7" +// long totalLength = extractTotalBlobLength(response.headers().contentRange()); +// if (r.count() == null || r.count() > (totalLength - r.offset())) { +// newCount = totalLength - r.offset(); +// } else { +// newCount = r.count(); +// } +// return new DownloadHelper(newCount, newConditions, response); +// }) +// .onErrorResumeNext(throwable -> { +// /* +// In the case of an empty blob, we still want to report successful download to file and give back +// valid DownloadResponseHeaders. Attempting a range download on an empty blob will return an +// InvalidRange error code and a Content-Range header of the format "bytes * /0". +// We need to double check that the total size is zero in the case that the customer has attempted an +// invalid range on a non-zero length blob. +// */ +// if (throwable instanceof StorageException +// && ((StorageException) throwable).errorCode() == StorageErrorCode.INVALID_RANGE +// && extractTotalBlobLength(((StorageException) throwable).response() +// .headers().value("Content-Range")) == 0) { +// return blobURL.download(new BlobRange().withOffset(0).withCount(0L), o.accessConditions(), +// false, null) +// .map(response -> { +// /* +// Ensure the blob is still 0 length by checking our download was the full length. +// (200 is for full blob; 206 is partial). +// */ +// if (response.statusCode() != 200) { +// throw new IllegalStateException("Blob was modified mid download. It was " +// + "originally 0 bytes and is now larger."); +// } +// return new DownloadHelper(0L, o.accessConditions(), response); +// }); +// } +// return Single.error(throwable); +// }); +// } +// +// private static BlobAccessConditions setEtag(BlobAccessConditions accessConditions, String etag) { +// /* +// We don't want to modify the user's object, so we'll create a duplicate and set the +// retrieved etag. +// */ +// return new BlobAccessConditions() +// .withModifiedAccessConditions(new ModifiedAccessConditions() +// .withIfModifiedSince( +// accessConditions.modifiedAccessConditions().ifModifiedSince()) +// .withIfUnmodifiedSince( +// accessConditions.modifiedAccessConditions().ifUnmodifiedSince()) +// .withIfMatch(etag) +// .withIfNoneMatch( +// accessConditions.modifiedAccessConditions().ifNoneMatch())) +// .withLeaseAccessConditions(accessConditions.leaseAccessConditions()); +// } +// +// private static Single writeBodyToFile(DownloadAsyncResponse response, +// AsynchronousFileChannel file, long chunkNum, TransferManagerDownloadFromBlobOptions optionsReal, +// Lock progressLock, AtomicLong totalProgress) { +// +// // Extract the body. +// Flowable data = response.body( +// optionsReal.reliableDownloadOptionsPerBlock()); +// +// // Report progress as necessary. +// data = ProgressReporter.addParallelProgressReporting(data, +// optionsReal.progressReceiver(), progressLock, totalProgress); +// +// // Write to the file. +// return FlowableUtil.writeFile(data, file, +// chunkNum * optionsReal.chunkSize()) +// /* +// Satisfy the return type. Observable required for flatmap to accept +// maxConcurrency. We want to eventually give the user back the headers. +// */ +// .andThen(Single.just(response.headers())); +// } +// +// private static long extractTotalBlobLength(String contentRange) { +// return Long.parseLong(contentRange.split("/")[1]); +// } +// +// private static final class DownloadHelper { +// final long newCount; +// +// final BlobAccessConditions realConditions; +// +// final DownloadAsyncResponse initialResponse; +// +// DownloadHelper(long newCount, BlobAccessConditions realConditions, DownloadAsyncResponse initialResponse) { +// this.newCount = newCount; +// this.realConditions = realConditions; +// this.initialResponse = initialResponse; +// } +// } +// +// /** +// * Uploads the contents of an arbitrary {@code Flux} to a block blob. This Flowable need not be replayable and +// * therefore it may have as its source a network stream or any other data for which the replay behavior is unknown +// * (non-replayable meaning the Flowable may not return the exact same data on each subscription). +// * +// * To eliminate the need for replayability on the source, the client must perform some buffering in order to ensure +// * the actual data passed to the network is replayable. This is important in order to support retries, which are +// * crucial for reliable data transfer. Typically, the greater the number of buffers used, the greater the possible +// * parallelism. Larger buffers means we will have to stage fewer blocks. The tradeoffs between these values are +// * context-dependent, so some experimentation may be required to optimize inputs for a given scenario. +// * +// * Note that buffering must be strictly sequential. Only the upload portion of this operation may be parallelized; +// * the reads cannot be. Therefore, this method is not as optimal as +// * {@link #uploadFileToBlockBlob(AsynchronousFileChannel, BlockBlobURL, int, Integer, TransferManagerUploadToBlockBlobOptions)} +// * and if the source is known to be a file, that method should be preferred. +// * +// * @param source +// * Contains the data to upload. Unlike other upload methods in this library, this method does not require +// * that the Flowable be replayable. +// * @param blockBlobURL +// * Points to the blob to which the data should be uploaded. +// * @param blockSize +// * The size of each block that will be staged. This value also determines the size that each buffer used by +// * this method will be and determines the number of requests that need to be made. The amount of memory +// * consumed by this method may be up to blockSize * numBuffers. If block size is large, this method will +// * make fewer network calls, but each individual call will send more data and will therefore take longer. +// * @param numBuffers +// * The maximum number of buffers this method should allocate. Must be at least two. Generally this value +// * should have some relationship to the value for parallelism passed via the options. If the number of +// * available buffers is smaller than the level of parallelism, then this method will not be able to make +// * full use of the available parallelism. It is unlikely that the value need be more than two times the +// * level of parallelism as such a value means that (assuming buffering is fast enough) there are enough +// * available buffers to have both one occupied for each worker and one ready for all workers should they +// * all complete the current request at approximately the same time. The amount of memory consumed by this +// * method may be up to blockSize * numBuffers. +// * @param options +// * {@link TransferManagerUploadToBlockBlobOptions} +// * @return Emits the successful response. +// * +// * @apiNote ## Sample Code \n +// * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=tm_nrf "Sample code for TransferManager.uploadFromNonReplayableFlowable")] \n +// * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) +// */ +// public static Single uploadFromNonReplayableFlowable( +// final Flowable source, final BlockBlobURL blockBlobURL, final int blockSize, +// final int numBuffers, final TransferManagerUploadToBlockBlobOptions options) { +// Utility.assertNotNull("source", source); +// Utility.assertNotNull("blockBlobURL", blockBlobURL); +// +// TransferManagerUploadToBlockBlobOptions optionsReal = options == null +// ? new TransferManagerUploadToBlockBlobOptions() : options; +// +// // See ProgressReporter for an explanation on why this lock is necessary and why we use AtomicLong. +// AtomicLong totalProgress = new AtomicLong(0); +// Lock progressLock = new ReentrantLock(); +// +// // Validation done in the constructor. +// UploadFromNRFBufferPool pool = new UploadFromNRFBufferPool(numBuffers, blockSize); +// +// /* +// Break the source flowable into chunks that are <= chunk size. This makes filling the pooled buffers much easier +// as we can guarantee we only need at most two buffers for any call to write (two in the case of one pool buffer +// filling up with more data to write) +// */ +// Flowable chunkedSource = source.flatMap(buffer -> { +// if (buffer.remaining() <= blockSize) { +// return Flowable.just(buffer); +// } +// List smallerChunks = new ArrayList<>(); +// for (int i = 0; i < Math.ceil(buffer.remaining() / (double) blockSize); i++) { +// // Note that duplicate does not duplicate data. It simply creates a duplicate view of the data. +// ByteBuffer duplicate = buffer.duplicate(); +// duplicate.position(i * blockSize); +// duplicate.limit(Math.min(duplicate.limit(), (i + 1) * blockSize)); +// smallerChunks.add(duplicate); +// } +// return Flowable.fromIterable(smallerChunks); +// }, false, 1); +// +// /* +// Write each buffer from the chunkedSource to the pool and call flush at the end to get the last bits. +// */ +// return chunkedSource.flatMap(pool::write, false, 1) +// .concatWith(Flowable.defer(pool::flush)) +// .concatMapEager(buffer -> { +// // Report progress as necessary. +// Flowable data = ProgressReporter.addParallelProgressReporting(Flowable.just(buffer), +// optionsReal.progressReceiver(), progressLock, totalProgress); +// +// final String blockId = Base64.getEncoder().encodeToString( +// UUID.randomUUID().toString().getBytes(UTF_8)); +// +// /* +// Make a call to stageBlock. Instead of emitting the response, which we don't care about other +// than that it was successful, emit the blockId for this request. These will be collected below. +// Turn that into an Observable which emits one item to comply with the signature of +// concatMapEager. +// */ +// return blockBlobURL.stageBlock(blockId, data, +// buffer.remaining(), optionsReal.accessConditions().leaseAccessConditions(), null) +// .map(x -> { +// pool.returnBuffer(buffer); +// return blockId; +// }).toFlowable(); +// +// /* +// Specify the number of concurrent subscribers to this map. This determines how many concurrent +// rest calls are made. This is so because maxConcurrency is the number of internal subscribers +// available to subscribe to the Observables emitted by the source. A subscriber is not released +// for a new subscription until its Observable calls onComplete, which here means that the call to +// stageBlock is finished. Prefetch is a hint that each of the Observables emitted by the source +// will emit only one value, which is true here because we have converted from a Single. +// */ +// }, optionsReal.parallelism(), 1) +// /* +// collectInto will gather each of the emitted blockIds into a list. Because we used concatMap, the Ids +// will be emitted according to their block number, which means the list generated here will be +// properly ordered. This also converts into a Single. +// */ +// .collectInto(new ArrayList(), ArrayList::add) +// /* +// collectInto will not emit the list until its source calls onComplete. This means that by the time we +// call stageBlock list, all of the stageBlock calls will have finished. By flatMapping the list, we +// can "map" it into a call to commitBlockList. +// */ +// .flatMap(ids -> +// blockBlobURL.commitBlockList(ids, optionsReal.httpHeaders(), optionsReal.metadata(), +// optionsReal.accessConditions(), null)); +// +// } +//} diff --git a/storage/client/src/main/java/com/azure/storage/blob/TransferManagerDownloadFromBlobOptions.java b/storage/client/src/main/java/com/azure/storage/blob/TransferManagerDownloadFromBlobOptions.java new file mode 100644 index 0000000000000..a4c621a4c3cbd --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/TransferManagerDownloadFromBlobOptions.java @@ -0,0 +1,105 @@ +//// Copyright (c) Microsoft Corporation. All rights reserved. +//// Licensed under the MIT License. +// +//package com.azure.storage.blob; +// +///** +// * Configures the parallel download behavior for methods on the {@link TransferManager}. +// */ +//public final class TransferManagerDownloadFromBlobOptions { +// +// private final long chunkSize; +// +// private final IProgressReceiver progressReceiver; +// +// private final int parallelism; +// +// private final ReliableDownloadOptions reliableDownloadOptionsPerBlock; +// +// // Cannot be final because we may have to set this property in order to lock on the etag. +// private BlobAccessConditions accessConditions; +// +// public TransferManagerDownloadFromBlobOptions() { +// this(null, null, null, null, null); +// } +// +// /** +// * Returns an object that configures the parallel download behavior for methods on the {@link TransferManager}. +// * +// * @param chunkSize +// * The size of the chunk into which large download operations will be broken into. Note that if the +// * chunkSize is large, fewer but larger requests will be made as each REST request will download a +// * single chunk in full. For larger chunk sizes, it may be helpful to configure the +// * {@code reliableDownloadOptions} to allow more retries. +// * @param progressReceiver +// * {@link IProgressReceiver} +// * @param accessConditions +// * {@link BlobAccessConditions} +// * @param reliableDownloadOptions +// * {@link ReliableDownloadOptions} +// * @param parallelism +// * A {@code int} that indicates the maximum number of chunks to download in parallel. Must be greater +// * than 0. May be null to accept default behavior. +// */ +// public TransferManagerDownloadFromBlobOptions(Long chunkSize, IProgressReceiver progressReceiver, +// BlobAccessConditions accessConditions, ReliableDownloadOptions reliableDownloadOptions, +// Integer parallelism) { +// this.progressReceiver = progressReceiver; +// +// if (chunkSize != null) { +// Utility.assertInBounds("chunkSize", chunkSize, 1, Long.MAX_VALUE); +// this.chunkSize = chunkSize; +// } else { +// this.chunkSize = TransferManager.BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE; +// } +// +// if (parallelism != null) { +// Utility.assertInBounds("parallelism", parallelism, 1, Integer.MAX_VALUE); +// this.parallelism = parallelism; +// } else { +// this.parallelism = Constants.TRANSFER_MANAGER_DEFAULT_PARALLELISM; +// } +// +// this.accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions; +// this.reliableDownloadOptionsPerBlock = reliableDownloadOptions == null +// ? new ReliableDownloadOptions() : reliableDownloadOptions; +// } +// +// /** +// * The size of the chunk into which large download operations will be broken into. Note that if the chunkSize is +// * large, fewer but larger requests will be made as each REST request will download a single chunk in full. For +// * larger chunk sizes, it may be helpful to configure the{@code reliableDownloadOptions} to allow more retries. +// */ +// public long chunkSize() { +// return chunkSize; +// } +// +// /** +// * {@link IProgressReceiver} +// */ +// public IProgressReceiver progressReceiver() { +// return progressReceiver; +// } +// +// /** +// * A {@code int} that indicates the maximum number of chunks to download in parallel. Must be greater than 0. May be +// * null to accept default behavior. +// */ +// public int parallelism() { +// return parallelism; +// } +// +// /** +// * {@link ReliableDownloadOptions} +// */ +// public ReliableDownloadOptions reliableDownloadOptionsPerBlock() { +// return reliableDownloadOptionsPerBlock; +// } +// +// /** +// * {@link BlobAccessConditions} +// */ +// public BlobAccessConditions accessConditions() { +// return accessConditions; +// } +//} diff --git a/storage/client/src/main/java/com/azure/storage/blob/TransferManagerUploadToBlockBlobOptions.java b/storage/client/src/main/java/com/azure/storage/blob/TransferManagerUploadToBlockBlobOptions.java new file mode 100644 index 0000000000000..279f2eba3f239 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/TransferManagerUploadToBlockBlobOptions.java @@ -0,0 +1,96 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.storage.blob.models.BlobHTTPHeaders; + +/** + * Configures the parallel upload behavior for methods on the {@link TransferManager}. + */ +class TransferManagerUploadToBlockBlobOptions { + + private final IProgressReceiver progressReceiver; + + private final BlobHTTPHeaders httpHeaders; + + private final Metadata metadata; + + private final BlobAccessConditions accessConditions; + + private final int parallelism; + + public TransferManagerUploadToBlockBlobOptions() { + this(null, null, null, null, null); + } + + /** + * Creates a new object that configures the parallel upload behavior. Null may be passed to accept the default + * behavior. + * + * @param progressReceiver + * {@link IProgressReceiver} + * @param httpHeaders + * Most often used when creating a blob or setting its properties, this class contains fields for typical + * HTTP properties, which, if specified, will be attached to the target blob. Null may be passed to any API. + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param parallelism + * Indicates the maximum number of blocks to upload in parallel. Must be greater than 0. + * May be null to accept default behavior. + */ + public TransferManagerUploadToBlockBlobOptions(IProgressReceiver progressReceiver, BlobHTTPHeaders httpHeaders, + Metadata metadata, BlobAccessConditions accessConditions, Integer parallelism) { + this.progressReceiver = progressReceiver; + if (parallelism != null) { + Utility.assertInBounds("parallelism", parallelism, 0, Integer.MAX_VALUE); + this.parallelism = parallelism; + } else { + this.parallelism = Constants.TRANSFER_MANAGER_DEFAULT_PARALLELISM; + } + + this.httpHeaders = httpHeaders; + this.metadata = metadata; + this.accessConditions = accessConditions == null ? new BlobAccessConditions() : accessConditions; + } + + /** + * {@link IProgressReceiver} + */ + public IProgressReceiver progressReceiver() { + return progressReceiver; + } + + /** + * Most often used when creating a blob or setting its properties, this class contains fields for typical HTTP + * properties, which, if specified, will be attached to the target blob. Null may be passed to any API. + */ + public BlobHTTPHeaders httpHeaders() { + return httpHeaders; + } + + /** + * {@link Metadata} + */ + public Metadata metadata() { + return metadata; + } + + /** + * {@link BlobAccessConditions} + */ + public BlobAccessConditions accessConditions() { + return accessConditions; + } + + /** + * A {@code int} that indicates the maximum number of blocks to upload in parallel. Must be greater than 0. May be + * null to accept default behavior. + */ + public int parallelism() { + return parallelism; + } + +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/URLParser.java b/storage/client/src/main/java/com/azure/storage/blob/URLParser.java new file mode 100644 index 0000000000000..46808f88142c3 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/URLParser.java @@ -0,0 +1,131 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import java.net.URL; +import java.net.UnknownHostException; +import java.util.Comparator; +import java.util.Locale; +import java.util.Map; +import java.util.TreeMap; + +/** + * A class used to conveniently parse URLs into {@link BlobURLParts} to modify the components of the URL. + */ +final class URLParser { + + /** + * URLParser parses a URL initializing BlobURLParts' fields including any SAS-related and snapshot query parameters. + * Any other query parameters remain in the UnparsedParams field. This method overwrites all fields in the + * BlobURLParts object. + * + * @param url + * The {@code URL} to be parsed. + * + * @return A {@link BlobURLParts} object containing all the components of a BlobURL. + * + * @throws UnknownHostException + * If the url contains an improperly formatted ipaddress or unknown host address. + */ + public static BlobURLParts parse(URL url) throws UnknownHostException { + + final String scheme = url.getProtocol(); + final String host = url.getHost(); + + String containerName = null; + String blobName = null; + + // find the container & blob names (if any) + String path = url.getPath(); + if (!Utility.isNullOrEmpty(path)) { + // if the path starts with a slash remove it + if (path.charAt(0) == '/') { + path = path.substring(1); + } + + int containerEndIndex = path.indexOf('/'); + if (containerEndIndex == -1) { + // path contains only a container name and no blob name + containerName = path; + } else { + // path contains the container name up until the slash and blob name is everything after the slash + containerName = path.substring(0, containerEndIndex); + blobName = path.substring(containerEndIndex + 1); + } + } + Map queryParamsMap = parseQueryString(url.getQuery()); + + String snapshot = null; + String[] snapshotArray = queryParamsMap.get("snapshot"); + if (snapshotArray != null) { + snapshot = snapshotArray[0]; + queryParamsMap.remove("snapshot"); + } + + SASQueryParameters sasQueryParameters = new SASQueryParameters(queryParamsMap, true); + + return new BlobURLParts() + .withScheme(scheme) + .withHost(host) + .withContainerName(containerName) + .withBlobName(blobName) + .withSnapshot(snapshot) + .withSasQueryParameters(sasQueryParameters) + .withUnparsedParameters(queryParamsMap); + } + + /** + * Parses a query string into a one to many hashmap. + * + * @param queryParams + * The string of query params to parse. + * + * @return A {@code HashMap} of the key values. + */ + private static TreeMap parseQueryString(String queryParams) { + + final TreeMap retVals = new TreeMap(new Comparator() { + @Override + public int compare(String s1, String s2) { + return s1.compareTo(s2); + } + }); + + if (Utility.isNullOrEmpty(queryParams)) { + return retVals; + } + + // split name value pairs by splitting on the 'c&' character + final String[] valuePairs = queryParams.split("&"); + + // for each field value pair parse into appropriate map entries + for (int m = 0; m < valuePairs.length; m++) { + // Getting key and value for a single query parameter + final int equalDex = valuePairs[m].indexOf("="); + String key = Utility.safeURLDecode(valuePairs[m].substring(0, equalDex)).toLowerCase(Locale.ROOT); + String value = Utility.safeURLDecode(valuePairs[m].substring(equalDex + 1)); + + // add to map + String[] keyValues = retVals.get(key); + + // check if map already contains key + if (keyValues == null) { + // map does not contain this key + keyValues = new String[]{value}; + } else { + // map contains this key already so append + final String[] newValues = new String[keyValues.length + 1]; + for (int j = 0; j < keyValues.length; j++) { + newValues[j] = keyValues[j]; + } + + newValues[newValues.length - 1] = value; + keyValues = newValues; + } + retVals.put(key, keyValues); + } + + return retVals; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/UploadFromNRFBufferPool.java b/storage/client/src/main/java/com/azure/storage/blob/UploadFromNRFBufferPool.java new file mode 100644 index 0000000000000..e238a54e2bd2f --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/UploadFromNRFBufferPool.java @@ -0,0 +1,156 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import reactor.core.publisher.Flux; + +import java.nio.ByteBuffer; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; + +/** + * This type is to support the implementation of uploadFromNonReplaybleFlowable only. It is mandatory that the caller + * has broken the source into ByteBuffers that are no greater than the size of a chunk and therefore a buffer in the + * pool. This is necessary because it upper bounds the number of buffers we need for a given call to write() to 2. If + * the size of ByteBuffer passed into write() were unbounded, the pool could stall as it would run out of buffers before + * it is able to return a result, and if it is unable to return, no data can be uploaded and therefore no pools + * returned. + * + * It is incumbent upon the caller to return the buffers after an upload is completed. It is also the caller's + * responsibility to signal to the pool when the stream is empty and call flush to return any data still sitting in + * the pool. + * + * Broadly, the workflow of this operation is to chunk the source into reasonable sized pieces. On each piece, one + * thread will call write on the pool. The pool will grab a buffer from the queue to write to, possibly waiting for one + * to be available, and either store the incomplete buffer to be filled on the next write or return the buffer to be + * sent. Filled buffers can be uploaded in parallel and should return buffers to the pool after the upload completes. + * Once the source terminates, it should call flush. + */ +final class UploadFromNRFBufferPool { + + private final BlockingQueue buffers; + + private final int maxBuffs; + + private int numBuffs = 0; + + private final int buffSize; + + private ByteBuffer currentBuf; + + UploadFromNRFBufferPool(final int numBuffs, final int buffSize) { + /* + We require at least two buffers because it is possible that a given write will spill over into a second buffer. + We only need one overflow buffer because the max size of a ByteBuffer is assumed to be the size as a buffer in + the pool. + */ + Utility.assertInBounds("numBuffs", numBuffs, 2, Integer.MAX_VALUE); + this.maxBuffs = numBuffs; + buffers = new LinkedBlockingQueue<>(numBuffs); + + + //These buffers will be used in calls to stageBlock, so they must be no greater than block size. + Utility.assertInBounds("buffSize", buffSize, 1, BlockBlobAsyncClient.MAX_STAGE_BLOCK_BYTES); + this.buffSize = buffSize; + + //We prep the queue with two buffers in case there is overflow. + buffers.add(ByteBuffer.allocate(this.buffSize)); + buffers.add(ByteBuffer.allocate(this.buffSize)); + this.numBuffs = 2; + } + + public Flux write(ByteBuffer buf) { + // Check if there's a buffer holding any data from a previous call to write. If not, get a new one. + if (this.currentBuf == null) { + this.currentBuf = this.getBuffer(); + } + + Flux result; + // We can fit this whole write in the buffer we currently have. + if (this.currentBuf.remaining() >= buf.remaining()) { + this.currentBuf.put(buf); + if (this.currentBuf.remaining() == 0) { + // Reset the position so that we can read the whole thing then return this buffer. + this.currentBuf.position(0); + result = Flux.just(this.currentBuf); + // This will force us to get a new buffer next time we try to write. + this.currentBuf = null; + } else { + /* + We are still filling the current buffer, so we have no data to return. We will return the buffer once it + is filled + */ + result = Flux.empty(); + } + } else { + // We will overflow the current buffer and require another one. + // Adjust the window of buf so that we fill up currentBuf without going out of bounds. + int oldLimit = buf.limit(); + buf.limit(buf.position() + this.currentBuf.remaining()); + this.currentBuf.put(buf); + // Set the old limit so we can read to the end in the next buffer. + buf.limit(oldLimit); + + // Reset the position so we can read the buffer. + this.currentBuf.position(0); + result = Flux.just(this.currentBuf); + + /* + Get a new buffer and fill it with whatever is left from buf. Note that this relies on the assumption that + the source Flowable has been split up into buffers that are no bigger than chunk size. This assumption + means we'll only have to over flow once, and the buffer we overflow into will not be filled. This is the + buffer we will write to on the next call to write(). + */ + this.currentBuf = this.getBuffer(); + this.currentBuf.put(buf); + } + return result; + } + + private ByteBuffer getBuffer() { + ByteBuffer result; + // There are no buffers in the queue and we have space to allocate one. + if (this.buffers.isEmpty() && this.numBuffs < this.maxBuffs) { + result = ByteBuffer.allocate(this.buffSize); + this.numBuffs++; + } else { + try { + // If empty, this will wait for an upload to finish and return a buffer. + result = this.buffers.take(); + + } catch (InterruptedException e) { + throw new IllegalStateException("UploadFromStream thread interrupted." + " Thread:" + + Thread.currentThread().getId()); + } + } + return result; + } + + Flux flush() { + /* + Prep and return any data left in the pool. It is important to set the limit so that we don't read beyond the + actual data as this buffer may have been used before and therefore may have some garbage at the end. + */ + if (this.currentBuf != null) { + this.currentBuf.flip(); + ByteBuffer last = this.currentBuf; + // If there is an accidental duplicate call to flush, this prevents sending the last buffer twice + this.currentBuf = null; + return Flux.just(last); + } + return Flux.empty(); + } + + void returnBuffer(ByteBuffer b) { + // Reset the buffer. + b.position(0); + b.limit(b.capacity()); + + try { + this.buffers.put(b); + } catch (InterruptedException e) { + throw new IllegalStateException("UploadFromStream thread interrupted."); + } + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/Utility.java b/storage/client/src/main/java/com/azure/storage/blob/Utility.java new file mode 100644 index 0000000000000..6a5616e362c8e --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/Utility.java @@ -0,0 +1,336 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob; + +import com.azure.core.implementation.http.UrlBuilder; +import com.azure.storage.blob.models.StorageErrorException; +import com.azure.storage.blob.models.UserDelegationKey; +import reactor.core.publisher.Mono; + +import javax.crypto.Mac; +import javax.crypto.spec.SecretKeySpec; +import java.io.UnsupportedEncodingException; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.net.MalformedURLException; +import java.net.URL; +import java.net.URLDecoder; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; +import java.security.InvalidKeyException; +import java.security.NoSuchAlgorithmException; +import java.time.LocalDateTime; +import java.time.OffsetDateTime; +import java.time.ZoneId; +import java.time.format.DateTimeFormatter; +import java.util.Base64; +import java.util.Locale; + +final class Utility { + + static final DateTimeFormatter RFC_1123_GMT_DATE_FORMATTER = + DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss z", Locale.ROOT).withZone(ZoneId.of("GMT")); + + static final DateTimeFormatter ISO_8601_UTC_DATE_FORMATTER = + DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss'Z'", Locale.ROOT).withZone(ZoneId.of("UTC")); + /** + * Stores a reference to the UTC time zone. + */ + static final ZoneId UTC_ZONE = ZoneId.of("UTC"); + /** + * Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of expressing. + */ + private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS"; + /** + * Stores a reference to the ISO8601 date/time pattern. + */ + private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'"; + /** + * Stores a reference to the ISO8601 date/time pattern. + */ + private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'"; + /** + * The length of a datestring that matches the MAX_PRECISION_PATTERN. + */ + private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "").length(); + + /** + * Asserts that a value is not null. + * + * @param param + * A {@code String} that represents the name of the parameter, which becomes the exception message + * text if the value parameter is null. + * @param value + * An Object object that represents the value of the specified parameter. This is the value + * being asserted as not null. + */ + static void assertNotNull(final String param, final Object value) { + if (value == null) { + throw new IllegalArgumentException(String.format(Locale.ROOT, SR.ARGUMENT_NULL_OR_EMPTY, param)); + } + } + + /** + * Returns a value that indicates whether the specified string is null or empty. + * + * @param value + * A {@code String} being examined for null or empty. + * + * @return true if the specified value is null or empty; otherwise, false + */ + static boolean isNullOrEmpty(final String value) { + return value == null || value.length() == 0; + } + + /** + * Performs safe decoding of the specified string, taking care to preserve each + character, rather + * than replacing it with a space character. + * + * @param stringToDecode + * A {@code String} that represents the string to decode. + * + * @return A {@code String} that represents the decoded string. + */ + static String safeURLDecode(final String stringToDecode) { + if (stringToDecode.length() == 0) { + return Constants.EMPTY_STRING; + } + + // '+' are decoded as ' ' so preserve before decoding + if (stringToDecode.contains("+")) { + final StringBuilder outBuilder = new StringBuilder(); + + int startDex = 0; + for (int m = 0; m < stringToDecode.length(); m++) { + if (stringToDecode.charAt(m) == '+') { + if (m > startDex) { + try { + outBuilder.append(URLDecoder.decode(stringToDecode.substring(startDex, m), + Constants.UTF8_CHARSET)); + } catch (UnsupportedEncodingException e) { + throw new Error(e); + } + } + + outBuilder.append("+"); + startDex = m + 1; + } + } + + if (startDex != stringToDecode.length()) { + try { + outBuilder.append(URLDecoder.decode(stringToDecode.substring(startDex, stringToDecode.length()), + Constants.UTF8_CHARSET)); + } catch (UnsupportedEncodingException e) { + throw new Error(e); + } + } + + return outBuilder.toString(); + } else { + try { + return URLDecoder.decode(stringToDecode, Constants.UTF8_CHARSET); + } catch (UnsupportedEncodingException e) { + throw new Error(e); + } + } + } + + /** + * Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it + * with up to millisecond precision. + * + * @param dateString + * the {@code String} to be interpreted as a Date + * + * @return the corresponding Date object + */ + public static OffsetDateTime parseDate(String dateString) { + String pattern = MAX_PRECISION_PATTERN; + switch (dateString.length()) { + case 28: // "yyyy-MM-dd'T'HH:mm:ss.SSSSSSS'Z'"-> [2012-01-04T23:21:59.1234567Z] length = 28 + case 27: // "yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"-> [2012-01-04T23:21:59.123456Z] length = 27 + case 26: // "yyyy-MM-dd'T'HH:mm:ss.SSSSS'Z'"-> [2012-01-04T23:21:59.12345Z] length = 26 + case 25: // "yyyy-MM-dd'T'HH:mm:ss.SSSS'Z'"-> [2012-01-04T23:21:59.1234Z] length = 25 + case 24: // "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"-> [2012-01-04T23:21:59.123Z] length = 24 + dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH); + break; + case 23: // "yyyy-MM-dd'T'HH:mm:ss.SS'Z'"-> [2012-01-04T23:21:59.12Z] length = 23 + // SS is assumed to be milliseconds, so a trailing 0 is necessary + dateString = dateString.replace("Z", "0"); + break; + case 22: // "yyyy-MM-dd'T'HH:mm:ss.S'Z'"-> [2012-01-04T23:21:59.1Z] length = 22 + // S is assumed to be milliseconds, so trailing 0's are necessary + dateString = dateString.replace("Z", "00"); + break; + case 20: // "yyyy-MM-dd'T'HH:mm:ss'Z'"-> [2012-01-04T23:21:59Z] length = 20 + pattern = Utility.ISO8601_PATTERN; + break; + case 17: // "yyyy-MM-dd'T'HH:mm'Z'"-> [2012-01-04T23:21Z] length = 17 + pattern = Utility.ISO8601_PATTERN_NO_SECONDS; + break; + default: + throw new IllegalArgumentException(String.format(Locale.ROOT, SR.INVALID_DATE_STRING, dateString)); + } + + DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT); + return LocalDateTime.parse(dateString, formatter).atZone(UTC_ZONE).toOffsetDateTime(); + } + + /** + * Asserts that the specified integer is in the valid range. + * + * @param param + * A String that represents the name of the parameter, which becomes the exception message + * text if the value parameter is out of bounds. + * @param value + * The value of the specified parameter. + * @param min + * The minimum value for the specified parameter. + * @param max + * The maximum value for the specified parameter. + */ + public static void assertInBounds(final String param, final long value, final long min, final long max) { + if (value < min || value > max) { + throw new IllegalArgumentException(String.format(Locale.ROOT, SR.PARAMETER_NOT_IN_RANGE, param, min, max)); + } + } + + /** + * Performs safe encoding of the specified string, taking care to insert %20 for each space character, + * instead of inserting the + character. + */ + static String safeURLEncode(final String stringToEncode) { + if (stringToEncode == null) { + return null; + } + if (stringToEncode.length() == 0) { + return Constants.EMPTY_STRING; + } + + try { + final String tString = URLEncoder.encode(stringToEncode, Constants.UTF8_CHARSET); + + if (stringToEncode.contains(" ")) { + final StringBuilder outBuilder = new StringBuilder(); + + int startDex = 0; + for (int m = 0; m < stringToEncode.length(); m++) { + if (stringToEncode.charAt(m) == ' ') { + if (m > startDex) { + outBuilder.append(URLEncoder.encode(stringToEncode.substring(startDex, m), + Constants.UTF8_CHARSET)); + } + + outBuilder.append("%20"); + startDex = m + 1; + } + } + + if (startDex != stringToEncode.length()) { + outBuilder.append(URLEncoder.encode(stringToEncode.substring(startDex, stringToEncode.length()), + Constants.UTF8_CHARSET)); + } + + return outBuilder.toString(); + } else { + return tString; + } + + } catch (final UnsupportedEncodingException e) { + throw new Error(e); // If we can't encode UTF-8, we fail. + } + } + + static Mono postProcessResponse(Mono s) { + s = addErrorWrappingToSingle(s); + s = scrubEtagHeaderInResponse(s); + return s; + } + + /* + We need to convert the generated StorageErrorException to StorageException, which has a cleaner interface and + methods to conveniently access important values. + */ + private static Mono addErrorWrappingToSingle(Mono s) { + return s.onErrorResume( + StorageErrorException.class, + e -> e.response().bodyAsString().flatMap(body -> Mono.error(new StorageException(e, body)))); + } + + /* + The service is inconsistent in whether or not the etag header value has quotes. This method will check if the + response returns an etag value, and if it does, remove any quotes that may be present to give the user a more + predictable format to work with. + */ + private static Mono scrubEtagHeaderInResponse(Mono s) { + return s.map(response -> { + try { + Object headers = response.getClass().getMethod("headers").invoke(response); + Method etagGetterMethod = headers.getClass().getMethod("eTag"); + String etag = (String) etagGetterMethod.invoke(headers); + // CommitBlockListHeaders has an etag property, but it's only set if the blob has committed blocks. + if (etag == null) { + return response; + } + etag = etag.replace("\"", ""); // Etag headers without the quotes will be unaffected. + headers.getClass().getMethod("withETag", String.class).invoke(headers, etag); + } catch (NoSuchMethodException e) { + // Response did not return an eTag value. No change necessary. + } catch (IllegalAccessException | InvocationTargetException e) { + //TODO validate this won't throw + } + return response; + }); + } + + /** + * Computes a signature for the specified string using the HMAC-SHA256 algorithm. + * + * @param delegate + * Key used to sign + * @param stringToSign + * The UTF-8-encoded string to sign. + * + * @return A {@code String} that contains the HMAC-SHA256-encoded signature. + * + * @throws InvalidKeyException + * If the accountKey is not a valid Base64-encoded string. + */ + static String delegateComputeHmac256(final UserDelegationKey delegate, String stringToSign) throws InvalidKeyException { + try { + byte[] key = Base64.getDecoder().decode(delegate.value()); + Mac hmacSha256 = Mac.getInstance("HmacSHA256"); + hmacSha256.init(new SecretKeySpec(key, "HmacSHA256")); + byte[] utf8Bytes = stringToSign.getBytes(StandardCharsets.UTF_8); + return Base64.getEncoder().encodeToString(hmacSha256.doFinal(utf8Bytes)); + } catch (final NoSuchAlgorithmException e) { + throw new Error(e); + } + } + + /** + * Appends a string to the end of a URL's path (prefixing the string with a '/' if required). + * + * @param baseURL + * The url to which the name should be appended. + * @param name + * The name to be appended. + * + * @return A url with the name appended. + * + * @throws MalformedURLException + * Appending the specified name produced an invalid URL. + */ + static URL appendToURLPath(URL baseURL, String name) throws MalformedURLException { + UrlBuilder url = UrlBuilder.parse(baseURL.toString()); + if (url.path() == null) { + url.withPath("/"); // .path() will return null if it is empty, so we have to process separately from below. + } else if (url.path().charAt(url.path().length() - 1) != '/') { + url.withPath(url.path() + '/'); + } + url.withPath(url.path() + name); + return new URL(url.toString()); + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/implementation/AzureBlobStorageBuilder.java b/storage/client/src/main/java/com/azure/storage/blob/implementation/AzureBlobStorageBuilder.java new file mode 100644 index 0000000000000..c77d984c4e928 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/implementation/AzureBlobStorageBuilder.java @@ -0,0 +1,83 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) AutoRest Code Generator. + +package com.azure.storage.blob.implementation; + +import com.azure.core.http.HttpPipeline; +import com.azure.core.implementation.RestProxy; + +/** + * A builder for creating a new instance of the AzureBlobStorage type. + */ +public final class AzureBlobStorageBuilder { + /* + * The URL of the service account, container, or blob that is the targe of the desired operation. + */ + private String url; + + /** + * Sets The URL of the service account, container, or blob that is the targe of the desired operation. + * + * @param url the url value. + * @return the AzureBlobStorageBuilder. + */ + public AzureBlobStorageBuilder url(String url) { + this.url = url; + return this; + } + + /* + * Specifies the version of the operation to use for this request. + */ + private String version; + + /** + * Sets Specifies the version of the operation to use for this request. + * + * @param version the version value. + * @return the AzureBlobStorageBuilder. + */ + public AzureBlobStorageBuilder version(String version) { + this.version = version; + return this; + } + + /* + * The HTTP pipeline to send requests through + */ + private HttpPipeline pipeline; + + /** + * Sets The HTTP pipeline to send requests through. + * + * @param pipeline the pipeline value. + * @return the AzureBlobStorageBuilder. + */ + public AzureBlobStorageBuilder pipeline(HttpPipeline pipeline) { + this.pipeline = pipeline; + return this; + } + + /** + * Builds an instance of AzureBlobStorageImpl with the provided parameters. + * + * @return an instance of AzureBlobStorageImpl. + */ + public AzureBlobStorageImpl build() { + if (version == null) { + this.version = "2018-11-09"; + } + if (pipeline == null) { + this.pipeline = RestProxy.createDefaultPipeline(); + } + AzureBlobStorageImpl client = new AzureBlobStorageImpl(pipeline); + if (this.url != null) { + client.url(this.url); + } + if (this.version != null) { + client.version(this.version); + } + return client; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/implementation/AzureBlobStorageImpl.java b/storage/client/src/main/java/com/azure/storage/blob/implementation/AzureBlobStorageImpl.java index 1f54d0ea8e0a7..6702b82da14ea 100644 --- a/storage/client/src/main/java/com/azure/storage/blob/implementation/AzureBlobStorageImpl.java +++ b/storage/client/src/main/java/com/azure/storage/blob/implementation/AzureBlobStorageImpl.java @@ -32,7 +32,7 @@ public String url() { * @param url the url value. * @return the service client itself. */ - public AzureBlobStorageImpl withUrl(String url) { + AzureBlobStorageImpl url(String url) { this.url = url; return this; } @@ -57,7 +57,7 @@ public String version() { * @param version the version value. * @return the service client itself. */ - public AzureBlobStorageImpl withVersion(String version) { + AzureBlobStorageImpl version(String version) { this.version = version; return this; } @@ -160,7 +160,6 @@ public AzureBlobStorageImpl() { */ public AzureBlobStorageImpl(HttpPipeline httpPipeline) { super(httpPipeline); - this.version = "2018-11-09"; this.services = new ServicesImpl(this); this.containers = new ContainersImpl(this); this.blobs = new BlobsImpl(this); diff --git a/storage/client/src/main/java/com/azure/storage/blob/implementation/BlobsImpl.java b/storage/client/src/main/java/com/azure/storage/blob/implementation/BlobsImpl.java index 44a1d90ab3b0c..8dac0f9bdbfa5 100644 --- a/storage/client/src/main/java/com/azure/storage/blob/implementation/BlobsImpl.java +++ b/storage/client/src/main/java/com/azure/storage/blob/implementation/BlobsImpl.java @@ -90,6 +90,7 @@ private interface BlobsService { @HEAD("{containerName}/{blob}") @ExpectedResponses({200, 304}) + @UnexpectedResponseExceptionType(StorageErrorException.class) Mono getProperties(@PathParam("containerName") String containerName, @PathParam("blob") String blob, @HostParam("url") String url, @QueryParam("snapshot") String snapshot, @QueryParam("versionid") String versionId, @QueryParam("timeout") Integer timeout, @QueryParam("x-ms-encryption-key") String encryptionKey, @QueryParam("x-ms-encryption-key-sha256") String encryptionKeySha256, @QueryParam("x-ms-encryption-algorithm") EncryptionAlgorithmType encryptionAlgorithm, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @HeaderParam("x-ms-lease-id") String leaseId, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince, @HeaderParam("If-Match") String ifMatch, @HeaderParam("If-None-Match") String ifNoneMatch, Context context); @DELETE("{containerName}/{blob}") @@ -311,7 +312,7 @@ public Mono getPropertiesWithRestResponseAsync(Strin } /** - * If the storage account's soft delete feature is disabled then, when a blob is deleted, it is permanently removed from the storage account. If the storage account's soft delete feature is enabled, then, when a blob is deleted, it is marked for deletion and becomes inaccessible immediately. However, the blob service retains the blob or snapshot for the number of days specified by the DeleteRetentionPolicy section of [Storage service properties] (Set-Blob-Service-Properties.md). After the specified number of days has passed, the blob's data is permanently removed from the storage account. Note that you continue to be charged for the soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and specify the "include=deleted" query parameter to discover which blobs and snapshots have been soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. All other operations on a soft-deleted blob or snapshot causes the service to return an HTTP status code of 404 (ResourceNotFound). If the storage account's automatic snapshot feature is enabled, then, when a blob is deleted, an automatic snapshot is created. The blob becomes inaccessible immediately. All other operations on the blob causes the service to return an HTTP status code of 404 (ResourceNotFound). You can access automatic snapshot using snapshot timestamp or version id. You can restore the blob by calling Put or Copy Blob API with automatic snapshot as source. Deleting automatic snapshot requires shared key or special SAS/RBAC permissions. + * If the storage account's soft delete feature is disabled then, when a blob is deleted, it is permanently removed from the storage account. If the storage account's soft delete feature is enabled, then, when a blob is deleted, it is marked for deletion and becomes inaccessible immediately. However, the storage service retains the blob or snapshot for the number of days specified by the DeleteRetentionPolicy section of [Storage service properties] (Set-Blob-Service-Properties.md). After the specified number of days has passed, the blob's data is permanently removed from the storage account. Note that you continue to be charged for the soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and specify the "include=deleted" query parameter to discover which blobs and snapshots have been soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. All other operations on a soft-deleted blob or snapshot causes the service to return an HTTP status code of 404 (ResourceNotFound). If the storage account's automatic snapshot feature is enabled, then, when a blob is deleted, an automatic snapshot is created. The blob becomes inaccessible immediately. All other operations on the blob causes the service to return an HTTP status code of 404 (ResourceNotFound). You can access automatic snapshot using snapshot timestamp or version id. You can restore the blob by calling Put or Copy Blob API with automatic snapshot as source. Deleting automatic snapshot requires shared key or special SAS/RBAC permissions. * * @param containerName The container name. * @param blob The blob name. @@ -334,7 +335,7 @@ public Mono deleteWithRestResponseAsync(String containerNam } /** - * If the storage account's soft delete feature is disabled then, when a blob is deleted, it is permanently removed from the storage account. If the storage account's soft delete feature is enabled, then, when a blob is deleted, it is marked for deletion and becomes inaccessible immediately. However, the blob service retains the blob or snapshot for the number of days specified by the DeleteRetentionPolicy section of [Storage service properties] (Set-Blob-Service-Properties.md). After the specified number of days has passed, the blob's data is permanently removed from the storage account. Note that you continue to be charged for the soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and specify the "include=deleted" query parameter to discover which blobs and snapshots have been soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. All other operations on a soft-deleted blob or snapshot causes the service to return an HTTP status code of 404 (ResourceNotFound). If the storage account's automatic snapshot feature is enabled, then, when a blob is deleted, an automatic snapshot is created. The blob becomes inaccessible immediately. All other operations on the blob causes the service to return an HTTP status code of 404 (ResourceNotFound). You can access automatic snapshot using snapshot timestamp or version id. You can restore the blob by calling Put or Copy Blob API with automatic snapshot as source. Deleting automatic snapshot requires shared key or special SAS/RBAC permissions. + * If the storage account's soft delete feature is disabled then, when a blob is deleted, it is permanently removed from the storage account. If the storage account's soft delete feature is enabled, then, when a blob is deleted, it is marked for deletion and becomes inaccessible immediately. However, the storage service retains the blob or snapshot for the number of days specified by the DeleteRetentionPolicy section of [Storage service properties] (Set-Blob-Service-Properties.md). After the specified number of days has passed, the blob's data is permanently removed from the storage account. Note that you continue to be charged for the soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and specify the "include=deleted" query parameter to discover which blobs and snapshots have been soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. All other operations on a soft-deleted blob or snapshot causes the service to return an HTTP status code of 404 (ResourceNotFound). If the storage account's automatic snapshot feature is enabled, then, when a blob is deleted, an automatic snapshot is created. The blob becomes inaccessible immediately. All other operations on the blob causes the service to return an HTTP status code of 404 (ResourceNotFound). You can access automatic snapshot using snapshot timestamp or version id. You can restore the blob by calling Put or Copy Blob API with automatic snapshot as source. Deleting automatic snapshot requires shared key or special SAS/RBAC permissions. * * @param containerName The container name. * @param blob The blob name. diff --git a/storage/client/src/main/java/com/azure/storage/blob/models/AppendBlobAppendBlockFromUrlHeaders.java b/storage/client/src/main/java/com/azure/storage/blob/models/AppendBlobAppendBlockFromUrlHeaders.java index 3e23a6fc7e1b5..8273338cb6676 100644 --- a/storage/client/src/main/java/com/azure/storage/blob/models/AppendBlobAppendBlockFromUrlHeaders.java +++ b/storage/client/src/main/java/com/azure/storage/blob/models/AppendBlobAppendBlockFromUrlHeaders.java @@ -5,10 +5,10 @@ package com.azure.storage.blob.models; import com.azure.core.implementation.DateTimeRfc1123; +import com.azure.core.implementation.util.ImplUtils; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; import java.time.OffsetDateTime; -import java.util.Arrays; /** * Defines headers for AppendBlockFromUrl operation. @@ -146,7 +146,7 @@ public AppendBlobAppendBlockFromUrlHeaders lastModified(OffsetDateTime lastModif * @return the contentMD5 value. */ public byte[] contentMD5() { - return Arrays.copyOf(this.contentMD5, this.contentMD5.length); + return ImplUtils.clone(this.contentMD5); } /** @@ -158,7 +158,7 @@ public byte[] contentMD5() { * @return the AppendBlobAppendBlockFromUrlHeaders object itself. */ public AppendBlobAppendBlockFromUrlHeaders contentMD5(byte[] contentMD5) { - this.contentMD5 = Arrays.copyOf(contentMD5, contentMD5.length); + this.contentMD5 = ImplUtils.clone(contentMD5); return this; } diff --git a/storage/client/src/main/java/com/azure/storage/blob/models/AppendBlobAppendBlockHeaders.java b/storage/client/src/main/java/com/azure/storage/blob/models/AppendBlobAppendBlockHeaders.java index 63038ad02e070..8fe788378e589 100644 --- a/storage/client/src/main/java/com/azure/storage/blob/models/AppendBlobAppendBlockHeaders.java +++ b/storage/client/src/main/java/com/azure/storage/blob/models/AppendBlobAppendBlockHeaders.java @@ -5,10 +5,10 @@ package com.azure.storage.blob.models; import com.azure.core.implementation.DateTimeRfc1123; +import com.azure.core.implementation.util.ImplUtils; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; import java.time.OffsetDateTime; -import java.util.Arrays; /** * Defines headers for AppendBlock operation. @@ -162,7 +162,7 @@ public AppendBlobAppendBlockHeaders lastModified(OffsetDateTime lastModified) { * @return the contentMD5 value. */ public byte[] contentMD5() { - return Arrays.copyOf(this.contentMD5, this.contentMD5.length); + return ImplUtils.clone(this.contentMD5); } /** @@ -174,7 +174,7 @@ public byte[] contentMD5() { * @return the AppendBlobAppendBlockHeaders object itself. */ public AppendBlobAppendBlockHeaders contentMD5(byte[] contentMD5) { - this.contentMD5 = Arrays.copyOf(contentMD5, contentMD5.length); + this.contentMD5 = ImplUtils.clone(contentMD5); return this; } diff --git a/storage/client/src/main/java/com/azure/storage/blob/models/AppendBlobCreateHeaders.java b/storage/client/src/main/java/com/azure/storage/blob/models/AppendBlobCreateHeaders.java index 5aaf2385d763d..45fd447bde387 100644 --- a/storage/client/src/main/java/com/azure/storage/blob/models/AppendBlobCreateHeaders.java +++ b/storage/client/src/main/java/com/azure/storage/blob/models/AppendBlobCreateHeaders.java @@ -5,10 +5,10 @@ package com.azure.storage.blob.models; import com.azure.core.implementation.DateTimeRfc1123; +import com.azure.core.implementation.util.ImplUtils; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; import java.time.OffsetDateTime; -import java.util.Arrays; /** * Defines headers for Create operation. @@ -156,7 +156,7 @@ public AppendBlobCreateHeaders lastModified(OffsetDateTime lastModified) { * @return the contentMD5 value. */ public byte[] contentMD5() { - return Arrays.copyOf(this.contentMD5, this.contentMD5.length); + return ImplUtils.clone(this.contentMD5); } /** @@ -168,7 +168,7 @@ public byte[] contentMD5() { * @return the AppendBlobCreateHeaders object itself. */ public AppendBlobCreateHeaders contentMD5(byte[] contentMD5) { - this.contentMD5 = Arrays.copyOf(contentMD5, contentMD5.length); + this.contentMD5 = ImplUtils.clone(contentMD5); return this; } diff --git a/storage/client/src/main/java/com/azure/storage/blob/models/BlobDownloadHeaders.java b/storage/client/src/main/java/com/azure/storage/blob/models/BlobDownloadHeaders.java index 1583498eb600f..aa56c2e6683c4 100644 --- a/storage/client/src/main/java/com/azure/storage/blob/models/BlobDownloadHeaders.java +++ b/storage/client/src/main/java/com/azure/storage/blob/models/BlobDownloadHeaders.java @@ -6,10 +6,10 @@ import com.azure.core.annotations.HeaderCollection; import com.azure.core.implementation.DateTimeRfc1123; +import com.azure.core.implementation.util.ImplUtils; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; import java.time.OffsetDateTime; -import java.util.Arrays; import java.util.Map; /** @@ -415,7 +415,7 @@ public BlobDownloadHeaders eTag(String eTag) { * @return the contentMD5 value. */ public byte[] contentMD5() { - return Arrays.copyOf(this.contentMD5, this.contentMD5.length); + return ImplUtils.clone(this.contentMD5); } /** @@ -427,7 +427,7 @@ public byte[] contentMD5() { * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders contentMD5(byte[] contentMD5) { - this.contentMD5 = Arrays.copyOf(contentMD5, contentMD5.length); + this.contentMD5 = ImplUtils.clone(contentMD5); return this; } @@ -1003,7 +1003,7 @@ public BlobDownloadHeaders encryptionKeySha256(String encryptionKeySha256) { * @return the blobContentMD5 value. */ public byte[] blobContentMD5() { - return Arrays.copyOf(this.blobContentMD5, this.blobContentMD5.length); + return ImplUtils.clone(this.blobContentMD5); } /** @@ -1017,7 +1017,7 @@ public byte[] blobContentMD5() { * @return the BlobDownloadHeaders object itself. */ public BlobDownloadHeaders blobContentMD5(byte[] blobContentMD5) { - this.blobContentMD5 = Arrays.copyOf(blobContentMD5, blobContentMD5.length); + this.blobContentMD5 = ImplUtils.clone(blobContentMD5); return this; } diff --git a/storage/client/src/main/java/com/azure/storage/blob/models/BlobGetPropertiesHeaders.java b/storage/client/src/main/java/com/azure/storage/blob/models/BlobGetPropertiesHeaders.java index b04f517dff9e6..cb1d9e3ba1762 100644 --- a/storage/client/src/main/java/com/azure/storage/blob/models/BlobGetPropertiesHeaders.java +++ b/storage/client/src/main/java/com/azure/storage/blob/models/BlobGetPropertiesHeaders.java @@ -6,10 +6,10 @@ import com.azure.core.annotations.HeaderCollection; import com.azure.core.implementation.DateTimeRfc1123; +import com.azure.core.implementation.util.ImplUtils; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; import java.time.OffsetDateTime; -import java.util.Arrays; import java.util.Map; /** @@ -774,7 +774,7 @@ public BlobGetPropertiesHeaders eTag(String eTag) { * @return the contentMD5 value. */ public byte[] contentMD5() { - return Arrays.copyOf(this.contentMD5, this.contentMD5.length); + return ImplUtils.clone(this.contentMD5); } /** @@ -786,7 +786,7 @@ public byte[] contentMD5() { * @return the BlobGetPropertiesHeaders object itself. */ public BlobGetPropertiesHeaders contentMD5(byte[] contentMD5) { - this.contentMD5 = Arrays.copyOf(contentMD5, contentMD5.length); + this.contentMD5 = ImplUtils.clone(contentMD5); return this; } diff --git a/storage/client/src/main/java/com/azure/storage/blob/models/BlobHTTPHeaders.java b/storage/client/src/main/java/com/azure/storage/blob/models/BlobHTTPHeaders.java index 7204a04bd37d3..aafbd1eb56e5e 100644 --- a/storage/client/src/main/java/com/azure/storage/blob/models/BlobHTTPHeaders.java +++ b/storage/client/src/main/java/com/azure/storage/blob/models/BlobHTTPHeaders.java @@ -4,9 +4,9 @@ package com.azure.storage.blob.models; +import com.azure.core.implementation.util.ImplUtils; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; -import java.util.Arrays; /** * Additional parameters for a set of operations. @@ -111,7 +111,7 @@ public BlobHTTPHeaders blobContentType(String blobContentType) { * @return the blobContentMD5 value. */ public byte[] blobContentMD5() { - return Arrays.copyOf(this.blobContentMD5, this.blobContentMD5.length); + return ImplUtils.clone(this.blobContentMD5); } /** @@ -123,7 +123,7 @@ public byte[] blobContentMD5() { * @return the BlobHTTPHeaders object itself. */ public BlobHTTPHeaders blobContentMD5(byte[] blobContentMD5) { - this.blobContentMD5 = Arrays.copyOf(blobContentMD5, blobContentMD5.length); + this.blobContentMD5 = ImplUtils.clone(blobContentMD5); return this; } diff --git a/storage/client/src/main/java/com/azure/storage/blob/models/BlobProperties.java b/storage/client/src/main/java/com/azure/storage/blob/models/BlobProperties.java index 40c7c7753c124..a297fe40e0432 100644 --- a/storage/client/src/main/java/com/azure/storage/blob/models/BlobProperties.java +++ b/storage/client/src/main/java/com/azure/storage/blob/models/BlobProperties.java @@ -5,10 +5,10 @@ package com.azure.storage.blob.models; import com.azure.core.implementation.DateTimeRfc1123; +import com.azure.core.implementation.util.ImplUtils; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; import java.time.OffsetDateTime; -import java.util.Arrays; /** * Properties of a blob. @@ -364,7 +364,7 @@ public BlobProperties contentLanguage(String contentLanguage) { * @return the contentMD5 value. */ public byte[] contentMD5() { - return Arrays.copyOf(this.contentMD5, this.contentMD5.length); + return ImplUtils.clone(this.contentMD5); } /** @@ -374,7 +374,7 @@ public byte[] contentMD5() { * @return the BlobProperties object itself. */ public BlobProperties contentMD5(byte[] contentMD5) { - this.contentMD5 = Arrays.copyOf(contentMD5, contentMD5.length); + this.contentMD5 = ImplUtils.clone(contentMD5); return this; } diff --git a/storage/client/src/main/java/com/azure/storage/blob/models/BlockBlobCommitBlockListHeaders.java b/storage/client/src/main/java/com/azure/storage/blob/models/BlockBlobCommitBlockListHeaders.java index f6efdc50537da..a2605e19cf430 100644 --- a/storage/client/src/main/java/com/azure/storage/blob/models/BlockBlobCommitBlockListHeaders.java +++ b/storage/client/src/main/java/com/azure/storage/blob/models/BlockBlobCommitBlockListHeaders.java @@ -5,10 +5,10 @@ package com.azure.storage.blob.models; import com.azure.core.implementation.DateTimeRfc1123; +import com.azure.core.implementation.util.ImplUtils; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; import java.time.OffsetDateTime; -import java.util.Arrays; /** * Defines headers for CommitBlockList operation. @@ -156,7 +156,7 @@ public BlockBlobCommitBlockListHeaders lastModified(OffsetDateTime lastModified) * @return the contentMD5 value. */ public byte[] contentMD5() { - return Arrays.copyOf(this.contentMD5, this.contentMD5.length); + return ImplUtils.clone(this.contentMD5); } /** @@ -168,7 +168,7 @@ public byte[] contentMD5() { * @return the BlockBlobCommitBlockListHeaders object itself. */ public BlockBlobCommitBlockListHeaders contentMD5(byte[] contentMD5) { - this.contentMD5 = Arrays.copyOf(contentMD5, contentMD5.length); + this.contentMD5 = ImplUtils.clone(contentMD5); return this; } diff --git a/storage/client/src/main/java/com/azure/storage/blob/models/BlockBlobStageBlockFromURLHeaders.java b/storage/client/src/main/java/com/azure/storage/blob/models/BlockBlobStageBlockFromURLHeaders.java index 5d7336abf0e56..1b434c919342f 100644 --- a/storage/client/src/main/java/com/azure/storage/blob/models/BlockBlobStageBlockFromURLHeaders.java +++ b/storage/client/src/main/java/com/azure/storage/blob/models/BlockBlobStageBlockFromURLHeaders.java @@ -5,10 +5,10 @@ package com.azure.storage.blob.models; import com.azure.core.implementation.DateTimeRfc1123; +import com.azure.core.implementation.util.ImplUtils; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; import java.time.OffsetDateTime; -import java.util.Arrays; /** * Defines headers for StageBlockFromURL operation. @@ -75,7 +75,7 @@ public final class BlockBlobStageBlockFromURLHeaders { * @return the contentMD5 value. */ public byte[] contentMD5() { - return Arrays.copyOf(this.contentMD5, this.contentMD5.length); + return ImplUtils.clone(this.contentMD5); } /** @@ -87,7 +87,7 @@ public byte[] contentMD5() { * @return the BlockBlobStageBlockFromURLHeaders object itself. */ public BlockBlobStageBlockFromURLHeaders contentMD5(byte[] contentMD5) { - this.contentMD5 = Arrays.copyOf(contentMD5, contentMD5.length); + this.contentMD5 = ImplUtils.clone(contentMD5); return this; } diff --git a/storage/client/src/main/java/com/azure/storage/blob/models/BlockBlobStageBlockHeaders.java b/storage/client/src/main/java/com/azure/storage/blob/models/BlockBlobStageBlockHeaders.java index 2a89a98b66c19..40108ad24ec2a 100644 --- a/storage/client/src/main/java/com/azure/storage/blob/models/BlockBlobStageBlockHeaders.java +++ b/storage/client/src/main/java/com/azure/storage/blob/models/BlockBlobStageBlockHeaders.java @@ -5,10 +5,10 @@ package com.azure.storage.blob.models; import com.azure.core.implementation.DateTimeRfc1123; +import com.azure.core.implementation.util.ImplUtils; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; import java.time.OffsetDateTime; -import java.util.Arrays; /** * Defines headers for StageBlock operation. @@ -75,7 +75,7 @@ public final class BlockBlobStageBlockHeaders { * @return the contentMD5 value. */ public byte[] contentMD5() { - return Arrays.copyOf(this.contentMD5, this.contentMD5.length); + return ImplUtils.clone(this.contentMD5); } /** @@ -87,7 +87,7 @@ public byte[] contentMD5() { * @return the BlockBlobStageBlockHeaders object itself. */ public BlockBlobStageBlockHeaders contentMD5(byte[] contentMD5) { - this.contentMD5 = Arrays.copyOf(contentMD5, contentMD5.length); + this.contentMD5 = ImplUtils.clone(contentMD5); return this; } diff --git a/storage/client/src/main/java/com/azure/storage/blob/models/BlockBlobUploadHeaders.java b/storage/client/src/main/java/com/azure/storage/blob/models/BlockBlobUploadHeaders.java index 5cf60bf831dd0..e5430e100e6ea 100644 --- a/storage/client/src/main/java/com/azure/storage/blob/models/BlockBlobUploadHeaders.java +++ b/storage/client/src/main/java/com/azure/storage/blob/models/BlockBlobUploadHeaders.java @@ -5,10 +5,10 @@ package com.azure.storage.blob.models; import com.azure.core.implementation.DateTimeRfc1123; +import com.azure.core.implementation.util.ImplUtils; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; import java.time.OffsetDateTime; -import java.util.Arrays; /** * Defines headers for Upload operation. @@ -156,7 +156,7 @@ public BlockBlobUploadHeaders lastModified(OffsetDateTime lastModified) { * @return the contentMD5 value. */ public byte[] contentMD5() { - return Arrays.copyOf(this.contentMD5, this.contentMD5.length); + return ImplUtils.clone(this.contentMD5); } /** @@ -168,7 +168,7 @@ public byte[] contentMD5() { * @return the BlockBlobUploadHeaders object itself. */ public BlockBlobUploadHeaders contentMD5(byte[] contentMD5) { - this.contentMD5 = Arrays.copyOf(contentMD5, contentMD5.length); + this.contentMD5 = ImplUtils.clone(contentMD5); return this; } diff --git a/storage/client/src/main/java/com/azure/storage/blob/models/BlockItem.java b/storage/client/src/main/java/com/azure/storage/blob/models/BlockItem.java new file mode 100644 index 0000000000000..4920fd7334a14 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/models/BlockItem.java @@ -0,0 +1,46 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) AutoRest Code Generator. + +package com.azure.storage.blob.models; + +/** + * Represents a single block in a block blob. + */ +public final class BlockItem { + /* Internal block object. */ + private Block block; + + private boolean isCommitted; + + /** + * Creates an instance of a BlobItem. + * @param block the API blob object + * @param isCommitted if the blob is committed + */ + public BlockItem(Block block, boolean isCommitted) { + this.block = block; + this.isCommitted = isCommitted; + } + + /** + * @return the base64 encoded block ID. + */ + public String name() { + return this.block.name(); + } + + /** + * @return the block size in bytes. + */ + public int size() { + return this.block.size(); + } + + /** + * @return if the block has been committed. + */ + public boolean isCommitted() { + return isCommitted; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/blob/models/PageBlobClearPagesHeaders.java b/storage/client/src/main/java/com/azure/storage/blob/models/PageBlobClearPagesHeaders.java index 46a965bb0e028..45f30cefd53a1 100644 --- a/storage/client/src/main/java/com/azure/storage/blob/models/PageBlobClearPagesHeaders.java +++ b/storage/client/src/main/java/com/azure/storage/blob/models/PageBlobClearPagesHeaders.java @@ -5,10 +5,10 @@ package com.azure.storage.blob.models; import com.azure.core.implementation.DateTimeRfc1123; +import com.azure.core.implementation.util.ImplUtils; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; import java.time.OffsetDateTime; -import java.util.Arrays; /** * Defines headers for ClearPages operation. @@ -138,7 +138,7 @@ public PageBlobClearPagesHeaders lastModified(OffsetDateTime lastModified) { * @return the contentMD5 value. */ public byte[] contentMD5() { - return Arrays.copyOf(this.contentMD5, this.contentMD5.length); + return ImplUtils.clone(this.contentMD5); } /** @@ -150,7 +150,7 @@ public byte[] contentMD5() { * @return the PageBlobClearPagesHeaders object itself. */ public PageBlobClearPagesHeaders contentMD5(byte[] contentMD5) { - this.contentMD5 = Arrays.copyOf(contentMD5, contentMD5.length); + this.contentMD5 = ImplUtils.clone(contentMD5); return this; } diff --git a/storage/client/src/main/java/com/azure/storage/blob/models/PageBlobCreateHeaders.java b/storage/client/src/main/java/com/azure/storage/blob/models/PageBlobCreateHeaders.java index d3b05ecd4c4e3..215c6f4c934aa 100644 --- a/storage/client/src/main/java/com/azure/storage/blob/models/PageBlobCreateHeaders.java +++ b/storage/client/src/main/java/com/azure/storage/blob/models/PageBlobCreateHeaders.java @@ -5,10 +5,10 @@ package com.azure.storage.blob.models; import com.azure.core.implementation.DateTimeRfc1123; +import com.azure.core.implementation.util.ImplUtils; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; import java.time.OffsetDateTime; -import java.util.Arrays; /** * Defines headers for Create operation. @@ -156,7 +156,7 @@ public PageBlobCreateHeaders lastModified(OffsetDateTime lastModified) { * @return the contentMD5 value. */ public byte[] contentMD5() { - return Arrays.copyOf(this.contentMD5, this.contentMD5.length); + return ImplUtils.clone(this.contentMD5); } /** @@ -168,7 +168,7 @@ public byte[] contentMD5() { * @return the PageBlobCreateHeaders object itself. */ public PageBlobCreateHeaders contentMD5(byte[] contentMD5) { - this.contentMD5 = Arrays.copyOf(contentMD5, contentMD5.length); + this.contentMD5 = ImplUtils.clone(contentMD5); return this; } diff --git a/storage/client/src/main/java/com/azure/storage/blob/models/PageBlobUploadPagesFromURLHeaders.java b/storage/client/src/main/java/com/azure/storage/blob/models/PageBlobUploadPagesFromURLHeaders.java index 4d7b695f266af..ad21b582b509e 100644 --- a/storage/client/src/main/java/com/azure/storage/blob/models/PageBlobUploadPagesFromURLHeaders.java +++ b/storage/client/src/main/java/com/azure/storage/blob/models/PageBlobUploadPagesFromURLHeaders.java @@ -5,10 +5,10 @@ package com.azure.storage.blob.models; import com.azure.core.implementation.DateTimeRfc1123; +import com.azure.core.implementation.util.ImplUtils; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; import java.time.OffsetDateTime; -import java.util.Arrays; /** * Defines headers for UploadPagesFromURL operation. @@ -146,7 +146,7 @@ public PageBlobUploadPagesFromURLHeaders lastModified(OffsetDateTime lastModifie * @return the contentMD5 value. */ public byte[] contentMD5() { - return Arrays.copyOf(this.contentMD5, this.contentMD5.length); + return ImplUtils.clone(this.contentMD5); } /** @@ -158,7 +158,7 @@ public byte[] contentMD5() { * @return the PageBlobUploadPagesFromURLHeaders object itself. */ public PageBlobUploadPagesFromURLHeaders contentMD5(byte[] contentMD5) { - this.contentMD5 = Arrays.copyOf(contentMD5, contentMD5.length); + this.contentMD5 = ImplUtils.clone(contentMD5); return this; } diff --git a/storage/client/src/main/java/com/azure/storage/blob/models/PageBlobUploadPagesHeaders.java b/storage/client/src/main/java/com/azure/storage/blob/models/PageBlobUploadPagesHeaders.java index e400b00dcfe70..29cf41bae0b8e 100644 --- a/storage/client/src/main/java/com/azure/storage/blob/models/PageBlobUploadPagesHeaders.java +++ b/storage/client/src/main/java/com/azure/storage/blob/models/PageBlobUploadPagesHeaders.java @@ -5,10 +5,10 @@ package com.azure.storage.blob.models; import com.azure.core.implementation.DateTimeRfc1123; +import com.azure.core.implementation.util.ImplUtils; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; import java.time.OffsetDateTime; -import java.util.Arrays; /** * Defines headers for UploadPages operation. @@ -154,7 +154,7 @@ public PageBlobUploadPagesHeaders lastModified(OffsetDateTime lastModified) { * @return the contentMD5 value. */ public byte[] contentMD5() { - return Arrays.copyOf(this.contentMD5, this.contentMD5.length); + return ImplUtils.clone(this.contentMD5); } /** @@ -166,7 +166,7 @@ public byte[] contentMD5() { * @return the PageBlobUploadPagesHeaders object itself. */ public PageBlobUploadPagesHeaders contentMD5(byte[] contentMD5) { - this.contentMD5 = Arrays.copyOf(contentMD5, contentMD5.length); + this.contentMD5 = ImplUtils.clone(contentMD5); return this; } diff --git a/storage/client/src/main/java/com/azure/storage/blob/package-info.java b/storage/client/src/main/java/com/azure/storage/blob/package-info.java new file mode 100644 index 0000000000000..77e28d3d565ae --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/blob/package-info.java @@ -0,0 +1,9 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) AutoRest Code Generator + +/** + * This package contains the classes for StorageClient. + * Storage Client. + */ +package com.azure.storage.blob; diff --git a/storage/client/src/main/java/com/azure/storage/file/implementation/AzureFileStorageBuilder.java b/storage/client/src/main/java/com/azure/storage/file/implementation/AzureFileStorageBuilder.java new file mode 100644 index 0000000000000..6dced97e93c08 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/file/implementation/AzureFileStorageBuilder.java @@ -0,0 +1,83 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) AutoRest Code Generator. + +package com.azure.storage.file.implementation; + +import com.azure.core.http.HttpPipeline; +import com.azure.core.implementation.RestProxy; + +/** + * A appendBlobClientBuilder for creating a new instance of the AzureFileStorage type. + */ +public final class AzureFileStorageBuilder { + /* + * Specifies the version of the operation to use for this request. + */ + private String version; + + /** + * Sets Specifies the version of the operation to use for this request. + * + * @param version the version value. + * @return the AzureFileStorageBuilder. + */ + public AzureFileStorageBuilder version(String version) { + this.version = version; + return this; + } + + /* + * The URL of the service account, share, directory or file that is the target of the desired operation. + */ + private String url; + + /** + * Sets The URL of the service account, share, directory or file that is the target of the desired operation. + * + * @param url the url value. + * @return the AzureFileStorageBuilder. + */ + public AzureFileStorageBuilder url(String url) { + this.url = url; + return this; + } + + /* + * The HTTP pipeline to send requests through + */ + private HttpPipeline pipeline; + + /** + * Sets The HTTP pipeline to send requests through. + * + * @param pipeline the pipeline value. + * @return the AzureFileStorageBuilder. + */ + public AzureFileStorageBuilder pipeline(HttpPipeline pipeline) { + this.pipeline = pipeline; + return this; + } + + /** + * Builds an instance of AzureFileStorageImpl with the provided parameters. + * + * @return an instance of AzureFileStorageImpl. + */ + public AzureFileStorageImpl build() { + if (version == null) { + this.version = "2018-11-09"; + } + if (pipeline == null) { + this.pipeline = RestProxy.createDefaultPipeline(); + } + AzureFileStorageImpl client = new AzureFileStorageImpl(pipeline); + if (this.version != null) { + client.version(this.version); + } + if (this.url != null) { + client.url(this.url); + } + return client; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/file/implementation/AzureFileStorageImpl.java b/storage/client/src/main/java/com/azure/storage/file/implementation/AzureFileStorageImpl.java index 44882a3e8a31d..ec39450873537 100644 --- a/storage/client/src/main/java/com/azure/storage/file/implementation/AzureFileStorageImpl.java +++ b/storage/client/src/main/java/com/azure/storage/file/implementation/AzureFileStorageImpl.java @@ -32,7 +32,7 @@ public String version() { * @param version the version value. * @return the service client itself. */ - public AzureFileStorageImpl withVersion(String version) { + AzureFileStorageImpl version(String version) { this.version = version; return this; } @@ -57,7 +57,7 @@ public String url() { * @param url the url value. * @return the service client itself. */ - public AzureFileStorageImpl withUrl(String url) { + AzureFileStorageImpl url(String url) { this.url = url; return this; } @@ -132,7 +132,6 @@ public AzureFileStorageImpl() { */ public AzureFileStorageImpl(HttpPipeline httpPipeline) { super(httpPipeline); - this.version = "2018-11-09"; this.services = new ServicesImpl(this); this.shares = new SharesImpl(this); this.directorys = new DirectorysImpl(this); diff --git a/storage/client/src/main/java/com/azure/storage/queue/QueueAsyncClient.java b/storage/client/src/main/java/com/azure/storage/queue/QueueAsyncClient.java index 0b3cc59af7fd2..eab2a19707d5c 100644 --- a/storage/client/src/main/java/com/azure/storage/queue/QueueAsyncClient.java +++ b/storage/client/src/main/java/com/azure/storage/queue/QueueAsyncClient.java @@ -2,6 +2,7 @@ import com.azure.core.ServiceClient; import com.azure.core.http.HttpPipeline; +import com.azure.storage.queue.implementation.AzureQueueStorageBuilder; import com.azure.storage.queue.implementation.AzureQueueStorageImpl; import java.net.URL; @@ -14,13 +15,13 @@ public class QueueAsyncClient extends ServiceClient { private QueueAsyncClient(URL endpoint, HttpPipeline httpPipeline) { super(httpPipeline); this.endpoint = endpoint.toString(); - this.generateClient = new AzureQueueStorageImpl(httpPipeline).withUrl(this.endpoint); + this.generateClient = new AzureQueueStorageBuilder().pipeline(httpPipeline).url(this.endpoint).build(); this.apiVersion = this.generateClient.version(); } /** - * Creates a builder that can configure options for the SecretAsyncClient before creating an instance of it. - * @return A new builder to create a SecretAsyncClient from. + * Creates a appendBlobClientBuilder that can configure options for the SecretAsyncClient before creating an instance of it. + * @return A new appendBlobClientBuilder to create a SecretAsyncClient from. */ public static QueueAsyncClientBuilder builder() { return new QueueAsyncClientBuilder(); diff --git a/storage/client/src/main/java/com/azure/storage/queue/implementation/AzureQueueStorageBuilder.java b/storage/client/src/main/java/com/azure/storage/queue/implementation/AzureQueueStorageBuilder.java new file mode 100644 index 0000000000000..4f678dfe10832 --- /dev/null +++ b/storage/client/src/main/java/com/azure/storage/queue/implementation/AzureQueueStorageBuilder.java @@ -0,0 +1,83 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) AutoRest Code Generator. + +package com.azure.storage.queue.implementation; + +import com.azure.core.http.HttpPipeline; +import com.azure.core.implementation.RestProxy; + +/** + * A appendBlobClientBuilder for creating a new instance of the AzureQueueStorage type. + */ +public final class AzureQueueStorageBuilder { + /* + * The URL of the service account, queue or message that is the targe of the desired operation. + */ + private String url; + + /** + * Sets The URL of the service account, queue or message that is the targe of the desired operation. + * + * @param url the url value. + * @return the AzureQueueStorageBuilder. + */ + public AzureQueueStorageBuilder url(String url) { + this.url = url; + return this; + } + + /* + * Specifies the version of the operation to use for this request. + */ + private String version; + + /** + * Sets Specifies the version of the operation to use for this request. + * + * @param version the version value. + * @return the AzureQueueStorageBuilder. + */ + public AzureQueueStorageBuilder version(String version) { + this.version = version; + return this; + } + + /* + * The HTTP pipeline to send requests through + */ + private HttpPipeline pipeline; + + /** + * Sets The HTTP pipeline to send requests through. + * + * @param pipeline the pipeline value. + * @return the AzureQueueStorageBuilder. + */ + public AzureQueueStorageBuilder pipeline(HttpPipeline pipeline) { + this.pipeline = pipeline; + return this; + } + + /** + * Builds an instance of AzureQueueStorageImpl with the provided parameters. + * + * @return an instance of AzureQueueStorageImpl. + */ + public AzureQueueStorageImpl build() { + if (version == null) { + this.version = "2018-03-28"; + } + if (pipeline == null) { + this.pipeline = RestProxy.createDefaultPipeline(); + } + AzureQueueStorageImpl client = new AzureQueueStorageImpl(pipeline); + if (this.url != null) { + client.url(this.url); + } + if (this.version != null) { + client.version(this.version); + } + return client; + } +} diff --git a/storage/client/src/main/java/com/azure/storage/queue/implementation/AzureQueueStorageImpl.java b/storage/client/src/main/java/com/azure/storage/queue/implementation/AzureQueueStorageImpl.java index 354ba1b5e2a0e..1573b9c7e05bd 100644 --- a/storage/client/src/main/java/com/azure/storage/queue/implementation/AzureQueueStorageImpl.java +++ b/storage/client/src/main/java/com/azure/storage/queue/implementation/AzureQueueStorageImpl.java @@ -32,7 +32,7 @@ public String url() { * @param url the url value. * @return the service client itself. */ - public AzureQueueStorageImpl withUrl(String url) { + AzureQueueStorageImpl url(String url) { this.url = url; return this; } @@ -57,7 +57,7 @@ public String version() { * @param version the version value. * @return the service client itself. */ - public AzureQueueStorageImpl withVersion(String version) { + AzureQueueStorageImpl version(String version) { this.version = version; return this; } @@ -132,7 +132,6 @@ public AzureQueueStorageImpl() { */ public AzureQueueStorageImpl(HttpPipeline httpPipeline) { super(httpPipeline); - this.version = "2018-03-28"; this.services = new ServicesImpl(this); this.queues = new QueuesImpl(this); this.messages = new MessagesImpl(this); diff --git a/storage/client/src/test/java/com/azure/storage/blob/APISpec.groovy b/storage/client/src/test/java/com/azure/storage/blob/APISpec.groovy new file mode 100644 index 0000000000000..8f8e9c0c0cc73 --- /dev/null +++ b/storage/client/src/test/java/com/azure/storage/blob/APISpec.groovy @@ -0,0 +1,608 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob + + +import com.azure.core.http.* +import com.azure.core.http.policy.HttpLogDetailLevel +import com.azure.core.http.policy.HttpPipelinePolicy +import com.azure.core.util.Context +import com.azure.storage.blob.models.* +import com.microsoft.aad.adal4j.AuthenticationContext +import com.microsoft.aad.adal4j.ClientCredential +import org.junit.Assume +import org.spockframework.lang.ISpecificationContext +import reactor.core.publisher.Flux +import reactor.core.publisher.Mono +import spock.lang.Shared +import spock.lang.Specification + +import java.nio.ByteBuffer +import java.nio.charset.StandardCharsets +import java.time.OffsetDateTime +import java.util.concurrent.Executors +import java.util.function.Supplier + +class APISpec extends Specification { + static final String RECORD_MODE = "RECORD" + + @Shared + Integer iterationNo = 0 // Used to generate stable container names for recording tests with multiple iterations. + + Integer entityNo = 0 // Used to generate stable container names for recording tests requiring multiple containers. + + @Shared + ContainerClient cu + + // Fields used for conveniently creating blobs with data. + static final String defaultText = "default" + + static final ByteBuffer defaultData = ByteBuffer.wrap(defaultText.getBytes(StandardCharsets.UTF_8)) + + static final Flux defaultFlux = Flux.just(defaultData) + + static final Supplier defaultInputStream = new Supplier() { + @Override + InputStream get() { + return new ByteArrayInputStream(defaultText.getBytes(StandardCharsets.UTF_8)) + } + } + + static defaultDataSize = defaultData.remaining() + + // If debugging is enabled, recordings cannot run as there can only be one proxy at a time. + static boolean enableDebugging = false + + // Prefixes for blobs and containers + static String containerPrefix = "jtc" // java test container + + static String blobPrefix = "javablob" + + /* + The values below are used to create data-driven tests for access conditions. + */ + static final OffsetDateTime oldDate = OffsetDateTime.now().minusDays(1) + + static final OffsetDateTime newDate = OffsetDateTime.now().plusDays(1) + + /* + Note that this value is only used to check if we are depending on the received etag. This value will not actually + be used. + */ + static final String receivedEtag = "received" + + static final String garbageEtag = "garbage" + + /* + Note that this value is only used to check if we are depending on the received etag. This value will not actually + be used. + */ + static final String receivedLeaseID = "received" + + static final String garbageLeaseID = UUID.randomUUID().toString() + + /* + Credentials for various kinds of accounts. + */ + @Shared + static SharedKeyCredentials primaryCreds + + @Shared + static SharedKeyCredentials alternateCreds + + /* + URLs to various kinds of accounts. + */ + StorageClient primaryServiceURL + + @Shared + static StorageClient alternateServiceURL + + @Shared + static StorageClient blobStorageServiceURL + + @Shared + static StorageClient premiumServiceURL + + /* + Constants for testing that the context parameter is properly passed to the pipeline. + */ + static final String defaultContextKey = "Key" + + static final String defaultContextValue = "Value" + + static final Context defaultContext = new Context(defaultContextKey, defaultContextValue) + + static String getTestName(ISpecificationContext ctx) { + return ctx.getCurrentFeature().name.replace(' ', '').toLowerCase() + } + + def generateContainerName() { + generateContainerName(specificationContext, iterationNo, entityNo++) + } + + def generateBlobName() { + generateBlobName(specificationContext, iterationNo, entityNo++) + } + + /** + * This function generates an entity name by concatenating the passed prefix, the name of the test requesting the + * entity name, and some unique suffix. This ensures that the entity name is unique for each test so there are + * no conflicts on the service. If we are not recording, we can just use the time. If we are recording, the suffix + * must always be the same so we can match requests. To solve this, we use the entityNo for how many entities have + * already been created by this test so far. This would sufficiently distinguish entities within a recording, but + * could still yield duplicates on the service for data-driven tests. Therefore, we also add the iteration number + * of the data driven tests. + * + * @param specificationContext + * Used to obtain the name of the test running. + * @param prefix + * Used to group all entities created by these tests under common prefixes. Useful for listing. + * @param iterationNo + * Indicates which iteration of a data-driven test is being executed. + * @param entityNo + * Indicates how man entities have been created by the test so far. This distinguishes multiple containers + * or multiple blobs created by the same test. Only used when dealing with recordings. + * @return + */ + static String generateResourceName(ISpecificationContext specificationContext, String prefix, int iterationNo, + int entityNo) { + String suffix = "" + suffix += System.currentTimeMillis() // For uniqueness between runs. + suffix += entityNo // For easy identification of which call created this resource. + return prefix + getTestName(specificationContext).take(63 - suffix.length() - prefix.length()) + suffix + } + + static int updateIterationNo(ISpecificationContext specificationContext, int iterationNo) { + if (specificationContext.currentIteration.estimatedNumIterations > 1) { + return iterationNo + 1 + } else { + return 0 + } + } + + static String generateContainerName(ISpecificationContext specificationContext, int iterationNo, int entityNo) { + return generateResourceName(specificationContext, containerPrefix, iterationNo, entityNo) + } + + static String generateBlobName(ISpecificationContext specificationContext, int iterationNo, int entityNo) { + return generateResourceName(specificationContext, blobPrefix, iterationNo, entityNo) + } + + static void setupFeatureRecording(String sceneName) { + + } + + static void scrubAuthHeader(String sceneName) { + + } + + static getEnvironmentVariable(String variable){ + String envVariable = System.getenv().get(variable) + if(envVariable == null){ + envVariable = "" + } + return envVariable + } + + static getGenericCreds(String accountType) { + String accountName = getEnvironmentVariable(accountType + "ACCOUNT_NAME") + String accountKey = getEnvironmentVariable(accountType + "ACCOUNT_KEY") + + if (accountName == null || accountKey == null) { + System.out.println("Account name or key for the " + accountType + " account was null. Test's requiring " + + "these credentials will fail.") + return null + } + return new SharedKeyCredentials(accountName, accountKey) + } + + static HttpClient getHttpClient() { + if (enableDebugging) { + return HttpClient.createDefault().proxy(new Supplier() { + @Override + ProxyOptions get() { + return new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888)) + } + }) + } else return HttpClient.createDefault() + } + + static StorageClient getGenericServiceURL(SharedKeyCredentials creds) { + // TODO: logging? + + return StorageClient.storageClientBuilder() + .endpoint("https://" + creds.getAccountName() + ".blob.core.windows.net") + .httpClient(getHttpClient()) + .httpLogDetailLevel(HttpLogDetailLevel.BASIC) + .credentials(primaryCreds) + .buildClient() + } + + static void cleanupContainers() throws MalformedURLException { + StorageClient serviceURL = StorageClient.storageClientBuilder() + .endpoint("http://" + primaryCreds.accountName + ".blob.core.windows.net") + .credentials(primaryCreds) + .buildClient() + // There should not be more than 5000 containers from these tests + for (ContainerItem c : serviceURL.listContainersSegment(null, + new ListContainersOptions().withPrefix(containerPrefix))) { + ContainerClient containerURL = serviceURL.getContainerClient(c.name()) + if (c.properties().leaseState().equals(LeaseStateType.LEASED)) { + containerURL.breakLease(0, null, null).block() + } + containerURL.delete() + } + } + + /* + Size must be an int because ByteBuffer sizes can only be an int. Long is not supported. + */ + static ByteBuffer getRandomData(int size) { + Random rand = new Random(getRandomSeed()) + byte[] data = new byte[size] + rand.nextBytes(data) + return ByteBuffer.wrap(data) + } + + /* + We only allow int because anything larger than 2GB (which would require a long) is left to stress/perf. + */ + static File getRandomFile(int size) { + File file = File.createTempFile(UUID.randomUUID().toString(), ".txt") + file.deleteOnExit() + FileOutputStream fos = new FileOutputStream(file) + fos.write(getRandomData(size).array()) + fos.close() + return file + } + + static long getRandomSeed() { + return System.currentTimeMillis() + } + + def setupSpec() { + /* + We'll let primary creds throw and crash if there are no credentials specified because everything else will fail. + */ + primaryCreds = getGenericCreds("PRIMARY_STORAGE_") + + /* + It's feasible someone wants to test a specific subset of tests, so we'll still attempt to create each of the + ServiceURLs separately. We don't really need to take any action here, as we've already reported to the user, + so we just swallow the exception and let the relevant tests fail later. Perhaps we can add annotations or + something in the future. + */ + try { + alternateCreds = getGenericCreds("SECONDARY_STORAGE_") + alternateServiceURL = getGenericServiceURL(alternateCreds) + } + catch (Exception e) { + } + try { + blobStorageServiceURL = getGenericServiceURL(getGenericCreds("BLOB_STORAGE_")) + } + catch (Exception e) { + } + try { + premiumServiceURL = getGenericServiceURL(getGenericCreds("PREMIUM_STORAGE_")) + } + catch (Exception e) { + } + } + + def cleanupSpec() { + Assume.assumeTrue("The test only runs in Live mode.", getTestMode().equalsIgnoreCase(RECORD_MODE)) + cleanupContainers() + } + + def setup() { + Assume.assumeTrue("The test only runs in Live mode.", getTestMode().equalsIgnoreCase(RECORD_MODE)) + String containerName = generateContainerName() + + primaryServiceURL = getGenericServiceURL(primaryCreds) + cu = primaryServiceURL.getContainerClient(containerName) + cu.create() + } + + def cleanup() { + // TODO: Scrub auth header here? + iterationNo = updateIterationNo(specificationContext, iterationNo) + } + + /** + * This will retrieve the etag to be used in testing match conditions. The result will typically be assigned to + * the ifMatch condition when testing success and the ifNoneMatch condition when testing failure. + * + * @param bu + * The URL to the blob to get the etag on. + * @param match + * The ETag value for this test. If {@code receivedEtag} is passed, that will signal that the test is expecting + * the blob's actual etag for this test, so it is retrieved. + * @return + * The appropriate etag value to run the current test. + */ + def setupBlobMatchCondition(BlobClient bu, String match) { + if (match == receivedEtag) { + BlobGetPropertiesHeaders headers = bu.getProperties(null, null) + return headers.eTag() + } else { + return match + } + } + + /** + * This helper method will acquire a lease on a blob to prepare for testing leaseAccessConditions. We want to test + * against a valid lease in both the success and failure cases to guarantee that the results actually indicate + * proper setting of the header. If we pass null, though, we don't want to acquire a lease, as that will interfere + * with other AC tests. + * + * @param bu + * The blob on which to acquire a lease. + * @param leaseID + * The signalID. Values should only ever be {@code receivedLeaseID}, {@code garbageLeaseID}, or {@code null}. + * @return + * The actual leaseAccessConditions of the blob if recievedLeaseID is passed, otherwise whatever was passed will be + * returned. + */ + def setupBlobLeaseCondition(BlobClient bu, String leaseID) { + String responseLeaseId = null + if (leaseID == receivedLeaseID || leaseID == garbageLeaseID) { + responseLeaseId = bu.acquireLease(null, -1, null, null) + } + if (leaseID == receivedLeaseID) { + return responseLeaseId + } else { + return leaseID + } + } + + def setupContainerMatchCondition(ContainerClient cu, String match) { + if (match == receivedEtag) { + return cu.getProperties().eTag() + } else { + return match + } + } + + def setupContainerLeaseCondition(ContainerClient cu, String leaseID) { + if (leaseID == receivedLeaseID) { + return cu.acquireLease(null, -1).block().deserializedHeaders().leaseId() + } else { + return leaseID + } + } + + def getMockRequest() { + HttpHeaders headers = new HttpHeaders() + headers.set(Constants.HeaderConstants.CONTENT_ENCODING, "en-US") + URL url = new URL("http://devtest.blob.core.windows.net/test-container/test-blob") + HttpRequest request = new HttpRequest(HttpMethod.POST, url, headers, null) + return request + } + +// def waitForCopy(ContainerClient bu, CopyStatusType status) { +// OffsetDateTime start = OffsetDateTime.now() +// while (status != CopyStatusType.SUCCESS) { +// status = bu.getProperties(). +// OffsetDateTime currentTime = OffsetDateTime.now() +// if (status == CopyStatusType.FAILED || currentTime.minusMinutes(1) == start) { +// throw new Exception("Copy failed or took too long") +// } +// sleep(1000) +// } +// } + + /** + * Validates the presence of headers that are present on a large number of responses. These headers are generally + * random and can really only be checked as not null. + * @param headers + * The object (may be headers object or response object) that has properties which expose these common headers. + * @return + * Whether or not the header values are appropriate. + */ + def validateBasicHeaders(Object headers) { + return headers.class.getMethod("eTag").invoke(headers) != null && + // Quotes should be scrubbed from etag header values +// !((String)(headers.class.getMethod("eTag").invoke(headers))).contains("\"") && + headers.class.getMethod("lastModified").invoke(headers) != null && + headers.class.getMethod("requestId").invoke(headers) != null && + headers.class.getMethod("version").invoke(headers) != null && + headers.class.getMethod("dateProperty").invoke(headers) != null + } + + def validateBlobHeaders(Object headers, String cacheControl, String contentDisposition, String contentEncoding, + String contentLangauge, byte[] contentMD5, String contentType) { + return headers.class.getMethod("cacheControl").invoke(headers) == cacheControl && + headers.class.getMethod("contentDisposition").invoke(headers) == contentDisposition && + headers.class.getMethod("contentEncoding").invoke(headers) == contentEncoding && + headers.class.getMethod("contentLanguage").invoke(headers) == contentLangauge && + headers.class.getMethod("contentMD5").invoke(headers) == contentMD5 && + headers.class.getMethod("contentType").invoke(headers) == contentType + + } + + def enableSoftDelete() { + primaryServiceURL.setProperties(new StorageServiceProperties() + .deleteRetentionPolicy(new RetentionPolicy().enabled(true).days(2))) + sleep(30000) // Wait for the policy to take effect. + } + + def disableSoftDelete() { + primaryServiceURL.setProperties(new StorageServiceProperties() + .deleteRetentionPolicy(new RetentionPolicy().enabled(false))) + + sleep(30000) // Wait for the policy to take effect. + } + + + + /* + This method returns a stub of an HttpResponse. This is for when we want to test policies in isolation but don't care + about the status code, so we stub a response that always returns a given value for the status code. We never care + about the number or nature of interactions with this stub. + */ + + def getStubResponse(int code) { + return Stub(HttpResponse) { + statusCode() >> code + } + } + + /* + This is for stubbing responses that will actually go through the pipeline and autorest code. Autorest does not seem + to play too nicely with mocked objects and the complex reflection stuff on both ends made it more difficult to work + with than was worth it. + */ +// def getStubResponse(int code, Class responseHeadersType) { +// return new HttpResponse() { +// +// @Override +// int statusCode() { +// return code +// } +// +// @Override +// String headerValue(String s) { +// return null +// } +// +// @Override +// HttpHeaders headers() { +// return new HttpHeaders() +// } +// +// @Override +// Flux body() { +// return Flowable.empty() +// } +// +// @Override +// Mono bodyAsByteArray() { +// return null +// } +// +// @Override +// Mono bodyAsString() { +// return null +// } +// +// @Override +// Mono bodyAsString(Charset charset) { +// return null +// } +// +// @Override +// Object deserializedHeaders() { +// def headers = responseHeadersType.getConstructor().newInstance() +// +// // If the headers have an etag method, we need to set it to prevent postProcessResponse from breaking. +// try { +// headers.getClass().getMethod("withETag", String.class).invoke(headers, "etag"); +// } +// catch (NoSuchMethodException e) { +// // No op +// } +// return headers +// } +// +// @Override +// boolean isDecoded() { +// return true +// } +// } +// } + + /* + This is for stubbing responses that will actually go through the pipeline and autorest code. Autorest does not seem + to play too nicely with mocked objects and the complex reflection stuff on both ends made it more difficult to work + with than was worth it. Because this type is just for BlobDownload, we don't need to accept a header type. + */ +// def getStubResponseForBlobDownload(int code, Flux body, String etag) { +// return new HttpResponse() { +// +// @Override +// int statusCode() { +// return code +// } +// +// @Override +// String headerValue(String s) { +// return null +// } +// +// @Override +// HttpHeaders headers() { +// return new HttpHeaders() +// } +// +// @Override +// Flux body() { +// return body +// } +// +// @Override +// Mono bodyAsByteArray() { +// return null +// } +// +// @Override +// Mono bodyAsString() { +// return null +// } +// +// @Override +// Mono bodyAsString(Charset charset) { +// return null +// } +// +// @Override +// Object deserializedHeaders() { +// def headers = new BlobDownloadHeaders() +// headers.withETag(etag) +// return headers +// } +// +// @Override +// boolean isDecoded() { +// return true +// } +// } +// } + + def getContextStubPolicy(int successCode, Class responseHeadersType) { + return Mock(HttpPipelinePolicy) { + sendAsync(_) >> { HttpRequest request -> + if (!request.context().getData(defaultContextKey).isPresent()) { + return Mono.error(new RuntimeException("Context key not present.")) + } else { + return Mono.just(getStubResponse(successCode, responseHeadersType)) + } + } + } + } + + def getOAuthServiceURL() { + String tenantId = getEnvironmentVariable("MICROSOFT_AD_TENANT_ID"); + String servicePrincipalId = getEnvironmentVariable("ARM_CLIENTID"); + String servicePrincipalKey = getEnvironmentVariable("ARM_CLIENTKEY"); + + def authority = String.format("https://login.microsoftonline.com/%s/oauth2/token",tenantId); + def credential = new ClientCredential(servicePrincipalId, servicePrincipalKey) + def token = new AuthenticationContext(authority, false, Executors.newFixedThreadPool(1)).acquireToken("https://storage.azure.com", credential, null).get().accessToken + + return StorageClient.storageClientBuilder() + .endpoint(String.format("https://%s.blob.core.windows.net/", primaryCreds.accountName)) + .credentials(new TokenCredentials(token)) + .buildClient() + } + + def getTestMode(){ + String testMode = System.getenv("AZURE_TEST_MODE") + if(testMode == null){ + testMode = "PLAYBACK" + } + return testMode + } +} diff --git a/storage/client/src/test/java/com/azure/storage/blob/BlobAPITest.groovy b/storage/client/src/test/java/com/azure/storage/blob/BlobAPITest.groovy new file mode 100644 index 0000000000000..6897f369e7fed --- /dev/null +++ b/storage/client/src/test/java/com/azure/storage/blob/BlobAPITest.groovy @@ -0,0 +1,2039 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob + + +import com.azure.storage.blob.models.* +import spock.lang.Unroll + +import java.nio.ByteBuffer +import java.security.MessageDigest + +class BlobAPITest extends APISpec { + BlobClient bu + + def setup() { + bu = cu.getBlockBlobClient(generateBlobName()) + bu.upload(defaultInputStream.get(), defaultDataSize) + } + + def "Download all null"() { + when: + ByteArrayOutputStream stream = new ByteArrayOutputStream() + bu.download(stream) + ByteBuffer body = ByteBuffer.wrap(stream.toByteArray()) +// BlobDownloadHeaders headers = response.headers() + + then: + body == defaultData +// headers.metadata().isEmpty() +// headers.contentLength() != null +// headers.contentType() != null +// headers.contentRange() == null +// headers.contentMD5() != null +// headers.contentEncoding() == null +// headers.cacheControl() == null +// headers.contentDisposition() == null +// headers.contentLanguage() == null +// headers.blobSequenceNumber() == null +// headers.blobType() == BlobType.BLOCK_BLOB +// headers.copyCompletionTime() == null +// headers.copyStatusDescription() == null +// headers.copyId() == null +// headers.copyProgress() == null +// headers.copySource() == null +// headers.copyStatus() == null +// headers.leaseDuration() == null +// headers.leaseState() == LeaseStateType.AVAILABLE +// headers.leaseStatus() == LeaseStatusType.UNLOCKED +// headers.acceptRanges() == "bytes" +// headers.blobCommittedBlockCount() == null +// headers.serverEncrypted +// headers.blobContentMD5() == null + } + + def "Download empty file"() { + setup: + bu = cu.getAppendBlobClient("emptyAppendBlob") + bu.create() + + when: + def outStream = new ByteArrayOutputStream() + bu.download(outStream) + def result = outStream.toByteArray() + + then: + notThrown(StorageException) + result.length == 0 + } + + /* + This is to test the appropriate integration of DownloadResponse, including setting the correct range values on + HTTPGetterInfo. + */ + /*def "Download with retry range"() { + *//* + We are going to make a request for some range on a blob. The Flux returned will throw an exception, forcing + a retry per the ReliableDownloadOptions. The next request should have the same range header, which was generated + from the count and offset values in HTTPGetterInfo that was constructed on the initial call to download. We + don't need to check the data here, but we want to ensure that the correct range is set each time. This will + test the correction of a bug that was found which caused HTTPGetterInfo to have an incorrect offset when it was + constructed in BlobClient.download(). + *//* + setup: + def mockPolicy = Mock(HttpPipelinePolicy) { + process(_ as HttpPipelineCallContext, _ as HttpPipelineNextPolicy) >> { + HttpPipelineCallContext context, HttpPipelineNextPolicy next -> + HttpRequest request = context.httpRequest() + if (request.headers().value("x-ms-range") != "bytes=2-6") { + return Mono.error(new IllegalArgumentException("The range header was not set correctly on retry.")) + } + else { + // ETag can be a dummy value. It's not validated, but DownloadResponse requires one + // return Mono.just(getStubResponseForBlobDownload(206, Flux.error(new IOException()), "etag")) + } + } + } + + def pipeline = HttpPipeline.builder().policies(mockPolicy).build() + bu = bu.withPipeline(pipeline) + + when: + def range = new BlobRange().withOffset(2).withCount(5) + def options = new ReliableDownloadOptions().withMaxRetryRequests(3) + bu.download(null, options, range, null, false, null) + + then: + *//* + Because the dummy Flux always throws an error. This will also validate that an IllegalArgumentException is + NOT thrown because the types would not match. + *//* + def e = thrown(RuntimeException) + e.getCause() instanceof IOException + }*/ + + def "Download min"() { + when: + def outStream = new ByteArrayOutputStream() + bu.download(outStream) + byte[] result = outStream.toByteArray() + + then: + result == defaultData.array() + } + + @Unroll + def "Download range"() { + setup: + BlobRange range = new BlobRange().offset(offset).count(count) + + when: + def outStream = new ByteArrayOutputStream() + bu.download(outStream, null, range, null, false, null) + String bodyStr = outStream.toString() + + then: + bodyStr == expectedData + + where: + offset | count || expectedData + 0 | null || defaultText + 0 | 5 || defaultText.substring(0, 5) + 3 | 2 || defaultText.substring(3, 3 + 2) + } + + /*@Unroll + def "Download AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + leaseID = setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions() + .withLeaseAccessConditions(new LeaseAccessConditions().leaseId(leaseID)) + .withModifiedAccessConditions(new ModifiedAccessConditions().ifModifiedSince(modified) + .ifUnmodifiedSince(unmodified) + .ifMatch(match) + .ifNoneMatch(noneMatch)) + + then: + bu.download(null, null, null, bac, false, null).statusCode() == 200 + + where: + modified | unmodified | match | noneMatch | leaseID + null | null | null | null | null + oldDate | null | null | null | null + null | newDate | null | null | null + null | null | receivedEtag | null | null + null | null | null | garbageEtag | null + null | null | null | null | receivedLeaseID + }*/ + + /*@Unroll + def "Download AC fail"() { + setup: + noneMatch = setupBlobMatchCondition(bu, noneMatch) + setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions() + .withLeaseAccessConditions(new LeaseAccessConditions().leaseId(leaseID)) + .withModifiedAccessConditions(new ModifiedAccessConditions() + .ifModifiedSince(modified) + .ifUnmodifiedSince(unmodified) + .ifMatch(match) + .ifNoneMatch(noneMatch)) + + then: + bu.download(null, null, null, bac, false, null).statusCode() == 206 + + where: + modified | unmodified | match | noneMatch | leaseID + newDate | null | null | null | null + null | oldDate | null | null | null + null | null | garbageEtag | null | null + null | null | null | receivedEtag | null + null | null | null | null | garbageLeaseID + }*/ + + /*def "Download md5"() { + expect: + bu.download(null, null, new BlobRange().withOffset(0).withCount(3), null, true, null) + .deserializedHeaders().contentMD5() == + MessageDigest.getInstance("MD5").digest(defaultText.substring(0, 3).getBytes()) + }*/ + + def "Download error"() { + setup: + bu = cu.getBlockBlobClient(generateBlobName()) + + when: + bu.download(null, null, null, null, false, null) + + then: + thrown(StorageException) + } + + /*def "Download context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(206, BlobDownloadHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + bu.download(null) + + then: + notThrown(RuntimeException) + }*/ + + def "Get properties default"() { + when: + BlobGetPropertiesHeaders headers = bu.getProperties(null, null) + + then: + validateBasicHeaders(headers) + headers.metadata().isEmpty() + headers.blobType() == BlobType.BLOCK_BLOB + headers.copyCompletionTime() == null // tested in "copy" + headers.copyStatusDescription() == null // only returned when the service has errors; cannot validate. + headers.copyId() == null // tested in "abort copy" + headers.copyProgress() == null // tested in "copy" + headers.copySource() == null // tested in "copy" + headers.copyStatus() == null // tested in "copy" + headers.isIncrementalCopy() == null // tested in PageBlob."start incremental copy" + headers.destinationSnapshot() == null // tested in PageBlob."start incremental copy" + headers.leaseDuration() == null // tested in "acquire lease" + headers.leaseState() == LeaseStateType.AVAILABLE + headers.leaseStatus() == LeaseStatusType.UNLOCKED + headers.contentLength() != null + headers.contentType() != null + headers.contentMD5() != null + headers.contentEncoding() == null // tested in "set HTTP headers" + headers.contentDisposition() == null // tested in "set HTTP headers" + headers.contentLanguage() == null // tested in "set HTTP headers" + headers.cacheControl() == null // tested in "set HTTP headers" + headers.blobSequenceNumber() == null // tested in PageBlob."create sequence number" + headers.acceptRanges() == "bytes" + headers.blobCommittedBlockCount() == null // tested in AppendBlob."append block" + headers.isServerEncrypted() + headers.accessTier() == AccessTier.HOT.toString() + headers.accessTierInferred() + headers.archiveStatus() == null + headers.creationTime() != null + } + + /*def "Get properties min"() { + expect: + bu.getProperties().blockingGet().statusCode() == 200 + }*/ + + @Unroll + def "Get properties AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + leaseID = setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions() + .withLeaseAccessConditions(new LeaseAccessConditions().leaseId(leaseID)) + .withModifiedAccessConditions(new ModifiedAccessConditions() + .ifModifiedSince(modified) + .ifUnmodifiedSince(unmodified) + .ifMatch(match) + .ifNoneMatch(noneMatch)) + + expect: + bu.getProperties(bac, null) //.blockingGet().statusCode() == 200 + + where: + modified | unmodified | match | noneMatch | leaseID + null | null | null | null | null + oldDate | null | null | null | null + null | newDate | null | null | null + null | null | receivedEtag | null | null + null | null | null | garbageEtag | null + null | null | null | null | receivedLeaseID + } + + /*@Unroll + def "Get properties AC fail"() { + setup: + noneMatch = setupBlobMatchCondition(bu, noneMatch) + setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions() + .withLeaseAccessConditions(new LeaseAccessConditions().leaseId(leaseID)) + .withModifiedAccessConditions(new ModifiedAccessConditions() + .ifModifiedSince(modified) + .ifUnmodifiedSince(unmodified) + .ifMatch(match) + .ifNoneMatch(noneMatch)) + + when: + bu.getProperties(bac, null) + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch | leaseID + newDate | null | null | null | null + null | oldDate | null | null | null + null | null | garbageEtag | null | null + null | null | null | receivedEtag | null + null | null | null | null | garbageLeaseID + }*/ + + /*def "Get properties error"() { + setup: + bu = cu.getBlockBlobClient(generateBlobName()) + + when: + bu.getProperties(null, null) + + then: + thrown(StorageException) + }*/ + + /*def "Get properties context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(200, BlobGetPropertiesHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + bu.getProperties() + + then: + notThrown(RuntimeException) + }*/ + + /*def "Set HTTP headers null"() { + setup: + BlobsSetHTTPHeadersResponse response = bu.setHTTPHeaders(null, null, null) + + expect: + response.statusCode() == 200 + validateBasicHeaders(response.headers()) + response.deserializedHeaders().blobSequenceNumber() == null + }*/ + + def "Set HTTP headers min"() { + when: + bu.setHTTPHeaders(new BlobHTTPHeaders().blobContentType("type")) + + then: + bu.getProperties().contentType() == "type" + } + + @Unroll + def "Set HTTP headers headers"() { + setup: + BlobHTTPHeaders putHeaders = new BlobHTTPHeaders().blobCacheControl(cacheControl) + .blobContentDisposition(contentDisposition) + .blobContentEncoding(contentEncoding) + .blobContentLanguage(contentLanguage) + .blobContentMD5(contentMD5) + .blobContentType(contentType) + bu.setHTTPHeaders(putHeaders, null, null) + + BlobGetPropertiesHeaders receivedHeaders = bu.getProperties(null, null) + + expect: + validateBlobHeaders(receivedHeaders, cacheControl, contentDisposition, contentEncoding, contentLanguage, + contentMD5, contentType) + + where: + cacheControl | contentDisposition | contentEncoding | contentLanguage | contentMD5 | contentType + null | null | null | null | null | null + "control" | "disposition" | "encoding" | "language" | Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(defaultData.array())) | "type" + + } + + + @Unroll + def "Set HTTP headers AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + leaseID = setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions() + .withLeaseAccessConditions(new LeaseAccessConditions().leaseId(leaseID)) + .withModifiedAccessConditions(new ModifiedAccessConditions() + .ifModifiedSince(modified) + .ifUnmodifiedSince(unmodified) + .ifMatch(match) + .ifNoneMatch(noneMatch)) + + expect: + bu.setHTTPHeaders(null, bac, null) //.blockingGet().statusCode() == 200 + + where: + modified | unmodified | match | noneMatch | leaseID + null | null | null | null | null + oldDate | null | null | null | null + null | newDate | null | null | null + null | null | receivedEtag | null | null + null | null | null | garbageEtag | null + null | null | null | null | receivedLeaseID + } + + @Unroll + def "Set HTTP headers AC fail"() { + setup: + noneMatch = setupBlobMatchCondition(bu, noneMatch) + setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions() + .withLeaseAccessConditions(new LeaseAccessConditions().leaseId(leaseID)) + .withModifiedAccessConditions(new ModifiedAccessConditions() + .ifModifiedSince(modified) + .ifUnmodifiedSince(unmodified) + .ifMatch(match) + .ifNoneMatch(noneMatch)) + + when: + bu.setHTTPHeaders(null, bac, null) + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch | leaseID + newDate | null | null | null | null + null | oldDate | null | null | null + null | null | garbageEtag | null | null + null | null | null | receivedEtag | null + null | null | null | null | garbageLeaseID + } + + def "Set HTTP headers error"() { + setup: + bu = cu.getBlockBlobClient(generateBlobName()) + + when: + bu.setHTTPHeaders(null, null, null) + + then: + thrown(StorageException) + } + + /*def "Set HTTP headers context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(200, BlobSetHTTPHeadersHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + bu.setHTTPHeaders(null) + + then: + notThrown(RuntimeException) + }*/ + + /*def "Set metadata all null"() { + setup: + BlobsSetMetadataResponse response = bu.setMetadata(null, null, null) + + expect: + bu.getProperties(null, null).metadata().size() == 0 + response.statusCode() == 200 + validateBasicHeaders(response.headers()) + response.deserializedHeaders().isServerEncrypted() + }*/ + + def "Set metadata min"() { + setup: + Metadata metadata = new Metadata() + metadata.put("foo", "bar") + + when: + bu.setMetadata(metadata) + + then: + bu.getProperties().metadata() == metadata + } + + @Unroll + def "Set metadata metadata"() { + setup: + Metadata metadata = new Metadata() + if (key1 != null && value1 != null) { + metadata.put(key1, value1) + } + if (key2 != null && value2 != null) { + metadata.put(key2, value2) + } + + expect: + bu.setMetadata(metadata, null, null) //.blockingGet().statusCode() == statusCode + bu.getProperties(null, null).metadata() == metadata + + where: + key1 | value1 | key2 | value2 || statusCode + null | null | null | null || 200 + "foo" | "bar" | "fizz" | "buzz" || 200 + } + + @Unroll + def "Set metadata AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + leaseID = setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions() + .withLeaseAccessConditions(new LeaseAccessConditions().leaseId(leaseID)) + .withModifiedAccessConditions(new ModifiedAccessConditions() + .ifModifiedSince(modified) + .ifUnmodifiedSince(unmodified) + .ifMatch(match) + .ifNoneMatch(noneMatch)) + + expect: + bu.setMetadata(null, bac, null) //.blockingGet().statusCode() == 200 + + where: + modified | unmodified | match | noneMatch | leaseID + null | null | null | null | null + oldDate | null | null | null | null + null | newDate | null | null | null + null | null | receivedEtag | null | null + null | null | null | garbageEtag | null + null | null | null | null | receivedLeaseID + } + + @Unroll + def "Set metadata AC fail"() { + setup: + noneMatch = setupBlobMatchCondition(bu, noneMatch) + setupBlobLeaseCondition(bu, leaseID) + + BlobAccessConditions bac = new BlobAccessConditions() + .withLeaseAccessConditions(new LeaseAccessConditions().leaseId(leaseID)) + .withModifiedAccessConditions(new ModifiedAccessConditions() + .ifModifiedSince(modified) + .ifUnmodifiedSince(unmodified) + .ifMatch(match) + .ifNoneMatch(noneMatch)) + + when: + bu.setMetadata(null, bac, null) + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch | leaseID + newDate | null | null | null | null + null | oldDate | null | null | null + null | null | garbageEtag | null | null + null | null | null | receivedEtag | null + null | null | null | null | garbageLeaseID + } + + def "Set metadata error"() { + setup: + bu = cu.getBlockBlobClient(generateBlobName()) + + when: + bu.setMetadata(null, null, null) + + then: + thrown(StorageException) + } + + /*def "Set metadata context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(200, BlobSetMetadataHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + bu.setMetadata(null) + + then: + notThrown(RuntimeException) + }*/ + + @Unroll + def "Acquire lease"() { + setup: + /*BlobAcquireLeaseHeaders*/ String leaseId = bu.acquireLease(proposedID, leaseTime, null, null) + + when: + BlobGetPropertiesHeaders properties = bu.getProperties(null, null) + + then: + properties.leaseState() == leaseState + properties.leaseDuration() == leaseDuration + leaseId != null + //validateBasicHeaders(headers) + + where: + proposedID | leaseTime || leaseState | leaseDuration + null | -1 || LeaseStateType.LEASED | LeaseDurationType.INFINITE + null | 25 || LeaseStateType.LEASED | LeaseDurationType.FIXED + UUID.randomUUID().toString() | -1 || LeaseStateType.LEASED | LeaseDurationType.INFINITE + } + + /*def "Acquire lease min"() { + setup: + bu.acquireLease(null, -1).blockingGet().statusCode() == 201 + }*/ + + @Unroll + def "Acquire lease AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + def mac = new ModifiedAccessConditions().ifModifiedSince(modified).ifUnmodifiedSince(unmodified) + .ifMatch(match).ifNoneMatch(noneMatch) + + expect: + bu.acquireLease(null, -1, mac, null) //.blockingGet().statusCode() == 201 + + where: + modified | unmodified | match | noneMatch + null | null | null | null + oldDate | null | null | null + null | newDate | null | null + null | null | receivedEtag | null + null | null | null | garbageEtag + } + + @Unroll + def "Acquire lease AC fail"() { + setup: + noneMatch = setupBlobMatchCondition(bu, noneMatch) + def mac = new ModifiedAccessConditions() + .ifModifiedSince(modified) + .ifUnmodifiedSince(unmodified) + .ifMatch(match) + .ifNoneMatch(noneMatch) + + when: + bu.acquireLease(null, -1, mac, null) + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch + newDate | null | null | null + null | oldDate | null | null + null | null | garbageEtag | null + null | null | null | receivedEtag + } + + def "Acquire lease error"() { + setup: + bu = cu.getBlockBlobClient(generateBlobName()) + + when: + bu.acquireLease(null, 20, null, null) + + then: + thrown(StorageException) + } + + /*def "Acquire lease context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(201, BlobAcquireLeaseHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + bu.acquireLease(null, 20) + + then: + notThrown(RuntimeException) + }*/ + + def "Renew lease"() { + setup: + String leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) + + Thread.sleep(16000) // Wait for the lease to expire to ensure we are actually renewing it + /*BlobRenewLeaseHeaders*/ String leaseId = bu.renewLease(leaseID, null, null) + + expect: + bu.getProperties(null, null).leaseState() == LeaseStateType.LEASED + //validateBasicHeaders(headers) + leaseId != null + } + + /*def "Renew lease min"() { + setup: + String leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) + + expect: + bu.renewLease(leaseID).blockingGet().statusCode() == 200 + }*/ + + @Unroll + def "Renew lease AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + String leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) + def mac = new ModifiedAccessConditions() + .ifModifiedSince(modified) + .ifUnmodifiedSince(unmodified) + .ifMatch(match) + .ifNoneMatch(noneMatch) + + expect: + bu.renewLease(leaseID, mac, null) //.blockingGet().statusCode() == 200 + + where: + modified | unmodified | match | noneMatch + null | null | null | null + oldDate | null | null | null + null | newDate | null | null + null | null | receivedEtag | null + null | null | null | garbageEtag + } + + @Unroll + def "Renew lease AC fail"() { + noneMatch = setupBlobMatchCondition(bu, noneMatch) + String leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) + def mac = new ModifiedAccessConditions() + .ifModifiedSince(modified) + .ifUnmodifiedSince(unmodified) + .ifMatch(match) + .ifNoneMatch(noneMatch) + + when: + bu.renewLease(leaseID, mac, null) + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch + newDate | null | null | null + null | oldDate | null | null + null | null | garbageEtag | null + null | null | null | receivedEtag + } + + def "Renew lease error"() { + setup: + bu = cu.getBlockBlobClient(generateBlobName()) + + when: + bu.renewLease("id", null, null) + + then: + thrown(StorageException) + } + + /*def "Renew lease context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(200, BlobRenewLeaseHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + bu.renewLease("id") + + then: + notThrown(RuntimeException) + }*/ + + /*def "Release lease"() { + setup: + String leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) + + BlobReleaseLeaseHeaders headers = bu.releaseLease(leaseID, null, null) + + expect: + bu.getProperties(null, null).leaseState() == LeaseStateType.AVAILABLE + validateBasicHeaders(headers) + }*/ + + def "Release lease min"() { + setup: + String leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) + + expect: + bu.releaseLease(leaseID) //.blockingGet().statusCode() == 200 + } + + @Unroll + def "Release lease AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + String leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) + def mac = new ModifiedAccessConditions() + .ifModifiedSince(modified) + .ifUnmodifiedSince(unmodified) + .ifMatch(match) + .ifNoneMatch(noneMatch) + + expect: + bu.releaseLease(leaseID, mac, null) //.blockingGet().statusCode() == 200 + + where: + modified | unmodified | match | noneMatch + null | null | null | null + oldDate | null | null | null + null | newDate | null | null + null | null | receivedEtag | null + null | null | null | garbageEtag + } + + @Unroll + def "Release lease AC fail"() { + setup: + noneMatch = setupBlobMatchCondition(bu, noneMatch) + String leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) + def mac = new ModifiedAccessConditions() + .ifModifiedSince(modified) + .ifUnmodifiedSince(unmodified) + .ifMatch(match) + .ifNoneMatch(noneMatch) + + when: + bu.releaseLease(leaseID, mac, null) + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch + newDate | null | null | null + null | oldDate | null | null + null | null | garbageEtag | null + null | null | null | receivedEtag + } + + def "Release lease error"() { + setup: + bu = cu.getBlockBlobClient(generateBlobName()) + + when: + bu.releaseLease("id", null, null) + + then: + thrown(StorageException) + } + + /*def "Release lease context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(200, BlobReleaseLeaseHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + bu.releaseLease("id") + + then: + notThrown(RuntimeException) + }*/ + + @Unroll + def "Break lease"() { + setup: + bu.acquireLease(UUID.randomUUID().toString(), leaseTime, null, null) + + /*BlobBreakLeaseHeaders*/ int responseLeaseTime = bu.breakLease(breakPeriod, null, null) + LeaseStateType state = bu.getProperties(null, null).leaseState() + + expect: + state == LeaseStateType.BROKEN || state == LeaseStateType.BREAKING + responseLeaseTime <= remainingTime + //validateBasicHeaders(headers) + + where: + leaseTime | breakPeriod | remainingTime + -1 | null | 0 + -1 | 20 | 25 + 20 | 15 | 16 + } + + /*def "Break lease min"() { + setup: + setupBlobLeaseCondition(bu, receivedLeaseID) + + + then: + bu.breakLease().statusCode() == 202 + }*/ + + /*@Unroll + def "Break lease AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + setupBlobLeaseCondition(bu, receivedLeaseID) + def mac = new ModifiedAccessConditions() + .ifModifiedSince(modified) + .ifUnmodifiedSince(unmodified) + .ifMatch(match) + .ifNoneMatch(noneMatch) + + then: + bu.breakLease(null, mac, null).statusCode() == 202 + + where: + modified | unmodified | match | noneMatch + null | null | null | null + oldDate | null | null | null + null | newDate | null | null + null | null | receivedEtag | null + null | null | null | garbageEtag + }*/ + + @Unroll + def "Break lease AC fail"() { + setup: + noneMatch = setupBlobMatchCondition(bu, noneMatch) + setupBlobLeaseCondition(bu, receivedLeaseID) + def mac = new ModifiedAccessConditions() + .ifModifiedSince(modified) + .ifUnmodifiedSince(unmodified) + .ifMatch(match) + .ifNoneMatch(noneMatch) + + when: + bu.breakLease(null, mac, null) + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch + newDate | null | null | null + null | oldDate | null | null + null | null | garbageEtag | null + null | null | null | receivedEtag + } + + def "Break lease error"() { + setup: + bu = cu.getBlockBlobClient(generateBlobName()) + + when: + bu.breakLease(null, null, null) + + then: + thrown(StorageException) + } + + /*def "Break lease context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(202, BlobBreakLeaseHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + bu.breakLease(18, null, null) + + then: + notThrown(RuntimeException) + }*/ + + def "Change lease"() { + setup: + String leaseID = bu.acquireLease(UUID.randomUUID().toString(), 15) + leaseID = bu.changeLease(leaseID, UUID.randomUUID().toString()) + + expect: + bu.releaseLease(leaseID, null, null) //.blockingGet().statusCode() == 200 + //validateBasicHeaders(headers) + } + + def "Change lease min"() { + setup: + def leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) + + expect: + bu.changeLease(leaseID, UUID.randomUUID().toString()) //.blockingGet().statusCode() == 200 + } + + @Unroll + def "Change lease AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + String leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) + def mac = new ModifiedAccessConditions() + .ifModifiedSince(modified) + .ifUnmodifiedSince(unmodified) + .ifMatch(match) + .ifNoneMatch(noneMatch) + + expect: + bu.changeLease(leaseID, UUID.randomUUID().toString(), mac, null) //.blockingGet().statusCode() == 200 + + where: + modified | unmodified | match | noneMatch + null | null | null | null + oldDate | null | null | null + null | newDate | null | null + null | null | receivedEtag | null + null | null | null | garbageEtag + } + + @Unroll + def "Change lease AC fail"() { + setup: + noneMatch = setupBlobMatchCondition(bu, noneMatch) + String leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) + def mac = new ModifiedAccessConditions() + .ifModifiedSince(modified) + .ifUnmodifiedSince(unmodified) + .ifMatch(match) + .ifNoneMatch(noneMatch) + + when: + bu.changeLease(leaseID, UUID.randomUUID().toString(), mac, null) + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch + newDate | null | null | null + null | oldDate | null | null + null | null | garbageEtag | null + null | null | null | receivedEtag + } + + def "Change lease error"() { + setup: + bu = cu.getBlockBlobClient(generateBlobName()) + + when: + bu.changeLease("id", "id", null, null) + + then: + thrown(StorageException) + } + + /*def "Change lease context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(200, BlobChangeLeaseHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + bu.changeLease("id", "newId") + + then: + notThrown(RuntimeException) + }*/ + + /*def "Snapshot"() { + when: + String snapshot = bu.createSnapshot(null, null, null) + + then: + bu.withSnapshot(snapshot).getProperties(null, null).blockingGet().statusCode() == 200 + validateBasicHeaders(headers) + }*/ + + /*def "Snapshot min"() { + expect: + bu.createSnapshot().blockingGet().statusCode() == 201 + }*/ + + /*@Unroll + def "Snapshot metadata"() { + setup: + Metadata metadata = new Metadata() + if (key1 != null && value1 != null) { + metadata.put(key1, value1) + } + if (key2 != null && value2 != null) { + metadata.put(key2, value2) + } + + BlobsCreateSnapshotResponse response = bu.createSnapshot(metadata, null, null) + + expect: + response.statusCode() == 201 + bu.withSnapshot(response.headers().snapshot()) + .getProperties(null, null).blockingGet().headers().metadata() == metadata + + where: + key1 | value1 | key2 | value2 + null | null | null | null + "foo" | "bar" | "fizz" | "buzz" + }*/ + + /*@Unroll + def "Snapshot AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + leaseID = setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().ifModifiedSince(modified).ifUnmodifiedSince(unmodified) + .ifMatch(match).ifNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().leaseId(leaseID)) + + expect: + bu.createSnapshot(null, bac, null).blockingGet().statusCode() == 201 + + where: + modified | unmodified | match | noneMatch | leaseID + null | null | null | null | null + oldDate | null | null | null | null + null | newDate | null | null | null + null | null | receivedEtag | null | null + null | null | null | garbageEtag | null + null | null | null | null | receivedLeaseID + }*/ + + @Unroll + def "Snapshot AC fail"() { + setup: + noneMatch = setupBlobMatchCondition(bu, noneMatch) + setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions() + .withLeaseAccessConditions(new LeaseAccessConditions().leaseId(leaseID)) + .withModifiedAccessConditions(new ModifiedAccessConditions() + .ifModifiedSince(modified) + .ifUnmodifiedSince(unmodified) + .ifMatch(match) + .ifNoneMatch(noneMatch)) + + + when: + bu.createSnapshot(null, bac, null) + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch | leaseID + newDate | null | null | null | null + null | oldDate | null | null | null + null | null | garbageEtag | null | null + null | null | null | receivedEtag | null + null | null | null | null | garbageLeaseID + } + + def "Snapshot error"() { + setup: + bu = cu.getBlockBlobClient(generateBlobName()) + + when: + bu.createSnapshot(null, null, null) + + then: + thrown(StorageException) + } + + /*def "Snapshot context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(201, BlobCreateSnapshotHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + bu.createSnapshot() + + then: + notThrown(RuntimeException) + }*/ + + /*def "Copy"() { + setup: + BlobClient bu2 = cu.getBlockBlobClient(generateBlobName()) + BlobStartCopyFromURLHeaders headers = + bu2.startCopyFromURL(bu.toURL(), null, null, null, null) + + when: + while (bu2.getProperties(null, null).blockingGet().headers().copyStatus() == CopyStatusType.PENDING) { + sleep(1000) + } + BlobGetPropertiesHeaders headers2 = bu2.getProperties(null, null).blockingGet().headers() + + then: + headers2.copyStatus() == CopyStatusType.SUCCESS + headers2.copyCompletionTime() != null + headers2.copyProgress() != null + headers2.copySource() != null + validateBasicHeaders(headers) + headers.copyId() != null + }*/ + + /*def "Copy min"() { + expect: + bu.startCopyFromURL(bu.toURL()).blockingGet().statusCode() == 202 + }*/ + + /*@Unroll + def "Copy metadata"() { + setup: + BlobClient bu2 = cu.getBlockBlobClient(generateBlobName()) + Metadata metadata = new Metadata() + if (key1 != null && value1 != null) { + metadata.put(key1, value1) + } + if (key2 != null && value2 != null) { + metadata.put(key2, value2) + } + + BlobsStartCopyFromURLResponse response = + bu2.startCopyFromURL(bu.toURL(), metadata, null, null, null) + waitForCopy(bu2, response.deserializedHeaders().copyStatus()) + + expect: + bu2.getProperties().metadata() == metadata + + where: + key1 | value1 | key2 | value2 + null | null | null | null + "foo" | "bar" | "fizz" | "buzz" + }*/ + + /*@Unroll + def "Copy source AC"() { + setup: + BlobClient bu2 = cu.getBlockBlobClient(generateBlobName()) + match = setupBlobMatchCondition(bu, match) + def mac = new ModifiedAccessConditions().ifModifiedSince(modified).ifUnmodifiedSince(unmodified) + .ifMatch(match).ifNoneMatch(noneMatch) + + expect: + bu2.startCopyFromURL(bu.toURL(), null, mac, null, null).blockingGet().statusCode() == 202 + + where: + modified | unmodified | match | noneMatch + null | null | null | null + oldDate | null | null | null + null | newDate | null | null + null | null | receivedEtag | null + null | null | null | garbageEtag + }*/ + + /*@Unroll + def "Copy source AC fail"() { + setup: + BlobClient bu2 = cu.getBlockBlobClient(generateBlobName()) + noneMatch = setupBlobMatchCondition(bu, noneMatch) + def mac = new ModifiedAccessConditions().ifModifiedSince(modified).ifUnmodifiedSince(unmodified) + .ifMatch(match).ifNoneMatch(noneMatch) + + when: + bu2.startCopyFromURL(bu.toURL(), null, mac, null, null) + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch + newDate | null | null | null + null | oldDate | null | null + null | null | garbageEtag | null + null | null | null | receivedEtag + }*/ + + /*@Unroll + def "Copy dest AC"() { + setup: + BlobClient bu2 = cu.getBlockBlobClient(generateBlobName()) + bu2.upload(defaultFlux, defaultDataSize) + match = setupBlobMatchCondition(bu2, match) + leaseID = setupBlobLeaseCondition(bu2, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().ifModifiedSince(modified).ifUnmodifiedSince(unmodified) + .ifMatch(match).ifNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().leaseId(leaseID)) + + expect: + bu2.startCopyFromURL(bu.toURL(), null, null, bac, null) + .blockingGet().statusCode() == 202 + + where: + modified | unmodified | match | noneMatch | leaseID + null | null | null | null | null + oldDate | null | null | null | null + null | newDate | null | null | null + null | null | receivedEtag | null | null + null | null | null | garbageEtag | null + null | null | null | null | receivedLeaseID + }*/ + + /*@Unroll + def "Copy dest AC fail"() { + setup: + BlobClient bu2 = cu.getBlockBlobClient(generateBlobName()) + bu2.upload(defaultFlux, defaultDataSize) + noneMatch = setupBlobMatchCondition(bu2, noneMatch) + setupBlobLeaseCondition(bu2, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().ifModifiedSince(modified).ifUnmodifiedSince(unmodified) + .ifMatch(match).ifNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().leaseId(leaseID)) + + when: + bu2.startCopyFromURL(bu.toURL(), null, null, bac, null) + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch | leaseID + newDate | null | null | null | null + null | oldDate | null | null | null + null | null | garbageEtag | null | null + null | null | null | receivedEtag | null + null | null | null | null | garbageLeaseID + }*/ + + /*def "Abort copy lease fail"() { + // Data has to be large enough and copied between accounts to give us enough time to abort + ByteBuffer data = getRandomData(8 * 1024 * 1024) + bu.toBlockBlobClient() + .upload(Flux.just(data), 8 * 1024 * 1024, null, null, null, null) + .blockingGet() + // So we don't have to create a SAS. + cu.setAccessPolicy(PublicAccessType.BLOB) + + ContainerClient cu2 = alternateServiceURL.getContainerClient(generateBlobName()) + cu2.create(null, null, null) + BlockBlobClient bu2 = cu2.getBlockBlobClient(generateBlobName()) + bu2.upload(defaultFlux, defaultDataSize) + String leaseID = setupBlobLeaseCondition(bu2, receivedLeaseID) + + when: + String copyID = + bu2.startCopyFromURL(bu.toURL(), null, null, + new BlobAccessConditions().withLeaseAccessConditions(new LeaseAccessConditions() + .leaseId(leaseID)), null) + bu2.abortCopyFromURL(copyID, new LeaseAccessConditions().leaseId(garbageLeaseID), null) + + then: + def e = thrown(StorageException) + e.statusCode() == 412 + cu2.delete(null, null).blockingGet() + }*/ + + /*def "Copy context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(202, BlobStartCopyFromURLHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + bu.startCopyFromURL(new URL("http://www.example.com")) + + then: + notThrown(RuntimeException) + }*/ + + /*def "Abort copy"() { + setup: + // Data has to be large enough and copied between accounts to give us enough time to abort + ByteBuffer data = getRandomData(8 * 1024 * 1024) + + bu.asBlockBlobClient().upload(new ByteArrayInputStream(data.array()), 8 * 1024 * 1024) + // So we don't have to create a SAS. + cu.setAccessPolicy(PublicAccessType.BLOB, null) + + ContainerClient cu2 = alternateServiceURL.getContainerClient(generateBlobName()) + cu2.create() + BlobClient bu2 = cu2.getBlobClient(generateBlobName()) + + when: + String copyID = bu2.startCopyFromURL(bu.toURL()) + BlobsAbortCopyFromURLResponse response = bu2.abortCopyFromURL(copyID) + BlobAbortCopyFromURLHeaders headers = response.deserializedHeaders() + + then: + response.statusCode() == 204 + headers.requestId() != null + headers.version() != null + headers.dateProperty() != null + // Normal test cleanup will not clean up containers in the alternate account. + cu2.delete() //.blockingGet().statusCode() == 202 + }*/ + + /*def "Abort copy min"() { + setup: + // Data has to be large enough and copied between accounts to give us enough time to abort + ByteBuffer data = getRandomData(8 * 1024 * 1024) + bu.asBlockBlobClient().upload(Flux.just(data), 8 * 1024 * 1024) + // So we don't have to create a SAS. + cu.setAccessPolicy(PublicAccessType.BLOB, null) + + ContainerClient cu2 = alternateServiceURL.getContainerClient(generateBlobName()) + cu2.create() + BlobClient bu2 = cu2.getBlobClient(generateBlobName()) + + when: + String copyID = + bu2.startCopyFromURL(bu.toURL(), null, null, null, null) + .blockingGet().headers().copyId() + + then: + bu2.abortCopyFromURL(copyID).blockingGet().statusCode() == 204 + }*/ + + /*def "Abort copy lease"() { + setup: + // Data has to be large enough and copied between accounts to give us enough time to abort + ByteBuffer data = getRandomData(8 * 1024 * 1024) + bu.asBlockBlobClient() + .upload(Flux.just(data), 8 * 1024 * 1024) + // So we don't have to create a SAS. + cu.setAccessPolicy(PublicAccessType.BLOB, null) + + ContainerClient cu2 = alternateServiceURL.getContainerClient(generateBlobName()) + cu2.create() + BlockBlobClient bu2 = cu2.getBlockBlobClient(generateBlobName()) + bu2.upload(defaultFlux, defaultDataSize) + String leaseID = setupBlobLeaseCondition(bu2, receivedLeaseID) + + when: + String copyID = + bu2.startCopyFromURL(bu.toURL(), null, null, + new BlobAccessConditions().withLeaseAccessConditions(new LeaseAccessConditions() + .leaseId(leaseID)), null) + + then: + bu2.abortCopyFromURL(copyID, new LeaseAccessConditions().withLeaseId(leaseID), null) + .blockingGet().statusCode() == 204 + // Normal test cleanup will not clean up containers in the alternate account. + cu2.delete(null, null).blockingGet() + }*/ + + def "Copy error"() { + setup: + bu = cu.getBlockBlobClient(generateBlobName()) + + when: + bu.startCopyFromURL(new URL("http://www.error.com")) + + then: + thrown(StorageException) + } + + def "Abort copy error"() { + setup: + bu = cu.getBlockBlobClient(generateBlobName()) + + when: + bu.abortCopyFromURL("id", null, null) + + then: + thrown(StorageException) + } + + /*def "Abort copy context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(204, BlobAbortCopyFromURLHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + bu.abortCopyFromURL("id") + + then: + notThrown(RuntimeException) + }*/ + + /*def "Sync copy"() { + setup: + // Sync copy is a deep copy, which requires either sas or public access. + cu.setAccessPolicy(PublicAccessType.CONTAINER, null) + BlobClient bu2 = cu.getBlockBlobClient(generateBlobName()) + def headers = bu2.syncCopyFromURL(bu.toURL(), null, null,null, null).blockingGet().headers() + + expect: + headers.copyStatus() == SyncCopyStatusType.SUCCESS + headers.copyId() != null + validateBasicHeaders(headers) + }*/ + + /*def "Sync copy min"() { + setup: + cu.setAccessPolicy(PublicAccessType.CONTAINER, null) + BlobClient bu2 = cu.getBlockBlobClient(generateBlobName()) + + expect: + bu2.syncCopyFromURL(bu.toURL()).blockingGet().statusCode() == 202 + }*/ + + /*@Unroll + def "Sync copy metadata"() { + setup: + cu.setAccessPolicy(PublicAccessType.CONTAINER, null) + BlobClient bu2 = cu.getBlockBlobClient(generateBlobName()) + Metadata metadata = new Metadata() + if (key1 != null && value1 != null) { + metadata.put(key1, value1) + } + if (key2 != null && value2 != null) { + metadata.put(key2, value2) + } + + when: + bu2.syncCopyFromURL(bu.toURL(), metadata, null, null, null).blockingGet() + + then: + bu2.getProperties().blockingGet().headers().metadata() == metadata + + where: + key1 | value1 | key2 | value2 + null | null | null | null + "foo" | "bar" | "fizz" | "buzz" + }*/ + + /*@Unroll + def "Sync copy source AC"() { + setup: + cu.setAccessPolicy(PublicAccessType.CONTAINER, null).blockingGet() + BlobClient bu2 = cu.getBlockBlobClient(generateBlobName()) + match = setupBlobMatchCondition(bu, match) + def mac = new ModifiedAccessConditions().ifModifiedSince(modified).ifUnmodifiedSince(unmodified) + .ifMatch(match).ifNoneMatch(noneMatch) + + expect: + bu2.syncCopyFromURL(bu.toURL(), null, mac, null, null).blockingGet().statusCode() == 202 + + where: + modified | unmodified | match | noneMatch + null | null | null | null + oldDate | null | null | null + null | newDate | null | null + null | null | receivedEtag | null + null | null | null | garbageEtag + }*/ + + /*@Unroll + def "Sync copy source AC fail"() { + setup: + cu.setAccessPolicy(PublicAccessType.CONTAINER, null).blockingGet() + BlobClient bu2 = cu.getBlockBlobClient(generateBlobName()) + noneMatch = setupBlobMatchCondition(bu, noneMatch) + def mac = new ModifiedAccessConditions().ifModifiedSince(modified).ifUnmodifiedSince(unmodified) + .ifMatch(match).ifNoneMatch(noneMatch) + + when: + bu2.syncCopyFromURL(bu.toURL(), null, mac, null, null).blockingGet() + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch + newDate | null | null | null + null | oldDate | null | null + null | null | garbageEtag | null + null | null | null | receivedEtag + }*/ + + /*@Unroll + def "Sync copy dest AC"() { + setup: + cu.setAccessPolicy(PublicAccessType.CONTAINER, null) + BlobClient bu2 = cu.getBlockBlobClient(generateBlobName()) + bu2.upload(defaultFlux, defaultDataSize) + match = setupBlobMatchCondition(bu2, match) + leaseID = setupBlobLeaseCondition(bu2, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().ifModifiedSince(modified).ifUnmodifiedSince(unmodified) + .ifMatch(match).ifNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().leaseId(leaseID)) + + expect: + bu2.syncCopyFromURL(bu.toURL(), null, null, bac, null).blockingGet().statusCode() == 202 + + where: + modified | unmodified | match | noneMatch | leaseID + null | null | null | null | null + oldDate | null | null | null | null + null | newDate | null | null | null + null | null | receivedEtag | null | null + null | null | null | garbageEtag | null + null | null | null | null | receivedLeaseID + }*/ + + /*@Unroll + def "Sync copy dest AC fail"() { + setup: + cu.setAccessPolicy(PublicAccessType.CONTAINER, null) + BlobClient bu2 = cu.getBlockBlobClient(generateBlobName()) + bu2.upload(defaultFlux, defaultDataSize) + noneMatch = setupBlobMatchCondition(bu2, noneMatch) + setupBlobLeaseCondition(bu2, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().ifModifiedSince(modified).ifUnmodifiedSince(unmodified) + .ifMatch(match).ifNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().leaseId(leaseID)) + + when: + bu2.syncCopyFromURL(bu.toURL(), null, null, bac, null).blockingGet() + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch | leaseID + newDate | null | null | null | null + null | oldDate | null | null | null + null | null | garbageEtag | null | null + null | null | null | receivedEtag | null + null | null | null | null | garbageLeaseID + }*/ + + /*def "Sync copy error"() { + setup: + def bu2 = cu.getBlockBlobClient(generateBlobName()) + + when: + bu2.syncCopyFromURL(bu.toURL(), null, null, null, null).blockingGet() + + then: + thrown(StorageException) + }*/ + + /*def "Sync copy context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(202, BlobCopyFromURLHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + bu.syncCopyFromURL(new URL("http://www.example.com")) + + then: + notThrown(RuntimeException) + }*/ + + /*def "Delete"() { + when: + BlobsDeleteResponse response = bu.delete() + BlobDeleteHeaders headers = response.deserializedHeaders() + + then: + response.statusCode() == 202 + headers.requestId() != null + headers.version() != null + headers.dateProperty() != null + }*/ + + /*def "Delete min"() { + expect: + bu.delete().blockingGet().statusCode() == 202 + }*/ + + @Unroll + def "Delete options"() { + setup: + bu.createSnapshot() + // Create an extra blob so the list isn't empty (null) when we delete base blob, too + BlockBlobClient bu2 = cu.getBlockBlobClient(generateBlobName()) + bu2.upload(defaultInputStream.get(), defaultDataSize) + + when: + bu.delete(option, null, null) + + then: + Iterator blobs = cu.listBlobsFlat(null).iterator() + + int blobCount = 0 + for ( ; blobs.hasNext(); blobCount++ ) + blobs.next() + + blobCount == blobsRemaining + + where: + option | blobsRemaining + DeleteSnapshotsOptionType.INCLUDE | 1 + DeleteSnapshotsOptionType.ONLY | 2 + } + + @Unroll + def "Delete AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + leaseID = setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions() + .withLeaseAccessConditions(new LeaseAccessConditions().leaseId(leaseID)) + .withModifiedAccessConditions(new ModifiedAccessConditions() + .ifModifiedSince(modified) + .ifUnmodifiedSince(unmodified) + .ifMatch(match) + .ifNoneMatch(noneMatch)) + + expect: + bu.delete(DeleteSnapshotsOptionType.INCLUDE, bac, null) //.blockingGet().statusCode() == 202 + + where: + modified | unmodified | match | noneMatch | leaseID + null | null | null | null | null + oldDate | null | null | null | null + null | newDate | null | null | null + null | null | receivedEtag | null | null + null | null | null | garbageEtag | null + null | null | null | null | receivedLeaseID + } + + @Unroll + def "Delete AC fail"() { + setup: + noneMatch = setupBlobMatchCondition(bu, noneMatch) + setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions() + .withLeaseAccessConditions(new LeaseAccessConditions().leaseId(leaseID)) + .withModifiedAccessConditions(new ModifiedAccessConditions() + .ifModifiedSince(modified) + .ifUnmodifiedSince(unmodified) + .ifMatch(match) + .ifNoneMatch(noneMatch)) + + when: + bu.delete(DeleteSnapshotsOptionType.INCLUDE, bac, null) + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch | leaseID + newDate | null | null | null | null + null | oldDate | null | null | null + null | null | garbageEtag | null | null + null | null | null | receivedEtag | null + null | null | null | null | garbageLeaseID + } + + def "Blob delete error"() { + setup: + bu = cu.getBlockBlobClient(generateBlobName()) + + when: + bu.delete(null, null, null) + + then: + thrown(StorageException) + } + + /*def "Delete context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(202, BlobDeleteHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + bu.delete() + + then: + notThrown(RuntimeException) + }*/ + + /*@Unroll + def "Set tier block blob"() { + setup: + ContainerClient cu = blobStorageServiceURL.getContainerClient(generateContainerName()) + BlockBlobClient bu = cu.getBlockBlobClient(generateBlobName()) + cu.create() + bu.upload(defaultInputStream.get(), defaultData.remaining()) + + when: + BlobsSetTierResponse initialResponse = bu.setTier(tier) + BlobSetTierHeaders headers = initialResponse.deserializedHeaders() + + then: + initialResponse.statusCode() == 200 || initialResponse.statusCode() == 202 + headers.version() != null + headers.requestId() != null + bu.getProperties().accessTier() == tier.toString() + cu.listBlobsFlat(null).iterator().next().properties().accessTier() == tier + + where: + tier | _ + AccessTier.HOT | _ + AccessTier.COOL | _ + AccessTier.ARCHIVE | _ + }*/ + + /*@Unroll + def "Set tier page blob"() { + setup: + ContainerClient cu = premiumServiceURL.getContainerClient(generateContainerName()) + PageBlobClient bu = cu.getPageBlobClient(generateBlobName()) + cu.create() + bu.create(512) + + when: + bu.setTier(tier, null, null) + + then: + bu.getProperties().accessTier() == tier.toString() + cu.listBlobsFlat(null).iterator().next().properties().accessTier() == tier + cu.delete() + + where: + tier | _ + AccessTier.P4 | _ + AccessTier.P6 | _ + AccessTier.P10 | _ + AccessTier.P20 | _ + AccessTier.P30 | _ + AccessTier.P40 | _ + AccessTier.P50 | _ + }*/ + + /*def "Set tier min"() { + setup: + ContainerClient cu = blobStorageServiceURL.getContainerClient(generateContainerName()) + BlockBlobClient bu = cu.getBlockBlobClient(generateBlobName()) + cu.create() + bu.upload(defaultInputStream.get(), defaultData.remaining()) + + when: + def statusCode = bu.setTier(AccessTier.HOT) //.blockingGet().statusCode() + + then: + statusCode == 200 || statusCode == 202 + }*/ + + /*def "Set tier inferred"() { + setup: + ContainerClient cu = blobStorageServiceURL.getContainerClient(generateBlobName()) + BlockBlobClient bu = cu.getBlockBlobClient(generateBlobName()) + cu.create() + bu.upload(defaultInputStream.get(), defaultDataSize) + + when: + boolean inferred1 = bu.getProperties(null, null).accessTierInferred() + Boolean inferredList1 = cu.listBlobsFlat(null).iterator().next().properties().accessTierInferred() + + bu.setTier(AccessTier.HOT, null, null) + + BlobGetPropertiesHeaders headers = bu.getProperties(null, null) + Boolean inferred2 = headers.accessTierInferred() + Boolean inferredList2 = cu.listBlobsFlat(null).iterator().next().properties().accessTierInferred() + + then: + inferred1 + inferredList1 + inferred2 == null + inferredList2 == null + }*/ + + /*@Unroll + def "Set tier archive status"() { + setup: + ContainerClient cu = blobStorageServiceURL.getContainerClient(generateBlobName()) + BlockBlobClient bu = cu.getBlockBlobClient(generateBlobName()) + cu.create(null, null, null, defaultContext) + bu.upload(defaultInputStream.get(), defaultDataSize) + + when: + bu.setTier(sourceTier) + bu.setTier(destTier) + + then: + bu.getProperties().archiveStatus() == status.toString() + cu.listBlobsFlat(null).iterator().next().properties().archiveStatus() + + where: + sourceTier | destTier | status + AccessTier.ARCHIVE | AccessTier.COOL | ArchiveStatus.REHYDRATE_PENDING_TO_COOL + AccessTier.ARCHIVE | AccessTier.HOT | ArchiveStatus.REHYDRATE_PENDING_TO_HOT + }*/ + + /*def "Set tier error"() { + setup: + ContainerClient cu = blobStorageServiceURL.getContainerClient(generateBlobName()) + BlockBlobClient bu = cu.getBlockBlobClient(generateBlobName()) + cu.create() + bu.upload(defaultInputStream.get(), defaultDataSize) + + when: + bu.setTier(AccessTier.fromString("garbage"), null, null) + + then: + def e = thrown(StorageException) + e.errorCode() == StorageErrorCode.INVALID_HEADER_VALUE + }*/ + + def "Set tier illegal argument"() { + when: + bu.setTier(null, null, null) + + then: + thrown(IllegalArgumentException) + } + + /*def "Set tier lease"() { + setup: + ContainerClient cu = blobStorageServiceURL.getContainerClient(generateBlobName()) + BlockBlobClient bu = cu.getBlockBlobClient(generateBlobName()) + cu.create() + bu.upload(defaultInputStream.get(), defaultDataSize) + def leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) + + when: + bu.setTier(AccessTier.HOT, new LeaseAccessConditions().leaseId(leaseID), null) + + then: + notThrown(StorageException) + }*/ + + /*def "Set tier lease fail"() { + setup: + ContainerClient cu = blobStorageServiceURL.getContainerClient(generateBlobName()) + BlockBlobClient bu = cu.getBlockBlobClient(generateBlobName()) + cu.create() + bu.upload(defaultInputStream.get(), defaultDataSize) + + when: + bu.setTier(AccessTier.HOT, new LeaseAccessConditions().leaseId("garbage"), null) + + then: + thrown(StorageException) + }*/ + + /*def "Set tier context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(202, BlobSetTierHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + bu.setTier(AccessTier.HOT) + + then: + notThrown(RuntimeException) + }*/ + + /*def "Undelete"() { + setup: + enableSoftDelete() + bu.delete(null, null, null) + + when: + def response = bu.undelete(null) + bu.getProperties(null, null) + + then: + notThrown(StorageException) + response.headers().requestId() != null + response.headers().version() != null + response.headers().date() != null + + disableSoftDelete() == null + }*/ + + def "Undelete min"() { + setup: + enableSoftDelete() + bu.delete() + + expect: + bu.undelete() //.blockingGet().statusCode() == 200 + } + + def "Undelete error"() { + bu = cu.getBlockBlobClient(generateBlobName()) + + when: + bu.undelete(null) + + then: + thrown(StorageException) + } + + /*def "Undelete context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(200, BlobUndeleteHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + bu.undelete(null) + + then: + notThrown(RuntimeException) + }*/ + + def "Get account info"() { + when: + def response = primaryServiceURL.getAccountInfo() + + then: + response.dateProperty() != null + response.version() != null + response.requestId() != null + response.accountKind() != null + response.skuName() != null + } + + def "Get account info min"() { + expect: + bu.getAccountInfo() //.statusCode() == 200 + } + + /*def "Get account info error"() { + when: + StorageClient serviceURL = StorageClient.builder() new ServiceURL(primaryServiceURL.toURL(), + StorageURL.createPipeline(new AnonymousCredentials(), new PipelineOptions())) + serviceURL.getContainerClient(generateContainerName()).getBlobClient(generateBlobName()) + .getAccountInfo(null) + + then: + thrown(StorageException) + }*/ + + /*def "Get account info context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(200, BlobGetAccountInfoHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + bu.getAccountInfo(null) + + then: + notThrown(RuntimeException) + }*/ +} diff --git a/storage/client/src/test/java/com/azure/storage/blob/BlobPocTests.java b/storage/client/src/test/java/com/azure/storage/blob/BlobPocTests.java index 00e888fbff336..406299a461834 100644 --- a/storage/client/src/test/java/com/azure/storage/blob/BlobPocTests.java +++ b/storage/client/src/test/java/com/azure/storage/blob/BlobPocTests.java @@ -1,61 +1,88 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. -// Code generated by Microsoft (R) AutoRest Code Generator. - -package com.azure.storage.blob; - -import com.azure.core.http.HttpClient; -import com.azure.core.http.HttpPipeline; -import com.azure.core.http.ProxyOptions; -import com.azure.core.http.ProxyOptions.Type; -import com.azure.storage.blob.implementation.AzureBlobStorageImpl; -import com.azure.storage.blob.models.BlobsGetPropertiesResponse; -import com.azure.storage.blob.models.BlockLookupList; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.Unpooled; -import org.apache.commons.codec.binary.Base64; -import reactor.core.publisher.Flux; - -import java.net.InetSocketAddress; -import java.nio.charset.StandardCharsets; -import java.util.Arrays; -import java.util.Collections; -import java.util.Random; - -public class BlobPocTests { - - //@Test - public void testCreateBlob() { - AzureBlobStorageImpl client = new AzureBlobStorageImpl(HttpPipeline.builder().httpClient(HttpClient.createDefault().proxy(() -> new ProxyOptions(Type.HTTP, new InetSocketAddress("localhost", 8888))))/*, - new HttpPipelinePolicy() { - @Override - public Mono process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { - String url = context.httpRequest().url().toString(); - String sasToken = System.getenv("AZURE_STORAGE_SAS_TOKEN"); - if (url.contains("?")) { - sasToken = sasToken.replaceFirst("\\?", "&"); - } - url += sasToken; - try { - context.withHttpRequest(context.httpRequest().withUrl(new URL(url))); - } catch (MalformedURLException e) { - return Mono.error(e); - } - return next.process(); - } - }*/.build()).withUrl("https://" + System.getenv("AZURE_STORAGE_ACCOUNT_NAME") + ".blob.core.windows.net/mycontainer/random223" + System.getenv("AZURE_STORAGE_SAS_TOKEN")); - - Random random = new Random(); - - byte[] randomBytes = new byte[4096]; - random.nextBytes(randomBytes); - ByteBuf bb = Unpooled.wrappedBuffer(randomBytes); - String base64 = Base64.encodeBase64String("0001".getBytes(StandardCharsets.UTF_8)); - client.blockBlobs().stageBlockWithRestResponseAsync(null, null, base64, 4096, Flux.just(bb), null).block(); - client.blockBlobs().commitBlockListWithRestResponseAsync(null, null, new BlockLookupList().latest(Arrays.asList(base64)), null).block(); - - client.blobs().setMetadataWithRestResponseAsync(null, null, null, Collections.singletonMap("foo", "bar"), null, null, null, null, null, null, null).block(); - BlobsGetPropertiesResponse res = client.blobs().getPropertiesWithRestResponseAsync(null, null, null).block(); - System.out.println(res.deserializedHeaders().metadata().size()); - } -} +//// Copyright (c) Microsoft Corporation. All rights reserved. +//// Licensed under the MIT License. +//// Code generated by Microsoft (R) AutoRest Code Generator. +// +//package com.azure.storage.blob; +// +//import com.azure.core.http.HttpClient; +//import com.azure.core.http.HttpPipeline; +//import com.azure.core.http.ProxyOptions; +//import com.azure.core.http.ProxyOptions.Type; +//import com.azure.storage.blob.implementation.AzureBlobStorageBuilder; +//import com.azure.storage.blob.implementation.AzureBlobStorageImpl; +//import com.azure.storage.blob.models.BlobGetPropertiesHeaders; +//import com.azure.storage.blob.models.BlobsGetPropertiesResponse; +//import com.azure.storage.blob.models.BlockLookupList; +//import io.netty.buffer.ByteBuf; +//import io.netty.buffer.Unpooled; +//import org.apache.commons.codec.binary.Base64; +//import org.junit.Test; +//import reactor.core.publisher.Flux; +// +//import java.net.InetSocketAddress; +//import java.nio.charset.StandardCharsets; +//import java.util.Arrays; +//import java.util.Collections; +//import java.util.Random; +// +//public class BlobPocTests { +// +// @Test +// public void testCreateBlobWithAutoRestClient() { +// AzureBlobStorageImpl client = new AzureBlobStorageBuilder().pipeline(HttpPipeline.builder().httpClient(HttpClient.createDefault().proxy(() -> new ProxyOptions(Type.HTTP, new InetSocketAddress("localhost", 8888))))/*, +// new HttpPipelinePolicy() { +// @Override +// public Mono process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { +// String url = context.httpRequest().url().toString(); +// String sasToken = System.getenv("AZURE_STORAGE_SAS_TOKEN"); +// if (url.contains("?")) { +// sasToken = sasToken.replaceFirst("\\?", "&"); +// } +// url += sasToken; +// try { +// context.withHttpRequest(context.httpRequest().withUrl(new URL(url))); +// } catch (MalformedURLException e) { +// return Mono.error(e); +// } +// return next.process(); +// } +// }*/.build()).url("https://" + System.getenv("AZURE_STORAGE_ACCOUNT_NAME") + ".blob.core.windows.net/mycontainer/random223" + System.getenv("AZURE_STORAGE_SAS_TOKEN")).build(); +// +// Random random = new Random(); +// +// byte[] randomBytes = new byte[4096]; +// random.nextBytes(randomBytes); +// ByteBuf bb = Unpooled.wrappedBuffer(randomBytes); +// String base64 = Base64.encodeBase64String("0001".getBytes(StandardCharsets.UTF_8)); +// client.blockBlobs().stageBlockWithRestResponseAsync(null, null, base64, 4096, Flux.just(bb), null).block(); +// client.blockBlobs().commitBlockListWithRestResponseAsync(null, null, new BlockLookupList().latest(Arrays.asList(base64)), null).block(); +// +// client.blobs().setMetadataWithRestResponseAsync(null, null, null, Collections.singletonMap("foo", "bar"), null, null, null, null, null, null, null).block(); +// BlobsGetPropertiesResponse res = client.blobs().getPropertiesWithRestResponseAsync(null, null, null).block(); +// System.out.println(res.deserializedHeaders().metadata().size()); +// } +// +// @Test +// public void testCreateBlob() { +// BlockBlobClient blockBlobClient = BlockBlobClient.blockBlobClientBuilder() +// .connectionString(System.getenv("AZURE_STORAGE_CONNECTION_STRING")) +// .buildClient(); +// +// Random random = new Random(); +// +// byte[] randomBytes = new byte[4096]; +// random.nextBytes(randomBytes); +// ByteBuf bb = Unpooled.wrappedBuffer(randomBytes); +// String base64 = Base64.encodeBase64String("0001".getBytes(StandardCharsets.UTF_8)); +// blockBlobClient.stageBlock(base64, Flux.just(bb), 4096); +// blockBlobClient.commitBlockList(Arrays.asList(base64)); +// +// BlobClient blobClient = BlobClient.blobClientBuilder() +// .connectionString(System.getenv("AZURE_STORAGE_CONNECTION_STRING")) +// .buildClient(); +// +// blobClient.setMetadata(new Metadata(Collections.singletonMap("foo", "bar"))); +// BlobGetPropertiesHeaders res = blobClient.getProperties(); +// System.out.println(res.metadata().size()); +// } +//} diff --git a/storage/client/src/test/java/com/azure/storage/blob/BlockBlobAPITest.groovy b/storage/client/src/test/java/com/azure/storage/blob/BlockBlobAPITest.groovy new file mode 100644 index 0000000000000..ab5f59fbd095f --- /dev/null +++ b/storage/client/src/test/java/com/azure/storage/blob/BlockBlobAPITest.groovy @@ -0,0 +1,796 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob + +import com.azure.storage.blob.models.* +import spock.lang.Unroll + +import java.nio.charset.StandardCharsets +import java.security.MessageDigest + +class BlockBlobAPITest extends APISpec { + BlockBlobClient bu + + def setup() { + bu = cu.getBlockBlobClient(generateBlobName()) + bu.upload(defaultInputStream.get(), defaultDataSize) + } + + def getBlockID() { + return Base64.encoder.encodeToString(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8)) + } + + def "Stage block"() { + setup: + bu.stageBlock(getBlockID(), defaultInputStream.get(), defaultDataSize) +// BlockBlobStageBlockHeaders headers = response.headers() +// +// expect: +// notThrown(StorageException) +// response.statusCode() == 201 +// headers.contentMD5() != null +// headers.requestId() != null +// headers.version() != null +// headers.date() != null +// headers.isServerEncrypted() + } + +// def "Stage block min"() { +// expect: +// bu.stageBlock(getBlockID(), defaultInputStream.get(), defaultDataSize).statusCode() == 201 +// } + + @Unroll + def "Stage block illegal arguments"() { + when: + bu.stageBlock(blockID, data == null ? null : data.get(), dataSize) + + then: + def e = thrown(Exception) + exceptionType.isInstance(e) + + where: + blockID | data | dataSize | exceptionType + null | defaultInputStream | defaultDataSize | StorageException + getBlockID() | null | defaultDataSize | NullPointerException + // TODO: No exceptions are thrown for the following two, expected? +// getBlockID() | defaultInputStream | defaultDataSize + 1 | IllegalArgumentException +// getBlockID() | defaultInputStream | defaultDataSize - 1 | IllegalArgumentException + } + + def "Stage block empty body"() { + when: + bu.stageBlock(getBlockID(), new ByteArrayInputStream(new byte[0]), 0) + + then: + thrown(StorageException) + } + + def "Stage block null body"() { + when: + bu.stageBlock(getBlockID(), null, 0) + + then: + thrown(NullPointerException) // Thrown by Flux.just(). + } + +// def "Stage block lease"() { +// setup: +// String leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) +// +// expect: +// bu.stageBlock(getBlockID(), defaultInputStream.get(), defaultDataSize, new LeaseAccessConditions().withLeaseId(leaseID), +// null).statusCode() == 201 +// } +// +// def "Stage block lease fail"() { +// setup: +// setupBlobLeaseCondition(bu, receivedLeaseID) +// +// when: +// bu.stageBlock(getBlockID(), defaultInputStream.get(), defaultDataSize, new LeaseAccessConditions() +// .withLeaseId(garbageLeaseID), null) +// +// then: +// def e = thrown(StorageException) +// e.errorCode() == StorageErrorCode.LEASE_ID_MISMATCH_WITH_BLOB_OPERATION +// } +// + def "Stage block error"() { + setup: + bu = cu.getBlockBlobClient(generateBlobName()) + + when: + bu.stageBlock("id", defaultInputStream.get(), defaultDataSize) + + then: + thrown(StorageException) + } + + // TODO: need to be able to get current blob client's endpoint +// def "Stage block context"() { +// setup: +// bu = BlockBlobClient.blockBlobClientBuilder().endpoint("http://dummy").addPolicy(getContextStubPolicy(201, BlockBlobStageBlockHeaders)).buildClient() +// +// when: +// // No service call is made. Just satisfy the parameters. +// bu.stageBlock("id", defaultInputStream.get(), defaultDataSize, null, null, defaultContext) +// +// then: +// notThrown(RuntimeException) +// } + + //TODO: Add back the following 12 tests once BlockBlobClient.toURL() is implemented +// def "Stage block from url"() { +// setup: +// cu.setAccessPolicy(PublicAccessType.CONTAINER, null) +// def bu2 = cu.getBlockBlobClient(generateBlobName()) +// def blockID = getBlockID() +// +// when: +// def response = bu2.stageBlockFromURL(blockID, bu.toURL(), null) +// def listResponse = bu2.listBlocks(BlockListType.ALL, null, null) +// bu2.commitBlockList(Arrays.asList(blockID), null, null, null, null) +// +// then: +// response.headers().requestId() != null +// response.headers().version() != null +// response.headers().requestId() != null +// response.headers().contentMD5() != null +// response.headers().isServerEncrypted() != null +// +// listResponse.body().uncommittedBlocks().get(0).name() == blockID +// listResponse.body().uncommittedBlocks().size() == 1 +// +// FluxUtil.collectBytesInBuffer(bu2.download(null, null, false, null) +// .body(null)) == defaultData +// } + +// def "Stage block from url min"() { +// setup: +// cu.setAccessPolicy(PublicAccessType.CONTAINER, null) +// def bu2 = cu.getBlockBlobClient(generateBlobName()) +// def blockID = getBlockID() +// +// expect: +// bu2.stageBlockFromURL(blockID, bu.toURL(), null).statusCode() == 201 +// } +// +// @Unroll +// def "Stage block from URL IA"() { +// when: +// bu.stageBlockFromURL(blockID, sourceURL, null, null, null, null, null) +// +// +// then: +// thrown(IllegalArgumentException) +// +// where: +// blockID | sourceURL +// null | new URL("http://www.example.com") +// getBlockID() | null +// } +// +// def "Stage block from URL range"() { +// setup: +// cu.setAccessPolicy(PublicAccessType.CONTAINER, null, null, null) +// def destURL = cu.createBlockBlobURL(generateBlobName()) +// +// when: +// destURL.stageBlockFromURL(getBlockID(), bu.toURL(), new BlobRange().withOffset(2).withCount(3), null, null, +// null, null) +// +// then: +// destURL.listBlocks(BlockListType.ALL, null, null).body().uncommittedBlocks().get(0) +// .size() == 3 +// } +// +// def "Stage block from URL MD5"() { +// setup: +// cu.setAccessPolicy(PublicAccessType.CONTAINER, null, null, null) +// def destURL = cu.createBlockBlobURL(generateBlobName()) +// +// when: +// destURL.stageBlockFromURL(getBlockID(), bu.toURL(), null, +// MessageDigest.getInstance("MD5").digest(defaultData.array()), null, null, null) +// +// then: +// notThrown(StorageException) +// } +// +// def "Stage block from URL MD5 fail"() { +// setup: +// cu.setAccessPolicy(PublicAccessType.CONTAINER, null, null, null) +// def destURL = cu.createBlockBlobURL(generateBlobName()) +// +// when: +// destURL.stageBlockFromURL(getBlockID(), bu.toURL(), null, "garbage".getBytes(), +// null, null, null) +// +// then: +// thrown(StorageException) +// } +// +// def "Stage block from URL lease"() { +// setup: +// cu.setAccessPolicy(PublicAccessType.CONTAINER, null, null, null) +// def lease = new LeaseAccessConditions().withLeaseId(setupBlobLeaseCondition(bu, receivedLeaseID)) +// +// when: +// bu.stageBlockFromURL(getBlockID(), bu.toURL(), null, null, lease, null, null) +// +// then: +// notThrown(StorageException) +// } +// +// def "Stage block from URL lease fail"() { +// setup: +// cu.setAccessPolicy(PublicAccessType.CONTAINER, null, null, null) +// def lease = new LeaseAccessConditions().withLeaseId("garbage") +// +// when: +// bu.stageBlockFromURL(getBlockID(), bu.toURL(), null, null, lease, null, null) +// +// then: +// thrown(StorageException) +// } +// +// def "Stage block from URL error"() { +// setup: +// cu = primaryServiceURL.createContainerURL(generateContainerName()) +// bu = cu.createBlockBlobURL(generateBlobName()) +// +// when: +// bu.stageBlockFromURL(getBlockID(), bu.toURL(), null, null, null, null, null) +// +// +// then: +// thrown(StorageException) +// } +// +// def "Stage block from URL context"() { +// setup: +// def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(201, BlockBlobStageBlockFromURLHeaders))) +// +// bu = bu.withPipeline(pipeline) +// +// when: +// // No service call is made. Just satisfy the parameters. +// bu.stageBlockFromURL("id", bu.toURL(), null, null, null, null, defaultContext) +// +// then: +// notThrown(RuntimeException) +// } +// +// @Unroll +// def "Stage block from URL source AC"() { +// setup: +// cu.setAccessPolicy(PublicAccessType.CONTAINER, null, null, null) +// def blockID = getBlockID() +// +// def sourceURL = cu.createBlockBlobURL(generateBlobName()) +// sourceURL.upload(defaultInputStream.get(), defaultDataSize) +// +// sourceIfMatch = setupBlobMatchCondition(sourceURL, sourceIfMatch) +// def smac = new SourceModifiedAccessConditions() +// .withSourceIfModifiedSince(sourceIfModifiedSince) +// .withSourceIfUnmodifiedSince(sourceIfUnmodifiedSince) +// .withSourceIfMatch(sourceIfMatch) +// .withSourceIfNoneMatch(sourceIfNoneMatch) +// +// expect: +// bu.stageBlockFromURL(blockID, sourceURL.toURL(), null, null, null, smac, null).statusCode() == 201 +// +// where: +// sourceIfModifiedSince | sourceIfUnmodifiedSince | sourceIfMatch | sourceIfNoneMatch +// null | null | null | null +// oldDate | null | null | null +// null | newDate | null | null +// null | null | receivedEtag | null +// null | null | null | garbageEtag +// } +// +// @Unroll +// def "Stage block from URL source AC fail"() { +// setup: +// cu.setAccessPolicy(PublicAccessType.CONTAINER, null, null, null) +// def blockID = getBlockID() +// +// def sourceURL = cu.createBlockBlobURL(generateBlobName()) +// sourceURL.upload(defaultInputStream.get(), defaultDataSize) +// +// sourceIfNoneMatch = setupBlobMatchCondition(sourceURL, sourceIfNoneMatch) +// def smac = new SourceModifiedAccessConditions() +// .withSourceIfModifiedSince(sourceIfModifiedSince) +// .withSourceIfUnmodifiedSince(sourceIfUnmodifiedSince) +// .withSourceIfMatch(sourceIfMatch) +// .withSourceIfNoneMatch(sourceIfNoneMatch) +// +// when: +// bu.stageBlockFromURL(blockID, sourceURL.toURL(), null, null, null, smac, null).statusCode() == 201 +// +// then: +// thrown(StorageException) +// +// where: +// sourceIfModifiedSince | sourceIfUnmodifiedSince | sourceIfMatch | sourceIfNoneMatch +// newDate | null | null | null +// null | oldDate | null | null +// null | null | garbageEtag | null +// null | null | null | receivedEtag +// } + + def "Commit block list"() { + setup: + String blockID = getBlockID() + bu.stageBlock(blockID, defaultInputStream.get(), defaultDataSize) + ArrayList ids = new ArrayList<>() + ids.add(blockID) + + when: + BlockBlobCommitBlockListHeaders headers = + bu.commitBlockList(ids) + + then: +// response.statusCode() == 201 + validateBasicHeaders(headers) + headers.contentMD5() + headers.isServerEncrypted() + } + + def "Commit block list min"() { + setup: + String blockID = getBlockID() + bu.stageBlock(blockID, defaultInputStream.get(), defaultDataSize) + ArrayList ids = new ArrayList<>() + ids.add(blockID) + + expect: + bu.commitBlockList(ids) != null + } + +// def "Commit block list null"() { +// expect: +// bu.commitBlockList(null,) +// .statusCode() == 201 +// } +// +// @Unroll +// def "Commit block list headers"() { +// setup: +// String blockID = getBlockID() +// bu.stageBlock(blockID, defaultInputStream.get(), defaultDataSize, +// null, null) +// ArrayList ids = new ArrayList<>() +// ids.add(blockID) +// BlobHTTPHeaders headers = new BlobHTTPHeaders().withBlobCacheControl(cacheControl) +// .withBlobContentDisposition(contentDisposition) +// .withBlobContentEncoding(contentEncoding) +// .withBlobContentLanguage(contentLanguage) +// .withBlobContentMD5(contentMD5) +// .withBlobContentType(contentType) +// +// when: +// bu.commitBlockList(ids, headers, null, null, null) +// BlobGetPropertiesResponse response = bu.getProperties(null, null) +// +// then: +// response.statusCode() == 200 +// validateBlobHeaders(response.headers(), cacheControl, contentDisposition, contentEncoding, contentLanguage, +// contentMD5, contentType == null ? "application/octet-stream" : contentType) +// // HTTP default content type is application/octet-stream +// +// where: +// cacheControl | contentDisposition | contentEncoding | contentLanguage | contentMD5 | contentType +// null | null | null | null | null | null +// "control" | "disposition" | "encoding" | "language" | MessageDigest.getInstance("MD5").digest(defaultData.array()) | "type" +// } +// +// @Unroll +// def "Commit block list metadata"() { +// setup: +// Metadata metadata = new Metadata() +// if (key1 != null) { +// metadata.put(key1, value1) +// } +// if (key2 != null) { +// metadata.put(key2, value2) +// } +// +// when: +// bu.commitBlockList(null, null, metadata, null, null) +// BlobGetPropertiesResponse response = bu.getProperties(null, null) +// +// then: +// response.statusCode() == 200 +// response.headers().metadata() == metadata +// +// where: +// key1 | value1 | key2 | value2 +// null | null | null | null +// "foo" | "bar" | "fizz" | "buzz" +// } +// +// @Unroll +// def "Commit block list AC"() { +// setup: +// match = setupBlobMatchCondition(bu, match) +// leaseID = setupBlobLeaseCondition(bu, leaseID) +// BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( +// new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) +// .withIfMatch(match).withIfNoneMatch(noneMatch)) +// .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) +// +// expect: +// bu.commitBlockList(null, null, null, bac, null).statusCode() == 201 +// +// where: +// modified | unmodified | match | noneMatch | leaseID +// null | null | null | null | null +// oldDate | null | null | null | null +// null | newDate | null | null | null +// null | null | receivedEtag | null | null +// null | null | null | garbageEtag | null +// null | null | null | null | receivedLeaseID +// } +// +// @Unroll +// def "Commit block list AC fail"() { +// setup: +// noneMatch = setupBlobMatchCondition(bu, noneMatch) +// setupBlobLeaseCondition(bu, leaseID) +// BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( +// new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) +// .withIfMatch(match).withIfNoneMatch(noneMatch)) +// .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) +// +// when: +// bu.commitBlockList(null, null, null, bac, null) +// +// then: +// def e = thrown(StorageException) +// e.errorCode() == StorageErrorCode.CONDITION_NOT_MET || +// e.errorCode() == StorageErrorCode.LEASE_ID_MISMATCH_WITH_BLOB_OPERATION +// +// where: +// modified | unmodified | match | noneMatch | leaseID +// newDate | null | null | null | null +// null | oldDate | null | null | null +// null | null | garbageEtag | null | null +// null | null | null | receivedEtag | null +// null | null | null | null | garbageLeaseID +// } +// +// def "Commit block list error"() { +// setup: +// bu = cu.createBlockBlobURL(generateBlobName()) +// +// when: +// bu.commitBlockList(new ArrayList(), null, null, new BlobAccessConditions().withLeaseAccessConditions( +// new LeaseAccessConditions().withLeaseId("garbage")), null) +// +// then: +// thrown(StorageException) +// } +// +// def "Commit block list info context"() { +// setup: +// def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(201, BlockBlobCommitBlockListHeaders))) +// +// bu = bu.withPipeline(pipeline) +// +// when: +// // No service call is made. Just satisfy the parameters. +// bu.commitBlockList(new ArrayList(), null, null, null, defaultContext) +// +// then: +// notThrown(RuntimeException) +// } + + def "Get block list"() { + setup: + List committedBlocks = Arrays.asList(getBlockID(), getBlockID()) + bu.stageBlock(committedBlocks.get(0), defaultInputStream.get(), defaultDataSize) + bu.stageBlock(committedBlocks.get(1), defaultInputStream.get(), defaultDataSize) + bu.commitBlockList(committedBlocks) + + List uncommittedBlocks = Arrays.asList(getBlockID(), getBlockID()) + bu.stageBlock(uncommittedBlocks.get(0), defaultInputStream.get(), defaultDataSize) + bu.stageBlock(uncommittedBlocks.get(1), defaultInputStream.get(), defaultDataSize) + uncommittedBlocks.sort(true) + + when: + Iterable response = bu.listBlocks(BlockListType.ALL) + + then: + for (BlockItem block : response) { + assert committedBlocks.contains(block.name()) || uncommittedBlocks.contains(block.name()) + assert block.size() == defaultDataSize + } +// for (int i = 0; i < committedBlocks.size(); i++) { +// assert response.body().committedBlocks().get(i).name() == committedBlocks.get(i) +// assert response.body().committedBlocks().get(i).size() == defaultDataSize +// assert response.body().uncommittedBlocks().get(i).name() == uncommittedBlocks.get(i) +// assert response.body().uncommittedBlocks().get(i).size() == defaultDataSize +// } +// validateBasicHeaders(response.headers()) +// response.headers().contentType() != null +// response.headers().blobContentLength() == defaultDataSize * 2L + } + +// def "Get block list min"() { +// expect: +// bu.listBlocks(BlockListType.ALL).statusCode() == 200 +// } + + @Unroll + def "Get block list type"() { + setup: + String blockID = getBlockID() + bu.stageBlock(blockID, defaultInputStream.get(), defaultDataSize) + ArrayList ids = new ArrayList<>() + ids.add(blockID) + bu.commitBlockList(ids) + blockID = new String(getBlockID()) + bu.stageBlock(blockID, defaultInputStream.get(), defaultDataSize) + + when: + Iterable response = bu.listBlocks(type) + + then: + int committed = 0 + int uncommitted = 0 + for (BlockItem item : response) { + if (item.isCommitted()) { + committed ++ + } else { + uncommitted ++ + } + } + committed == committedCount + uncommitted == uncommittedCount + + where: + type | committedCount | uncommittedCount + BlockListType.ALL | 1 | 1 + BlockListType.COMMITTED | 1 | 0 + BlockListType.UNCOMMITTED | 0 | 1 + } + +// def "Get block list type null"() { +// when: +// bu.listBlocks(null, null, null) +// +// then: +// thrown(IllegalArgumentException) +// } +// +// def "Get block list lease"() { +// setup: +// String leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) +// +// expect: +// bu.listBlocks(BlockListType.ALL, new LeaseAccessConditions().withLeaseId(leaseID), null) +// .statusCode() == 200 +// } +// +// def "Get block list lease fail"() { +// setup: +// setupBlobLeaseCondition(bu, garbageLeaseID) +// +// when: +// bu.listBlocks(BlockListType.ALL, new LeaseAccessConditions().withLeaseId(garbageLeaseID), null) +// +// then: +// def e = thrown(StorageException) +// e.errorCode() == StorageErrorCode.LEASE_ID_MISMATCH_WITH_BLOB_OPERATION +// } +// +// def "Get block list error"() { +// setup: +// bu = cu.createBlockBlobURL(generateBlobName()) +// +// when: +// bu.listBlocks(BlockListType.ALL, null, null) +// +// then: +// thrown(StorageException) +// } +// +// def "Get block list context"() { +// setup: +// def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(200, BlockBlobGetBlockListHeaders))) +// +// bu = bu.withPipeline(pipeline) +// +// when: +// // No service call is made. Just satisfy the parameters. +// bu.listBlocks(BlockListType.ALL, null, defaultContext) +// +// then: +// notThrown(RuntimeException) +// } + + def "Upload"() { + when: + BlockBlobUploadHeaders headers = bu.upload(defaultInputStream.get(), defaultDataSize) + + then: +// response.statusCode() == 201 + def outStream = new ByteArrayOutputStream() + bu.download(outStream) + outStream.toByteArray() == "default".getBytes(StandardCharsets.UTF_8) + validateBasicHeaders(headers) + headers.contentMD5() != null + headers.isServerEncrypted() + } + +// def "Upload min"() { +// expect: +// bu.upload(defaultInputStream.get(), defaultDataSize).statusCode() == 201 +// } + +// @Unroll +// def "Upload illegal argument"() { +// when: +// bu.upload(data, dataSize, null, null, null, null) +// +// then: +// def e = thrown(Exception) +// exceptionType.isInstance(e) +// +// where: +// data | dataSize | exceptionType +// null | defaultDataSize | IllegalArgumentException +// defaultInputStream.get() | defaultDataSize + 1 | UnexpectedLengthException +// defaultInputStream.get() | defaultDataSize - 1 | UnexpectedLengthException +// } +// +// def "Upload empty body"() { +// expect: +// bu.upload(Flux.just(ByteBuffer.wrap(new byte[0])), 0, null, null, +// null, null).statusCode() == 201 +// } +// +// def "Upload null body"() { +// when: +// bu.upload(Flux.just(null), 0, null, null, null, null) +// +// then: +// thrown(NullPointerException) // Thrown by Flux.just(). +// } + + @Unroll + def "Upload headers"() { + setup: + BlobHTTPHeaders headers = new BlobHTTPHeaders().blobCacheControl(cacheControl) + .blobContentDisposition(contentDisposition) + .blobContentEncoding(contentEncoding) + .blobContentLanguage(contentLanguage) + .blobContentMD5(contentMD5) + .blobContentType(contentType) + + when: + bu.upload(defaultInputStream.get(), defaultDataSize, + headers, null, null, null, null) + BlobGetPropertiesHeaders responseHeaders = bu.getProperties(null, null) + + then: + validateBlobHeaders(responseHeaders, cacheControl, contentDisposition, contentEncoding, contentLanguage, + MessageDigest.getInstance("MD5").digest(defaultData.array()), + contentType == null ? "application/octet-stream" : contentType) + // For uploading a block blob, the service will auto calculate an MD5 hash if not present + // HTTP default content type is application/octet-stream + + where: + cacheControl | contentDisposition | contentEncoding | contentLanguage | contentMD5 | contentType + // TODO: following line throws NPE +// null | null | null | null | null | null + "control" | "disposition" | "encoding" | "language" | MessageDigest.getInstance("MD5").digest(defaultData.array()) | "type" + } + + @Unroll + def "Upload metadata"() { + setup: + Metadata metadata = new Metadata() + if (key1 != null) { + metadata.put(key1, value1) + } + if (key2 != null) { + metadata.put(key2, value2) + } + + when: + bu.upload(defaultInputStream.get(), defaultDataSize, + null, metadata, null, null, null) + BlobGetPropertiesHeaders responseHeaders = bu.getProperties(null, null) + + then: +// response.statusCode() == 200 + responseHeaders.metadata() == metadata + + where: + key1 | value1 | key2 | value2 + null | null | null | null + "foo" | "bar" | "fizz" | "buzz" + } + +// @Unroll +// def "Upload AC"() { +// setup: +// match = setupBlobMatchCondition(bu, match) +// leaseID = setupBlobLeaseCondition(bu, leaseID) +// BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( +// new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) +// .withIfMatch(match).withIfNoneMatch(noneMatch)) +// .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) +// +// expect: +// bu.upload(defaultInputStream.get(), defaultDataSize, +// null, null, bac, null).statusCode() == 201 +// +// where: +// modified | unmodified | match | noneMatch | leaseID +// null | null | null | null | null +// oldDate | null | null | null | null +// null | newDate | null | null | null +// null | null | receivedEtag | null | null +// null | null | null | garbageEtag | null +// null | null | null | null | receivedLeaseID +// } +// +// @Unroll +// def "Upload AC fail"() { +// setup: +// noneMatch = setupBlobMatchCondition(bu, noneMatch) +// setupBlobLeaseCondition(bu, leaseID) +// BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( +// new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) +// .withIfMatch(match).withIfNoneMatch(noneMatch)) +// .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) +// +// when: +// bu.upload(defaultInputStream.get(), defaultDataSize, null, null, bac, null) +// +// then: +// def e = thrown(StorageException) +// e.errorCode() == StorageErrorCode.CONDITION_NOT_MET || +// e.errorCode() == StorageErrorCode.LEASE_ID_MISMATCH_WITH_BLOB_OPERATION +// +// where: +// modified | unmodified | match | noneMatch | leaseID +// newDate | null | null | null | null +// null | oldDate | null | null | null +// null | null | garbageEtag | null | null +// null | null | null | receivedEtag | null +// null | null | null | null | garbageLeaseID +// } + +// def "Upload error"() { +// setup: +// bu = cu.createBlockBlobURL(generateBlobName()) +// +// when: +// bu.upload(defaultInputStream.get(), defaultDataSize, null, null, +// new BlobAccessConditions().withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId("id")), +// null) +// +// then: +// thrown(StorageException) +// } +// +// def "Upload context"() { +// setup: +// def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(201, BlockBlobUploadHeaders))) +// +// bu = bu.withPipeline(pipeline) +// +// when: +// // No service call is made. Just satisfy the parameters. +// bu.upload(defaultInputStream.get(), defaultDataSize, null, null, null, defaultContext) +// +// then: +// notThrown(RuntimeException) +// } +} diff --git a/storage/client/src/test/java/com/azure/storage/blob/ContainerAPITest.groovy b/storage/client/src/test/java/com/azure/storage/blob/ContainerAPITest.groovy new file mode 100644 index 0000000000000..f1a7b2c659331 --- /dev/null +++ b/storage/client/src/test/java/com/azure/storage/blob/ContainerAPITest.groovy @@ -0,0 +1,321 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob + +import com.azure.storage.blob.models.BlobItem +import com.azure.storage.blob.models.BlobType +import com.azure.storage.blob.models.ContainerGetPropertiesHeaders +import com.azure.storage.blob.models.PublicAccessType +import org.junit.Assume +import spock.lang.Unroll + +class ContainerAPITest extends APISpec { + ///////////////////////////////////// + ////// Champion: ALL PASSED ///////// + ///////////////////////////////////// + def "Create all null"() { + setup: + // Overwrite the existing cu, which has already been created + String containerName = generateContainerName() + System.out.println("create all null name: " + containerName) + cu = primaryServiceURL.getContainerClient(containerName) + + when: + cu.create(null, null, null, null) + + then: + System.out.println(cu.properties.toString()) + } + + def "delete"(){ + cu.delete(null, null, null) + when: + cu.getProperties() + then: + thrown(StorageException) + } + + ///////////////////////////////////// + ////// High-pri: ALL PASSED ///////// + ///////////////////////////////////// + def "Create min"() { + setup: + String containerName = generateContainerName() + System.out.println("create min name: " + containerName) + when: + primaryServiceURL.getContainerClient(containerName).create() + then: + primaryServiceURL.properties + } + + @Unroll + def "Create metadata"() { + setup: + cu = primaryServiceURL.getContainerClient(generateContainerName()) + Metadata metadata = new Metadata() + if (key1 != null) { + metadata.put(key1, value1) + } + if (key2 != null) { + metadata.put(key2, value2) + } + + when: + cu.create(metadata, null, null, null) + ContainerGetPropertiesHeaders response = cu.getProperties() + + then: + response.metadata() == metadata + + where: + key1 | value1 | key2 | value2 + null | null | null | null + "foo" | "bar" | "fizz" | "buzz" + } + + @Unroll + def "Create publicAccess"() { + setup: + cu = primaryServiceURL.getContainerClient(generateContainerName()) + + when: + cu.create(null, publicAccess, null, null) + PublicAccessType access = + cu.getProperties().blobPublicAccess() + + then: + access.toString() == publicAccess.toString() + + where: + publicAccess | _ + PublicAccessType.BLOB | _ + PublicAccessType.CONTAINER | _ + null | _ + } + + // Failed at Error code + def "Create error"() { + when: + cu.create() + + then: + def e = thrown(StorageException) + e.response().statusCode() == 409 + //e.errorCode() == "ContainerAlreadyExists" + e.message().contains("The specified container already exists.") + } + + def "Create context"() { + setup: + String containerName = generateContainerName() + System.out.println("create context name: " + containerName) + def cuContext = primaryServiceURL.getContainerClient(containerName) + + when: + // No service call is made. Just satisfy the parameters. + cuContext.create(null, null, null, defaultContext) + + then: + notThrown(RuntimeException) + } + + def "Delete min"() { + when: + cu.delete() + cu.getProperties() + then: + thrown(StorageException) + + } + + def "Delete error"() { + setup: + cu = primaryServiceURL.getContainerClient(generateContainerName()) + + when: + cu.delete() + + then: + thrown(StorageException) + } + + def "Delete context"() { + setup: + def cuDeleteContext = primaryServiceURL.getContainerClient(generateContainerName()) + cuDeleteContext.create() + + when: + // No service call is made. Just satisfy the parameters. + cuDeleteContext.delete(null, null, defaultContext) + + then: + notThrown(RuntimeException) + } + + + def "List blobs flat"() { + setup: + String name = generateBlobName() + PageBlobClient bu = cu.getPageBlobClient(name) + bu.create(512, null, null, null, null, null, null) + + when: + List blobs = cu.listBlobsFlat(null, null, null).asList() + + then: + blobs.size() == 1 + blobs.get(0).name() == name + blobs.get(0).properties().blobType() == BlobType.PAGE_BLOB + blobs.get(0).properties().copyCompletionTime() == null + blobs.get(0).properties().copyStatusDescription() == null + blobs.get(0).properties().copyId() == null + blobs.get(0).properties().copyProgress() == null + blobs.get(0).properties().copySource() == null + blobs.get(0).properties().copyStatus() == null + blobs.get(0).properties().incrementalCopy() == null + blobs.get(0).properties().destinationSnapshot() == null + blobs.get(0).properties().leaseDuration() == null + blobs.get(0).properties().contentLength() != null + blobs.get(0).properties().contentType() != null + blobs.get(0).properties().contentMD5 == null + blobs.get(0).properties().contentEncoding() == null + blobs.get(0).properties().contentDisposition() == null + blobs.get(0).properties().contentLanguage() == null + blobs.get(0).properties().cacheControl() == null + blobs.get(0).properties().blobSequenceNumber() == 0 + blobs.get(0).properties().serverEncrypted() + blobs.get(0).properties().accessTierInferred() + blobs.get(0).properties().archiveStatus() == null + blobs.get(0).properties().creationTime() != null + } + + def "List blobs flat min"() { + when: + def containerName = generateBlobName() + BlockBlobClient bu = cu.getBlockBlobClient(containerName) + bu.upload(defaultInputStream, defaultDataSize) + + then: + cu.listBlobsFlat(null).each() { + blob -> + Assume.assumeTrue(blob.name().contains("javabloblistblobsflatmin")) + System.out.println("blob name: " + blob.name()) + } + } + + + + def "Set metadata"() { + setup: + cu = primaryServiceURL.getContainerClient(generateContainerName()) + Metadata metadata = new Metadata() + metadata.put("key", "value") + cu.create(metadata, null, null, null) + cu.setMetadata(metadata) + + expect: + cu.getProperties().metadata().size() == 1 + } + + def "Set metadata min"() { + setup: + Metadata metadata = new Metadata() + metadata.put("foo", "bar") + + when: + cu.setMetadata(metadata) + + then: + cu.getProperties().metadata() == metadata + } + + @Unroll + def "Set metadata metadata"() { + setup: + Metadata metadata = new Metadata() + if (key1 != null) { + metadata.put(key1, value1) + } + if (key2 != null) { + metadata.put(key2, value2) + } + cu.setMetadata(metadata) + + expect: + cu.getProperties().metadata() == metadata + + where: + key1 | value1 | key2 | value2 + null | null | null | null + "foo" | "bar" | "fizz" | "buzz" + } + + def "Set metadata error"() { + setup: + cu = primaryServiceURL.getContainerClient(generateContainerName()) + + when: + cu.setMetadata(null, null, null, null) + + then: + thrown(StorageException) + } + + def "Set metadata context"() { + setup: + def cuMetadataContext = primaryServiceURL.getContainerClient(generateContainerName()) + cuMetadataContext.create() + + when: + // No service call is made. Just satisfy the parameters. + cuMetadataContext.setMetadata(null, null, null, defaultContext) + + then: + notThrown(RuntimeException) + } + + + def "Get properties null"() { + when: + ContainerGetPropertiesHeaders headers = cu.getProperties(null, null, null) + + then: + validateBasicHeaders(headers) + headers.blobPublicAccess() == null + headers.leaseDuration() == null + headers.metadata().size() == 0 + !headers.hasImmutabilityPolicy() + !headers.hasLegalHold() + } + + def "Get properties min"() { + expect: + cu.getProperties().blobPublicAccess() == null + } + + def "Get properties error"() { + setup: + cu = primaryServiceURL.getContainerClient(generateContainerName()) + + when: + cu.getProperties(null, null, null) + + then: + thrown(StorageException) + } + + def "Get properties context"() { + setup: + def cuPropertyContext = primaryServiceURL.getContainerClient(generateContainerName()) + cuPropertyContext.create() + + when: + // No service call is made. Just satisfy the parameters. + cu.getProperties(null, null, defaultContext) + + then: + notThrown(RuntimeException) + } + +} diff --git a/storage/client/src/test/java/com/azure/storage/blob/Sample.java b/storage/client/src/test/java/com/azure/storage/blob/Sample.java new file mode 100644 index 0000000000000..d51b9d6868f29 --- /dev/null +++ b/storage/client/src/test/java/com/azure/storage/blob/Sample.java @@ -0,0 +1,208 @@ +package com.azure.storage.blob; + +import com.azure.core.http.HttpClient; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.ContainerItem; +import org.junit.Test; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.io.*; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.UUID; + +public class Sample { + + final static String accountEndpoint = ""; + final static String accountName = ""; + final static String accountKey = ""; + + @Test + public void sample() throws IOException { + // get service client + StorageClient serviceClient = new StorageClientBuilder().endpoint(accountEndpoint) + .credentials(new SharedKeyCredentials(accountName, accountKey)) + .httpClient(HttpClient.createDefault()/*.proxy(() -> new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888)))*/) + .buildClient(); + + // create 5 containers + ContainerClient containerClient = null; + for (int i = 0; i < 5; i++) { + String name = "uxtesting" + UUID.randomUUID(); + containerClient = serviceClient.getContainerClient(name); + containerClient.create(); + System.out.println("Created container: " + name); + } + System.out.println(); + + // list containers in account + System.out.println("Listing containers in account:"); + for (ContainerItem item : serviceClient.listContainers()) { + System.out.println(item.name()); + } + System.out.println(); + + // in the last container, create 5 blobs + for (int i = 0; i < 5; i++) { + BlockBlobClient blobClient = containerClient.getBlockBlobClient("testblob-" + i); + ByteArrayInputStream testdata = new ByteArrayInputStream(("test data" + i).getBytes(StandardCharsets.UTF_8)); + + blobClient.upload(testdata, testdata.available()); + System.out.println("Uploaded blob."); + } + System.out.println(); + + // list blobs and download results + System.out.println("Listing/downloading blobs:"); + for (BlobItem item : containerClient.listBlobsFlat()) { + ByteArrayOutputStream stream = new ByteArrayOutputStream(); + containerClient.getBlobClient(item.name()).download(stream); + System.out.println(item.name() + ": " + new String(stream.toByteArray())); + } + System.out.println(); + + // cleanup + for (ContainerItem item : serviceClient.listContainers()) { + containerClient = serviceClient.getContainerClient(item.name()); + containerClient.delete(); + System.out.println("Deleted container: " + item.name()); + } + } + + @Test + public void asyncSample() throws IOException { + // get service client + StorageAsyncClient serviceClient = new StorageClientBuilder().endpoint(accountEndpoint) + .credentials(new SharedKeyCredentials(accountName, accountKey)) + .httpClient(HttpClient.createDefault()/*.proxy(() -> new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888)))*/) + .buildAsyncClient(); + + // create 5 containers + ContainerAsyncClient containerClient = null; + Mono createContainerTask = Mono.empty(); + for (int i = 0; i < 5; i++) { + String name = "uxtesting" + UUID.randomUUID(); + containerClient = serviceClient.getContainerAsyncClient(name); + + createContainerTask = createContainerTask.and(containerClient.create().then(Mono.defer(() -> { + System.out.println("Created container: " + name); + return Mono.empty(); + }))); + } + ContainerAsyncClient finalContainerClient = containerClient; // final variable for lambda usage + + createContainerTask + // list containers + .thenMany(Flux.defer(() -> { + System.out.println("Listing containers in account:"); + return serviceClient.listContainers() + .flatMap(containerItem -> { + System.out.println(containerItem.name()); + return Mono.empty(); + }); + })) + // in the last container, create 5 blobs + .then(Mono.defer(() -> { + Mono finished = Mono.empty(); + for (int i = 0; i < 5; i++) { + BlockBlobAsyncClient blobClient = finalContainerClient.getBlockBlobAsyncClient("testblob-" + i); + byte[] message = ("test data" + i).getBytes(StandardCharsets.UTF_8); + Flux testdata = Flux.just(ByteBuffer.wrap(message)); + + + finished = finished.and(blobClient.upload(testdata, message.length) + .then(Mono.defer(() -> { + System.out.println("Uploaded blob."); + return Mono.empty(); + }))); + } + + return finished; + })) + // list blobs + .thenMany(Flux.defer(() -> { + System.out.println(); + System.out.println("Listing/downloading blobs:"); + return finalContainerClient.listBlobsFlat(); + })) + // download results + .flatMap(listItem -> + finalContainerClient.getBlobAsyncClient(listItem.name()) + .download() + .map(buffer -> new String(buffer.array())) + .doOnNext(string -> System.out.println(listItem.name() + ": " + string))) + // cleanup + .thenMany(serviceClient.listContainers()) + .flatMap(containerItem -> serviceClient + .getContainerAsyncClient(containerItem.name()) + .delete()) + .blockLast(); + } + + @Test + public void uploadDownloadFromFile() throws IOException { + final String data = "TEST DATA" + UUID.randomUUID(); + final String folderPath = "C:/Users/jaschrep/Desktop/temp"; + + // make start file + File startFile = new File(folderPath, "startFile" + UUID.randomUUID()); + FileOutputStream fstream = new FileOutputStream(startFile); + fstream.write(data.getBytes()); + fstream.close(); + + // get service client + StorageClient serviceClient = new StorageClientBuilder().endpoint(accountEndpoint) + .credentials(new SharedKeyCredentials(accountName, accountKey)) + .httpClient(HttpClient.createDefault()/*.proxy(() -> new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888)))*/) + .buildClient(); + + // make container + ContainerClient containerClient = serviceClient.getContainerClient("uxstudy" + UUID.randomUUID()); + containerClient.create(); + + // upload data + BlockBlobClient blobClient = containerClient.getBlockBlobClient("testblob_" + UUID.randomUUID()); + blobClient.uploadFromFile(startFile.getAbsolutePath()); + + // download data + File endFile = new File(folderPath, "endFile" + UUID.randomUUID()); + blobClient.downloadToFile(endFile.getAbsolutePath()); + } + + @Test + public void uploadDownloadFromFileAsync() throws IOException { + final String data = "TEST DATA" + UUID.randomUUID(); + final String folderPath = "C:/Users/jaschrep/Desktop/temp"; + + // make start file + File startFile = new File(folderPath, "startFile" + UUID.randomUUID()); + FileOutputStream fstream = new FileOutputStream(startFile); + fstream.write(data.getBytes()); + fstream.close(); + + // get service client + StorageAsyncClient serviceClient = new StorageClientBuilder().endpoint(accountEndpoint) + .credentials(new SharedKeyCredentials(accountName, accountKey)) + .httpClient(HttpClient.createDefault()/*.proxy(() -> new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 8888)))*/) + .buildAsyncClient(); + + // make container + ContainerAsyncClient containerClient = serviceClient.getContainerAsyncClient("uxstudy" + UUID.randomUUID()); + containerClient.create() + + // upload data + .then(Mono.defer(() -> { + BlockBlobAsyncClient blobClient = containerClient.getBlockBlobAsyncClient("testblob_" + UUID.randomUUID()); + return blobClient.uploadFromFile(startFile.getAbsolutePath()) + .then(Mono.just(blobClient)); + })) + + // download data + .flatMap(blobClient -> + blobClient.downloadToFile(new File(folderPath, "endFile" + UUID.randomUUID()).getAbsolutePath())) + + .block(); + + } +}