diff --git a/eng/code-quality-reports/src/main/resources/spotbugs/spotbugs-exclude.xml b/eng/code-quality-reports/src/main/resources/spotbugs/spotbugs-exclude.xml
index f7e3bcb5eb317..162fdadfc96b8 100755
--- a/eng/code-quality-reports/src/main/resources/spotbugs/spotbugs-exclude.xml
+++ b/eng/code-quality-reports/src/main/resources/spotbugs/spotbugs-exclude.xml
@@ -114,6 +114,11 @@
+ *
*/
- public String computeHmac256(final String stringToSign) throws InvalidKeyException {
+ public String computeHmac256(final String stringToSign) {
try {
/*
We must get a new instance of the Mac calculator for each signature calculated because the instances are
@@ -111,8 +115,10 @@ public String computeHmac256(final String stringToSign) throws InvalidKeyException
hmacSha256.init(new SecretKeySpec(this.accountKey, "HmacSHA256"));
byte[] utf8Bytes = stringToSign.getBytes(StandardCharsets.UTF_8);
return Base64.getEncoder().encodeToString(hmacSha256.doFinal(utf8Bytes));
- } catch (final NoSuchAlgorithmException e) {
- throw new Error(e);
+ } catch (final NoSuchAlgorithmException e) {
+ throw new RuntimeException("There is no such algorithm. Error details: " + e.getMessage(), e);
+ } catch (final InvalidKeyException e) {
+ throw new RuntimeException("Please double check the account key. Error details: " + e.getMessage(), e);
}
}
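For context, the hunk above computes the shared-key signature for a request. A minimal, self-contained sketch of that flow, assuming accountKey already holds the decoded key bytes (the HmacSigner class is an illustrative name, not part of the SDK):

import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;
import java.nio.charset.StandardCharsets;
import java.security.GeneralSecurityException;
import java.util.Base64;

final class HmacSigner {
    private final byte[] accountKey;

    HmacSigner(byte[] accountKey) {
        this.accountKey = accountKey;
    }

    String computeHmac256(String stringToSign) {
        try {
            // A fresh Mac instance per call; Mac instances are not thread-safe.
            Mac hmacSha256 = Mac.getInstance("HmacSHA256");
            hmacSha256.init(new SecretKeySpec(accountKey, "HmacSHA256"));
            byte[] utf8Bytes = stringToSign.getBytes(StandardCharsets.UTF_8);
            return Base64.getEncoder().encodeToString(hmacSha256.doFinal(utf8Bytes));
        } catch (GeneralSecurityException e) {
            // Covers both NoSuchAlgorithmException and InvalidKeyException.
            throw new RuntimeException(e);
        }
    }
}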
@@ -121,20 +127,20 @@ private String buildStringToSign(URL requestURL, String httpMethod, Map
For more samples, please see the samples file
+ * @throws IllegalArgumentException If one of the following cases exists: + *Instantiating an Asynchronous Directory Client
+ * + *+ * DirectoryAsyncClient client = DirectoryAsyncClient.builder() + * .connectionString(connectionString) + * .endpoint(endpoint) + * .buildAsyncClient(); + *+ * + *
View {@link DirectoryClientBuilder this} for additional ways to construct the client.
+ * + * @see DirectoryClientBuilder + * @see DirectoryClient + * @see SharedKeyCredential + * @see SASTokenCredential + */ public class DirectoryAsyncClient { + private final AzureFileStorageImpl azureFileStorageClient; + private final String shareName; + private final String directoryName; + private final String shareSnapshot; + + /** + * Creates a DirectoryAsyncClient that sends requests to the storage directory at {@link AzureFileStorageImpl#url() endpoint}. + * Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. + * @param azureFileStorageClient Client that interacts with the service interfaces + * @param shareName Name of the share + * @param directoryName Name of the directory + * @param shareSnapshot The snapshot of the share + */ + DirectoryAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String directoryName, String shareSnapshot) { + this.shareName = shareName; + this.directoryName = directoryName; + this.shareSnapshot = shareSnapshot; + this.azureFileStorageClient = new AzureFileStorageBuilder().pipeline(azureFileStorageClient.httpPipeline()) + .url(azureFileStorageClient.url()) + .version(azureFileStorageClient.version()) + .build(); + } - DirectoryAsyncClient() { - throw new UnsupportedOperationException(); + /** + * Creates a DirectoryAsyncClient that sends requests to the storage account at {@code endpoint}. + * Each service call goes through the {@code httpPipeline}. + * @param endpoint URL for the Storage File service + * @param httpPipeline HttpPipeline that HTTP requests and response flow through + * @param shareName Name of the share + * @param directoryName Name of the directory + * @param shareSnapshot Optional. The snapshot of the share + */ + DirectoryAsyncClient(URL endpoint, HttpPipeline httpPipeline, String shareName, String directoryName, String shareSnapshot) { + this.shareName = shareName; + this.directoryName = directoryName; + this.shareSnapshot = shareSnapshot; + this.azureFileStorageClient = new AzureFileStorageBuilder().pipeline(httpPipeline) + .url(endpoint.toString()) + .build(); } - public static DirectoryClientBuilder asyncBuilder() { - throw new UnsupportedOperationException(); + /** + * Get the getDirectoryUrl of the storage directory client. + * @return the URL of the storage directory client + */ + public String getDirectoryUrl() { + return azureFileStorageClient.url(); } - public FileAsyncClient getFileClient(String name) { - throw new UnsupportedOperationException(); + /** + * Constructs a FileAsyncClient that interacts with the specified file. + * + *If the file doesn't exist in the storage account {@link FileAsyncClient#create(long)} create} in the client will + * need to be called before interaction with the file can happen.
+ * + * @param fileName Name of the file + * @return a FileAsyncClient that interacts with the specified share + */ + public FileAsyncClient getFileClient(String fileName) { + String filePath = directoryName + "/" + fileName; + return new FileAsyncClient(azureFileStorageClient, shareName, filePath, null); } - public DirectoryAsyncClient getDirectoryClient(String directoryName) { - throw new UnsupportedOperationException(); + /** + * Constructs a DirectoryAsyncClient that interacts with the specified directory. + * + *If the file doesn't exist in the storage account {@link DirectoryAsyncClient#create()} create} in the client will + * need to be called before interaction with the directory can happen.
+ * + * @param subDirectoryName Name of the directory + * @return a DirectoryAsyncClient that interacts with the specified directory + */ + public DirectoryAsyncClient getSubDirectoryClient(String subDirectoryName) { + String directoryPath = directoryName + "/" + subDirectoryName; + return new DirectoryAsyncClient(azureFileStorageClient, shareName, directoryPath, shareSnapshot); } + /** + * Creates a directory in the storage account and returns a response of {@link DirectoryInfo} to interact with it. + * + *Code Samples
+ * + *Create the directory
+ * + * {@codesnippet com.azure.storage.file.directoryAsyncClient.create} + * + * @return A response containing the directory info and the status of creating the directory. + * @throws StorageErrorException If the directory has already existed, the parent directory does not exist or directory name is an invalid resource name. + */ + public MonoCode Samples
+ * + *Create the directory
+ * + *+ * client.create(Collections.singletonMap("directory", "metadata")) + * .subscribe(response -> System.out.printf("Creating the directory completed with status code %d", response.statusCode())); + *+ * + * @param metadata Optional. Metadata to associate with the directory + * @return A response containing the directory info and the status of creating the directory. + * @throws StorageErrorException If the directory has already existed, the parent directory does not exist or directory name is an invalid resource name. + */ public Mono
Code Samples
+ * + *Delete the directory
+ * + * {@codesnippet com.azure.storage.file.directoryClient.delete} + * + * @return A response that only contains headers and response status code + * @throws StorageErrorException If the share doesn't exist + */ public MonoCode Samples
+ * + *Retrieve directory properties
+ * + *+ * client.getProperties() + * .subscribe(response -> { + * DirectoryProperties properties = response.value(); + * System.out.printf("Directory latest modified date is %s.", properties.lastModified()); + * }); + *+ * + * @return Storage directory properties + */ + public Mono
If {@code null} is passed for the metadata it will clear the metadata associated with the directory.
+ * + *Code Samples
+ * + *Set the metadata to "directory:updatedMetadata"
+ * + *+ * client.setMetadata(Collections.singletonMap("directory", "updatedMetadata")) + * .subscribe(response -> System.out.printf("Setting the directory metadata completed with status code %d", response.statusCode())); + *+ * + *
Clear the metadata of the directory
+ * + *+ * client.setMetadata(null) + * .subscribe(response -> System.out.printf("Clearing the directory metadata completed with status code %d", response.statusCode())); + *+ * + * @param metadata Optional. Metadata to set on the directory, if null is passed the metadata for the directory is cleared + * @return information about the directory + * @throws StorageErrorException If the directory doesn't exist or the metadata contains invalid keys + */ + public Mono
Code Samples
+ * + *List all directories and files in the account
+ * + *+ * client.listFilesAndDirectories() + * .subscribe(result -> System.out.printf("The file or directory %s exists in the account", result.name())); + *+ * + * @return {@link FileRef File info} in the storage directory + */ + public Flux
Code Samples
+ * + *List all directories with "subdir" prefix and return 10 results in the account
+ * + * {@codesnippet com.azure.storage.file.directoryAsyncClient.listFilesAndDirectories} + * + * @param prefix Optional. Filters the results to return only files and directories whose name begins with the specified prefix. + * @param maxResults Optional. Specifies the maximum number of files and/or directories to return per page. + * If the request does not specify maxresults or specifies a value greater than 5,000, the server will return up to 5,000 items. + * @return {@link FileRef File info} in the storage account with prefix and max number of return results. + */ + public FluxCode Samples
+ * + *Get 10 handles with a recursive call.
+ * + *+ * client.getHandles(10, true) + * .subscribe(handleItem -> System.out.printf("Get handles completed with handle id %s", handleItem.handleId())); + *+ * @param maxResult Optional. The number of results will return per page + * @param recursive Specifies operation should apply to the directory specified in the URI, its files, its subdirectories and their files. + * @return {@link HandleItem handles} in the directory that satisfy the requirements + */ + public Flux
Code Samples
+ * + *Force close the handles returned by the recursive get handles call.
+ * + *+ * client.getHandles(10, true) + * .subscribe(handleItem -> { + * client.forceCloseHandles(handleItem.handleId(), true).subscribe(numOfClosedHandles -> + * System.out.printf("Close %d handles.", numOfClosedHandles) + * )}); + *+ * @param handleId Specifies the handle ID to be closed. Use an asterisk ('*') as a wildcard string to specify all handles. + * @param recursive A boolean value that specifies if the operation should also apply to the files and subdirectories of the directory specified in the URI. + * @return The counts of number of handles closed + */ public Flux
Code Samples
+ * + *Create the subdirectory "subdir"
+ * + * {@codesnippet com.azure.storage.file.directoryAsyncClient.createSubDirectory#string} + * + * @param subDirectoryName Name of the subdirectory + * @return A response containing the subdirectory client and the status of creating the directory. + * @throws StorageErrorException If the subdirectory has already existed, the parent directory does not exist or directory is an invalid resource name. + */ + public MonoCode Samples
+ * + *Create the subdirectory named "subdir", with metadata
+ * + *+ * client.createSubDirectory("subdir", Collections.singletonMap("directory", "metadata")) + * .subscribe(response -> System.out.printf("Creating the subdirectory completed with status code %d", response.statusCode())); + *+ * + * @param subDirectoryName Name of the subdirectory + * @param metadata Optional. Metadata to associate with the subdirectory + * @return A response containing the subdirectory client and the status of creating the directory. + * @throws StorageErrorException If the directory has already existed, the parent directory does not exist or subdirectory is an invalid resource name. + */ + public Mono
Code Samples
+ * + *Delete the subdirectory named "subdir"
+ * + * {@codesnippet com.azure.storage.file.directoryAsyncClient.deleteSubDirectory#string} + * + * @param subDirectoryName Name of the subdirectory + * @return A response that only contains headers and response status code + * @throws StorageErrorException If the subdirectory doesn't exist, the parent directory does not exist or subdirectory name is an invalid resource name. + */ + public MonoCode Samples
+ * + *Create a 1KB file named "myFile"
+ * + * {@codesnippet com.azure.storage.file.directoryAsyncClient.createFile#string-long} + * + * @param fileName Name of the file + * @param maxSize Size of the file + * @return A response containing the FileAsyncClient and the status of creating the directory. + * @throws StorageErrorException If the file has already existed, the parent directory does not exist or file name is an invalid resource name. + */ + public MonoCode Samples
+ * + *Create the file named "myFile"
+ * + *+ * client.createFile("myFile", Collections.singletonMap("directory", "metadata")) + * .subscribe(response -> System.out.printf("Creating the file completed with status code %d", response.statusCode())); + *+ * + * @param fileName Name of the file + * @param maxSize Max size of the file + * @param httpHeaders the Http headers set to the file + * @param metadata Optional. Name-value pairs associated with the file as metadata. Metadata names must adhere to the naming rules. + * @return A response containing the directory info and the status of creating the directory. + * @throws StorageErrorException If the directory has already existed, the parent directory does not exist or file name is an invalid resource name. + */ + public Mono
Code Samples
+ * + *Delete the file "filetest"
+ * + * {@codesnippet com.azure.storage.file.directoryAsyncClient.deleteFile#string} + * + * @param fileName Name of the file + * @return A response that only contains headers and response status code + * @throws StorageErrorException If the directory doesn't exist or the file doesn't exist or file name is an invalid resource name. + */ public MonoInstantiating an Synchronous Directory Client
+ * + *+ * DirectoryClient client = DirectoryClient.builder() + * .connectionString(connectionString) + * .endpoint(endpoint) + * .buildClient(); + *+ * + *
View {@link DirectoryClientBuilder this} for additional ways to construct the client.
+ * + * @see DirectoryClientBuilder + * @see DirectoryClient + * @see SharedKeyCredential + * @see SASTokenCredential + */ public class DirectoryClient { - private final DirectoryAsyncClient client; + private final DirectoryAsyncClient directoryAsyncClient; + + /** + * Creates a DirectoryClient that wraps a DirectoryAsyncClient and blocks requests. + * + * @param directoryAsyncClient DirectoryAsyncClient that is used to send requests + */ + DirectoryClient(DirectoryAsyncClient directoryAsyncClient) { + this.directoryAsyncClient = directoryAsyncClient; + } + + /** + * Get the getDirectoryUrl of the storage directory client. + * @return the URL of the storage directory client + */ + public String getDirectoryUrl() { + return directoryAsyncClient.getDirectoryUrl(); + } + + /** + * Constructs a FileClient that interacts with the specified file. + * + *If the file doesn't exist in the storage account {@link FileClient#create(long)} create} in the client will + * need to be called before interaction with the file can happen.
+ * + * @param fileName Name of the file + * @return a FileClient that interacts with the specified share + */ + public FileClient getFileClient(String fileName) { + return new FileClient(directoryAsyncClient.getFileClient(fileName)); + } + + /** + * Constructs a DirectoryClient that interacts with the specified directory. + * + *If the file doesn't exist in the storage account {@link DirectoryClient#create()} create} in the client will + * need to be called before interaction with the directory can happen.
+ * + * @param subDirectoryName Name of the directory + * @return a DirectoryClient that interacts with the specified directory + */ + public DirectoryClient getSubDirectoryClient(String subDirectoryName) { + return new DirectoryClient(directoryAsyncClient.getSubDirectoryClient(subDirectoryName)); + } - DirectoryClient() { - throw new UnsupportedOperationException(); + /** + * Creates a directory in the storage account and returns a response of {@link DirectoryInfo} to interact with it. + * + *Code Samples
+ * + *Create the directory
+ * + * {@codesnippet com.azure.storage.file.directoryClient.createDirectory} + * + * @return A response containing the directory info and the status of creating the directory. + * @throws StorageErrorException If the directory has already existed, the parent directory does not exist or directory name is an invalid resource name. + */ + public ResponseCode Samples
+ * + *Create the directory
+ * + *+ * Response<DirectoryInfo> response = client.create(Collections.singletonMap("directory", "metadata")); + * System.out.printf("Creating the directory completed with status code %d", response.statusCode()); + *+ * + * @param metadata Optional. Metadata to associate with the directory + * @return A response containing the directory info and the status of creating the directory. + * @throws StorageErrorException If the directory has already existed, the parent directory does not exist or directory name is an invalid resource name. + */ + public Response
Code Samples
+ * + *Delete the directory
+ * + * {@codesnippet com.azure.storage.file.directoryClient.delete} + * + * @return A response that only contains headers and response status code + * @throws StorageErrorException If the share doesn't exist + */ + public VoidResponse delete() { + return directoryAsyncClient.delete().block(); } - public DirectoryClient getDirectoryClient(String directoryName) { - throw new UnsupportedOperationException(); + /** + * Retrieves the properties of the storage account's directory. + * The properties includes directory metadata, last modified date, is server encrypted, and eTag. + * + *Code Samples
+ * + *Retrieve directory properties
+ * + *+ * Response<DirectoryProperties> response = client.getProperties(); + * System.out.printf("Directory latest modified date is %s.", properties.value().lastModified()); + *+ * + * @return Storage directory properties + */ + public Response
If {@code null} is passed for the metadata it will clear the metadata associated with the directory.
+ * + *Code Samples
+ * + *Set the metadata to "directory:updatedMetadata"
+ * + *+ * Response<DirectorySetMetadataInfo> response = client.setMetadata(Collections.singletonMap("directory", "updatedMetadata")); + * System.out.printf("Setting the directory metadata completed with status code %d", response.statusCode()); + *+ * + *
Clear the metadata of the directory
+ * + *+ * client.setMetadata(null) + * .subscribe(response -> System.out.printf("Clearing the directory metadata completed with status code %d", response.statusCode())); + *+ * + * @param metadata Optional. Metadata to set on the directory, if null is passed the metadata for the directory is cleared + * @return information about the directory + * @throws StorageErrorException If the directory doesn't exist or the metadata contains invalid keys + */ + public Response
Code Samples
+ * + *List all directories and files in the account
+ * + *+ * Iterable<FileRef> result = client.listFilesAndDirectories() + * System.out.printf("The file or directory %s exists in the account", result.iterator().next().name()); + *+ * + * @return {@link FileRef File info} in the storage directory + */ + public Iterable
Code Samples
+ * + *List all directories with "subdir" prefix and return 10 results in the account
+ * + * {@codesnippet com.azure.storage.file.directoryClient.listFilesAndDirectories} + * + * @param prefix Optional. Filters the results to return only files and directories whose name begins with the specified prefix. + * @param maxResults Optional. Specifies the maximum number of files and/or directories to return per page. + * If the request does not specify maxresults or specifies a value greater than 5,000, the server will return up to 5,000 items. + * @return {@link FileRef File info} in the storage account with prefix and max number of return results. + */ + public IterableCode Samples
+ * + *Get 10 handles with a recursive call.
+ * + *+ * Iterable<HandleItem> result = client.getHandles(10, true) + * System.out.printf("Get handles completed with handle id %s", result.iterator().next().handleId()); + *+ * @param maxResult Optional. The number of results will return per page + * @param recursive Specifies operation should apply to the directory specified in the URI, its files, its subdirectories and their files. + * @return {@link HandleItem handles} in the directory that satisfy the requirements + */ + public Iterable
Code Samples
+ * + *Force close the handles returned by the recursive get handles call.
+ * + *+ * Iterable<HandleItem> result = client.getHandles(10, true) + * result.forEach(handleItem -> { + * client.forceCloseHandles(handleItem.handleId, true).forEach(numOfClosedHandles -> + * System.out.printf("Get handles completed with handle id %s", handleItem.handleId())); + * }); + *+ * @param handleId Specifies the handle ID to be closed. Use an asterisk ('*') as a wildcard string to specify all handles. + * @param recursive A boolean value that specifies if the operation should also apply to the files and subdirectories of the directory specified in the URI. + * @return The counts of number of handles closed. + */ + public Iterable
Code Samples
+ * + *Create the subdirectory "subdir"
+ * + *+ * Response<DirectoryClient> response = client.createSubDirectory("subdir") + * System.out.printf("Creating the sub directory completed with status code %d", response.statusCode()); + *+ * + * @param subDirectoryName Name of the subdirectory + * @return A response containing the subdirectory client and the status of creating the directory. + * @throws StorageErrorException If the subdirectory has already existed, the parent directory does not exist or directory is an invalid resource name. + */ + public Response
Code Samples
+ * + *Create the subdirectory named "subdir", with metadata
+ * + * com.azure.storage.file.directoryClient.createSubDirectory#string + * + * @param subDirectoryName Name of the subdirectory + * @param metadata Optional. Metadata to associate with the subdirectory + * @return A response containing the subdirectory client and the status of creating the directory. + * @throws StorageErrorException If the directory has already existed, the parent directory does not exist or subdirectory is an invalid resource name. + */ + public ResponseCode Samples
+ * + *Delete the subdirectory named "subdir"
+ * + * {@codesnippet com.azure.storage.file.directoryClient.deleteSubDirectory#string} + * + * @param subDirectoryName Name of the subdirectory + * @return A response that only contains headers and response status code + * @throws StorageErrorException If the subdirectory doesn't exist, the parent directory does not exist or subdirectory name is an invalid resource name. + */ + public VoidResponse deleteSubDirectory(String subDirectoryName) { + return directoryAsyncClient.deleteSubDirectory(subDirectoryName).block(); } - public MonoCode Samples
+ * + *Create a 1KB file named "myFile"
+ * + * {@codesnippet com.azure.storage.file.directoryClient.createFile#string-long} + * + * @param fileName Name of the file + * @param maxSize Size of the file + * @return A response containing the FileClient and the status of creating the directory. + * @throws StorageErrorException If the file has already existed, the parent directory does not exist or file name is an invalid resource name. + */ + public ResponseCode Samples
+ * + *Create the file named "myFile"
+ * + *+ * Response<FileClient> response = client.createFile("myFile", Collections.singletonMap("directory", "metadata")) + * System.out.printf("Creating the file completed with status code %d", response.statusCode()); + *+ * + * @param fileName Name of the file + * @param maxSize Max size of the file + * @param httpHeaders the Http headers set to the file + * @param metadata Optional. Name-value pairs associated with the file as metadata. Metadata names must adhere to the naming rules. + * @return A response containing the directory info and the status of creating the directory. + * @throws StorageErrorException If the directory has already existed, the parent directory does not exist or file name is an invalid resource name. + */ + public Response
Code Samples
+ * + *Delete the file "filetest"
+ * + * {@codesnippet com.azure.storage.file.directoryClient.deleteFile#string} + * + * @param fileName Name of the file + * @return A response that only contains headers and response status code + * @throws StorageErrorException If the directory doesn't exist or the file doesn't exist or file name is an invalid resource name. + */ + public VoidResponse deleteFile(String fileName) { + return directoryAsyncClient.deleteFile(fileName).block(); } } diff --git a/storage/client/file/src/main/java/com/azure/storage/file/DirectoryClientBuilder.java b/storage/client/file/src/main/java/com/azure/storage/file/DirectoryClientBuilder.java index 7bbd3cb138d2e..37c8c01b4d2ad 100644 --- a/storage/client/file/src/main/java/com/azure/storage/file/DirectoryClientBuilder.java +++ b/storage/client/file/src/main/java/com/azure/storage/file/DirectoryClientBuilder.java @@ -3,7 +3,352 @@ package com.azure.storage.file; +import com.azure.core.http.HttpClient; +import com.azure.core.http.HttpPipeline; +import com.azure.core.http.policy.AddDatePolicy; +import com.azure.core.http.policy.HttpLogDetailLevel; +import com.azure.core.http.policy.HttpLoggingPolicy; +import com.azure.core.http.policy.HttpPipelinePolicy; +import com.azure.core.http.policy.RequestIdPolicy; +import com.azure.core.http.policy.RetryPolicy; +import com.azure.core.http.policy.UserAgentPolicy; +import com.azure.core.implementation.http.policy.spi.HttpPolicyProviders; +import com.azure.core.util.configuration.Configuration; +import com.azure.core.util.configuration.ConfigurationManager; +import com.azure.storage.common.credentials.SASTokenCredential; +import com.azure.storage.common.credentials.SharedKeyCredential; +import com.azure.storage.common.policy.SASTokenCredentialPolicy; +import com.azure.storage.common.policy.SharedKeyCredentialPolicy; +import java.net.MalformedURLException; +import java.net.URL; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; + +/** + * This class provides a fluent builder API to help aid the configuration and instantiation of the {@link DirectoryClient FileClients} + * and {@link DirectoryAsyncClient FileAsyncClients}, calling {@link DirectoryClientBuilder#buildClient() buildClient} + * constructs an instance of FileClient and calling {@link DirectoryClientBuilder#buildAsyncClient() buildAsyncClient} + * constructs an instance of FileAsyncClient. + * + *The client needs the endpoint of the Azure Storage File service, name of the share, and authorization credential. + * {@link DirectoryClientBuilder#endpoint(String) endpoint} gives the builder the endpoint and may give the builder the + * {@link DirectoryClientBuilder#shareName(String)}, {@link DirectoryClientBuilder#directoryName(String)} and a {@link SASTokenCredential} that authorizes the client.
+ * + *Instantiating a synchronous Directory Client with SAS token
+ * {@codesnippet com.azure.storage.file.directoryClient.instantiation.sastoken} + * + *Instantiating an Asynchronous Directory Client with SAS token
+ * {@codesnippet com.azure.storage.file.directoryClient.instantiation.sastoken} + * + *+ * DirectoryClient client = DirectoryClient.builder() + * .endpoint(endpointWithSASTokenQueryParams) + * .buildClient(); + *+ * + *
+ * DirectoryAsyncClient client = DirectoryAsyncClient.builder() + * .endpoint(endpointWithSASTokenQueryParams) + * .buildAsyncClient(); + *+ * + *
If the {@code endpoint} doesn't contain the query parameters to construct a {@code SASTokenCredential} they may + * be set using {@link DirectoryClientBuilder#credential(SASTokenCredential) credential}.
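A minimal sketch of that option (the account, share, directory, and SAS query values below are placeholders, not values from this change):

SASTokenCredential credential = SASTokenCredential.fromQuery("sv=2019-02-02&ss=f&sig=...");
DirectoryClient client = DirectoryClient.builder()
    .endpoint("https://myaccount.file.core.windows.net")
    .shareName("myshare")
    .directoryName("mydirectory")
    .credential(credential)
    .buildClient();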
+ * + * + *Another way to authenticate the client is using a {@link SharedKeyCredential}. To create a SharedKeyCredential + * a connection string from the Storage File service must be used. Set the SharedKeyCredential with + * {@link DirectoryClientBuilder#connectionString(String) connectionString}. If the builder has both a SASTokenCredential and + * SharedKeyCredential the SharedKeyCredential will be preferred when authorizing requests sent to the service.
+ * + *Instantiating a synchronous Directory Client with connection string.
+ * {@codesnippet com.azure.storage.file.directoryClient.instantiation.connectionstring} + * + *Instantiating an Asynchronous Directory Client with connection string.
+ * {@codesnippet com.azure.storage.file.directoryAsyncClient.instantiation.connectionstring} + * + * @see DirectoryClient + * @see DirectoryAsyncClient + * @see SASTokenCredential + * @see SharedKeyCredential + */ public class DirectoryClientBuilder { + private static final String ACCOUNT_NAME = "accountname"; + private final List+ * If {@link DirectoryClientBuilder#pipeline(HttpPipeline) pipeline} is set, then the {@code pipeline} and + * {@link DirectoryClientBuilder#endpoint(String) endpoint} are used to create the + * {@link DirectoryAsyncClient client}. All other builder settings are ignored. + *
+ * + * @return A ShareAsyncClient with the options set from the builder. + * @throws NullPointerException If {@code endpoint} or {@code shareName} is {@code null}. + * @throws IllegalArgumentException If neither a {@link SharedKeyCredential} or {@link SASTokenCredential} has been set. + */ + public DirectoryAsyncClient buildAsyncClient() { + Objects.requireNonNull(endpoint); + + if (pipeline != null) { + return new DirectoryAsyncClient(endpoint, pipeline, shareName, directoryName, shareSnapshot); + } + + if (sasTokenCredential == null && sharedKeyCredential == null) { + throw new IllegalArgumentException("Credentials are required for authorization"); + } + + // Closest to API goes first, closest to wire goes last. + final List+ * If {@link DirectoryClientBuilder#pipeline(HttpPipeline) pipeline} is set, then the {@code pipeline} and + * {@link DirectoryClientBuilder#endpoint(String) endpoint} are used to create the + * {@link DirectoryClient client}. All other builder settings are ignored. + *
+ * + * @return A DirectoryClient with the options set from the builder. + * @throws NullPointerException If {@code endpoint}, {@code shareName} or {@code directoryName} is {@code null}. + * @throws IllegalArgumentException If neither a {@link SharedKeyCredential} or {@link SASTokenCredential} has been set. + */ + public DirectoryClient buildClient() { + return new DirectoryClient(this.buildAsyncClient()); + } + + /** + * Sets the endpoint for the Azure Storage File instance that the client will interact with. + * + *The first path segment, if the endpoint contains path segments, will be assumed to be the name of the share + * that the client will interact with. Rest of the path segments should be the path of the directory.
+ * + *Query parameters of the endpoint will be parsed using {@link SASTokenCredential#fromQuery(String)} in an + * attempt to generate a {@link SASTokenCredential} to authenticate requests sent to the service.
+ * + * @param endpoint The URL of the Azure Storage File instance to send service requests to and receive responses from. + * @return the updated DirectoryClientBuilder object + * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is an invalid URL + */ + public DirectoryClientBuilder endpoint(String endpoint) { + Objects.requireNonNull(endpoint); + try { + URL fullURL = new URL(endpoint); + this.endpoint = new URL(fullURL.getProtocol() + "://" + fullURL.getHost()); + String[] pathSegments = fullURL.getPath().split("/"); + int length = pathSegments.length; + this.shareName = length >= 2 ? pathSegments[1] : this.shareName; + this.directoryName = length >= 3 ? pathSegments[2] : this.directoryName; + + // Attempt to get the SAS token from the URL passed + SASTokenCredential credential = SASTokenCredential.fromQuery(fullURL.getQuery()); + if (credential != null) { + this.sasTokenCredential = credential; + } + } catch (MalformedURLException ex) { + throw new IllegalArgumentException("The Azure Storage Directory endpoint url is malformed."); + } + + return this; + } + + /** + * Sets the {@link SASTokenCredential} used to authenticate requests sent to the File service. + * + * @param credential SAS token credential generated from the Storage account that authorizes requests + * @return the updated DirectoryClientBuilder object + * @throws NullPointerException If {@code credential} is {@code null}. + */ + public DirectoryClientBuilder credential(SASTokenCredential credential) { + this.sasTokenCredential = credential; + return this; + } + + /** + * Creates a {@link SharedKeyCredential} from the {@code connectionString} used to authenticate requests sent to the + * File service. + * + * @param connectionString Connection string from the Access Keys section in the Storage account + * @return the updated DirectoryClientBuilder object + * @throws NullPointerException If {@code connectionString} is {@code null}. + */ + public DirectoryClientBuilder connectionString(String connectionString) { + Objects.requireNonNull(connectionString); + this.sharedKeyCredential = SharedKeyCredential.fromConnectionString(connectionString); + getEndPointFromConnectionString(connectionString); + return this; + } + + private void getEndPointFromConnectionString(String connectionString) { + MapIf {@code pipeline} is set, all other settings are ignored, aside from {@link DirectoryClientBuilder#endpoint(String) endpoint}, + * {@link DirectoryClientBuilder#shareName(String) shareName} @{link DirectoryClientBuilder#directoryName(String) filePath}, and {@link DirectoryClientBuilder#shareSnapshot(String) snaphotShot} + * when building clients.
+ * + * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. + * @return The updated DirectoryClientBuilder object. + * @throws NullPointerException If {@code pipeline} is {@code null}. + */ + public DirectoryClientBuilder pipeline(HttpPipeline pipeline) { + this.pipeline = Objects.requireNonNull(pipeline); + return this; + } + + /** + * Sets the configuration object used to retrieve environment configuration values used to buildClient the client with + * when they are not set in the builder, defaults to Configuration.NONE + * @param configuration configuration store + * @return the updated DirectoryClientBuilder object + */ + public DirectoryClientBuilder configuration(Configuration configuration) { + this.configuration = configuration; + return this; + } -// connectionString, shareName, directoryName, FileClientOptions, SharedKeyCredential, + /** + * Sets the snapshot that the constructed clients will interact with. This snapshot must be linked to the share + * that has been specified in the builder. + * + * @param shareSnapshot Identifier of the snapshot + * @return the updated DirectoryClientBuilder object + * @throws NullPointerException If {@code shareSnapshot} is {@code null}. + */ + public DirectoryClientBuilder shareSnapshot(String shareSnapshot) { + this.shareSnapshot = shareSnapshot; + return this; + } } diff --git a/storage/client/file/src/main/java/com/azure/storage/file/FileAsyncClient.java b/storage/client/file/src/main/java/com/azure/storage/file/FileAsyncClient.java index 0297818f4650b..02b2f809675b7 100644 --- a/storage/client/file/src/main/java/com/azure/storage/file/FileAsyncClient.java +++ b/storage/client/file/src/main/java/com/azure/storage/file/FileAsyncClient.java @@ -3,77 +3,775 @@ package com.azure.storage.file; +import com.azure.core.http.HttpPipeline; import com.azure.core.http.rest.Response; +import com.azure.core.http.rest.SimpleResponse; import com.azure.core.http.rest.VoidResponse; +import com.azure.core.implementation.util.FluxUtil; +import com.azure.core.util.Context; +import com.azure.core.util.logging.ClientLogger; +import com.azure.storage.common.credentials.SASTokenCredential; +import com.azure.storage.common.credentials.SharedKeyCredential; +import com.azure.storage.file.implementation.AzureFileStorageBuilder; +import com.azure.storage.file.implementation.AzureFileStorageImpl; +import com.azure.storage.file.models.CopyStatusType; import com.azure.storage.file.models.FileCopyInfo; import com.azure.storage.file.models.FileDownloadInfo; +import com.azure.storage.file.models.FileGetPropertiesHeaders; import com.azure.storage.file.models.FileHTTPHeaders; import com.azure.storage.file.models.FileInfo; +import com.azure.storage.file.models.FileMetadataInfo; import com.azure.storage.file.models.FileProperties; -import com.azure.storage.file.models.FileRangeInfo; +import com.azure.storage.file.models.FileRange; import com.azure.storage.file.models.FileRangeWriteType; import com.azure.storage.file.models.FileUploadInfo; +import com.azure.storage.file.models.FileUploadRangeHeaders; +import com.azure.storage.file.models.FilesCreateResponse; +import com.azure.storage.file.models.FilesDownloadResponse; +import com.azure.storage.file.models.FilesForceCloseHandlesResponse; +import com.azure.storage.file.models.FilesGetPropertiesResponse; +import com.azure.storage.file.models.FilesGetRangeListResponse; +import com.azure.storage.file.models.FilesListHandlesResponse; +import 
com.azure.storage.file.models.FilesSetHTTPHeadersResponse; +import com.azure.storage.file.models.FilesSetMetadataResponse; +import com.azure.storage.file.models.FilesStartCopyResponse; +import com.azure.storage.file.models.FilesUploadRangeResponse; import com.azure.storage.file.models.HandleItem; +import com.azure.storage.file.models.StorageErrorException; import io.netty.buffer.ByteBuf; +import java.io.File; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.net.MalformedURLException; +import java.net.URL; +import java.nio.channels.AsynchronousFileChannel; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; +import java.time.Duration; +import java.time.OffsetDateTime; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeoutException; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; +import reactor.core.scheduler.Schedulers; -import java.util.Map; - +/** + * This class provides a client that contains all the operations for interacting with file in Azure Storage File Service. + * Operations allowed by the client are creating, copying, uploading, downloading, deleting and listing on a file, retrieving properties, setting metadata + * and list or force close handles of the file. + * + *Instantiating an Asynchronous File Client
+ * + *+ * FileAsyncClient client = FileAsyncClient.builder() + * .connectionString(connectionString) + * .endpoint(endpoint) + * .buildAsyncClient(); + *+ * + *
View {@link FileClientBuilder this} for additional ways to construct the client.
+ * + * @see FileClientBuilder + * @see FileClient + * @see SharedKeyCredential + * @see SASTokenCredential + */ public class FileAsyncClient { - FileAsyncClient() { - throw new UnsupportedOperationException(); + private static final ClientLogger LOGGER = new ClientLogger(FileAsyncClient.class); + private static final long FILE_DEFAULT_BLOCK_SIZE = 4 * 1024 * 1024L; + + private final AzureFileStorageImpl azureFileStorageClient; + private final String shareName; + private final String filePath; + private final String shareSnapshot; + + /** + * Creates a FileAsyncClient that sends requests to the storage file at {@link AzureFileStorageImpl#url() endpoint}. + * Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. + * @param azureFileStorageClient Client that interacts with the service interfaces + * @param shareName Name of the share + * @param filePath Path to the file + * @param shareSnapshot The snapshot of the share + */ + FileAsyncClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String filePath, String shareSnapshot) { + this.shareName = shareName; + this.filePath = filePath; + this.shareSnapshot = shareSnapshot; + this.azureFileStorageClient = new AzureFileStorageBuilder().pipeline(azureFileStorageClient.httpPipeline()) + .url(azureFileStorageClient.url()) + .version(azureFileStorageClient.version()) + .build(); + } + + /** + * Creates a FileAsyncClient that sends requests to the storage account at {@code endpoint}. + * Each service call goes through the {@code httpPipeline}. + * @param endpoint URL for the Storage File service + * @param httpPipeline HttpPipeline that HTTP requests and response flow through + * @param shareName Name of the share + * @param filePath Path to the file + * @param shareSnapshot Optional. The snapshot of the share + */ + FileAsyncClient(URL endpoint, HttpPipeline httpPipeline, String shareName, String filePath, String shareSnapshot) { + this.shareName = shareName; + this.filePath = filePath; + this.shareSnapshot = shareSnapshot; + this.azureFileStorageClient = new AzureFileStorageBuilder().pipeline(httpPipeline) + .url(endpoint.toString()) + .build(); } - public static FileClientBuilder asyncBuilder() { - throw new UnsupportedOperationException(); + /** + * Get the getFileUrl of the storage file client. + * @return the URL of the storage file client + * @throws MalformedURLException if no protocol is specified, or an + * unknown protocol is found, or {@code spec} is {@code null}. + */ + public URL getFileUrl() throws MalformedURLException { + return new URL(azureFileStorageClient.url()); } + /** + * Creates a file in the storage account and returns a response of {@link FileInfo} to interact with it. + * + *Code Samples
+ * + *Create the file with size 1KB.
+ * + * {@codesnippet com.azure.storage.file.fileClient.create} + * + * @param maxSize The maximum size in bytes for the file, up to 1 TiB. + * @return A response containing the file info and the status of creating the file. + * @throws StorageErrorException If the file has already existed, the parent directory does not exist or fileName is an invalid resource name. + */ + public MonoCode Samples
+ * + *Create the file with length of 1024 bytes, some headers and metadata.
+ * + *+ * FileHTTPHeaders httpHeaders = new FileHTTPHeaders().fileContentType("text/plain"); + * client.create(1024, httpHeaders, Collections.singletonMap("file", "updatedMetadata")) + * .subscribe(response -> System.out.printf("Creating the file completed with status code %d", response.statusCode())); + *+ * + * @param maxSize The maximum size in bytes for the file, up to 1 TiB. + * @param httpHeaders Additional parameters for the operation. + * @param metadata Optional. Name-value pairs associated with the file as metadata. Metadata names must adhere to the naming rules. + * @see C# identifiers + * @return A response containing the directory info and the status of creating the directory. + * @throws StorageErrorException If the directory has already existed, the parent directory does not exist or directory is an invalid resource name. + */ public Mono
Code Samples
+ * + *Copy the file from the source URL to the {@code filePath}
+ * + * {@codesnippet com.azure.storage.file.fileAsyncClient.startCopy#string-map} + * + * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. + * @param metadata Optional. Name-value pairs associated with the file as metadata. Metadata names must adhere to the naming rules. + * * @see C# identifiers + * @return A response containing the file copy info and the status of copying the file. + */ public MonoCode Samples
+ * + *Abort copy file from copy id("someCopyId")
+ * + *+ * client.abortCopy("someCopyId") + * .subscribe(response -> System.out.printf("Abort copying the file completed with status code %d", response.statusCode())); + *+ * + * @param copyId Specifies the copy id which has copying pending status associate with it. + * @return A response containing the status of aborting copy the file. + */ public Mono
Code Samples
+ * + *Download the file to the current folder.
+ * + *+ * client.downloadToFile("someFilePath") + * .doOnTerminate(() -> if (Files.exist(Paths.get("someFilePath"))) { + * System.out.println("Download the file completed"); + * }); + *+ * + * @param downloadFilePath The path where store the downloaded file + * @return An empty response. + */ + public Mono
Code Samples
+ * + *Download the file from 1024 to 2048 bytes to the current folder.
+ * + * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadToFile} + * + * @param downloadFilePath The path where store the downloaded file + * @param range Optional. Return file data only from the specified byte range. + * @return An empty response. + * @throws UncheckedIOException If an I/O error occurs. + */ + public MonoCode Samples
+ * + *Download the file with its metadata and properties.
+ * + * {@codesnippet com.azure.storage.file.fileAsyncClient.downloadWithProperties} + * + * @return A response that only contains headers and response status code + */ + public MonoCode Samples
+ * + *Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5.
+ * + *+ * client.downloadWithProperties(new Range(1024, 2048), false) + * .subscribe(response -> System.out.printf("Downloading the file range completed with status code %d", response.statusCode())); + *+ * + * @param range Optional. Return file data only from the specified byte range. + * @param rangeGetContentMD5 Optional. When this header is set to true and specified together with the Range header, the service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB in size. + * @return A response that only contains headers and response status code + */ + public Mono
Code Samples
+ * + *Delete the file
+ * + * {@codesnippet com.azure.storage.file.fileAsyncClient.delete} + * + * @return A response that only contains headers and response status code + * @throws StorageErrorException If the directory doesn't exist or the file doesn't exist. + */ public MonoCode Samples
+ * + *Retrieve file properties
+ * + *+ * client.getProperties() + * .subscribe(response -> { + * DirectoryProperties properties = response.value(); + * System.out.printf("File latest modified date is %s.", properties.lastModified()); + * }); + *+ * + * @return Storage file properties + */ + public Mono
If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated with the file.
+ * + *Code Samples
+ * + *Set the contentType of the httpHeaders to "text/plain"
+ * + *+ * FileHTTPHeaders httpHeaders = new FileHTTPHeaders().fileContentType("text/plain"); + * client.setHttpHeaders(1024, httpHeaders) + * .subscribe(response -> System.out.printf("Setting the file httpHeaders completed with status code %d", response.statusCode())); + *+ * + *
Clear the httpHeaders of the file
+ * + *+ * client.setHttpHeaders(1024, null) + * .subscribe(response -> System.out.printf("Clearing the file httpHeaders completed with status code %d", response.statusCode())); + *+ * + * @param newFileSize New file size of the file + * @param httpHeaders Resizes a file to the specified size. If the specified byte value is less than the current size of the file, then all ranges above the specified byte value are cleared. + * @return Response of the information about the file + * @throws IllegalArgumentException thrown if parameters fail the validation. + */ public Mono
If {@code null} is passed for the metadata it will clear the metadata associated with the file.
+ * + *Code Samples
+ * + *Set the metadata to "file:updatedMetadata"
+ * + *+ * client.setMetadata(Collections.singletonMap("file", "updatedMetadata")) + * .subscribe(response -> System.out.printf("Setting the file metadata completed with status code %d", response.statusCode())); + *+ * + *
Clear the metadata of the file
+ * + *+ * client.setMetadata(null) + * .subscribe(response -> System.out.printf("Clearing the file metadata completed with status code %d", response.statusCode())); + *+ * + * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared + * @return information about the file + * @throws StorageErrorException If the file doesn't exist or the metadata contains invalid keys + */ + public Mono
Code Samples
+ * + *Upload "default" to the file.
+ * + * {@codesnippet com.azure.storage.file.fileAsyncClient.upload} + * + * @param data The data which will upload to the storage file. + * @param length Specifies the number of bytes being transmitted in the request body. When the FileRangeWriteType is set to clear, the value of this header must be set to zero.. + * @return A response that only contains headers and response status code + * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) + */ + public MonoCode Samples
+ * + *Upload the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5.
+ * + *+ * ByteBuf defaultData = Unpooled.wrappedBuffer(defaultText.getBytes(StandardCharsets.UTF_8)); + * client.upload(defaultData, defaultData.readableBytes()) + * .subscribe(response -> System.out.printf("Upload the bytes to file range completed with status code %d", response.statusCode())); + *+ * + * @param data The data which will upload to the storage file. + * @param offset Optional. The starting point of the upload range. It will start from the beginning if it is {@code null} + * @param length Specifies the number of bytes being transmitted in the request body. When the FileRangeWriteType is set to clear, the value of this header must be set to zero. + * @param type You may specify one of the following options: + * - Update: Writes the bytes specified by the request body into the specified range. + * - Clear: Clears the specified range and releases the space used in storage for that range. To clear a range, set the Content-Length header to zero. + * @return A response that only contains headers and response status code + * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) + */ + public Mono
Code Samples
+ * + *Upload the file from the source file path.
+ * + * {@codesnippet com.azure.storage.file.fileAsyncClient.uploadFromFile} + * + * @param uploadFilePath The path where store the source file to upload + * @return An empty response. + */ + public MonoCode Samples
+ * + *Upload the file from the source file path.
+ * + *+ * client.uploadFromFile("someFilePath", FileRangeWriteType.UPDATE) + * .doOnTerminate(() -> if (client.getProperties() != null) { + * System.out.printf("Upload the file with length of %d completed", client.getProperties().block().value().contentLength()); + * }); + *+ * + * @param uploadFilePath The path where store the source file to upload + * @param type You may specify one of the following options: + * - Update: Writes the bytes specified by the request body into the specified range. + * - Clear: Clears the specified range and releases the space used in storage for that range. To clear a range, set the Content-Length header to zero. + * @return An empty response. + * @throws UncheckedIOException If an I/O error occurs. + */ + public Mono
Code Samples
+ * + *List all ranges for the file client.
+ * + *+ * client.listRanges() + * .subscribe(range -> System.out.printf("List ranges completed with start: %d, end: %d", range.start(), range.end())); + *+ * + * @return {@link FileRange ranges} in the files. + */ + public Flux
Code Samples
+ * + *List all ranges within the file range from 1KB to 2KB.
+ * + *+ * client.listRanges(new FileRange(1024, 2048) + * .subscribe(result -> System.out.printf("List ranges completed with start: %d, end: %d", result.start(), result.end())); + *+ * + * @param range Optional. Return file data only from the specified byte range. + * @return {@link FileRange ranges} in the files that satisfy the requirements + */ + public Flux
Code Samples
+ * + *List all handles for the file client.
+ * + *+ * client.listHandles() + * .subscribe(result -> System.out.printf("List handles completed with handle id %s", result.handleId())); + *+ * + * @return {@link HandleItem handles} in the files that satisfy the requirements + */ + public Flux
Code Samples
+ * + *List 10 handles for the file client.
+ * + *+ * client.listHandles(10) + * .subscribe(result -> System.out.printf("List handles completed with handle id %s", result.handleId())); *+ * @param maxResults Optional. The number of results will return per page + * @return {@link HandleItem handles} in the file that satisfy the requirements + */ + public Flux
Code Samples
+ * + *Force close the handles returned by the recursive list handles call.
+ * + *+ * client.listHandles(10) + * .subscribe(result -> { + * client.forceCloseHandles(result.handleId(), true).subscribe(numOfClosedHandles -> + * System.out.printf("Close %d handles.", numOfClosedHandles) + * )}); + *+ * @param handleId Specifies the handle ID to be closed. Use an asterisk ('*') as a wildcard string to specify all handles. + * @return The counts of number of handles closed + */ public Flux
Instantiating a synchronous File Client
+ * + *+ * FileClient client = FileClient.builder() + * .connectionString(connectionString) + * .endpoint(endpoint) + * .buildClient(); + *+ * + *
View {@link FileClientBuilder this} for additional ways to construct the client.
+ * + * @see FileClientBuilder + * @see FileAsyncClient + * @see SharedKeyCredential + * @see SASTokenCredential + */ public class FileClient { - private final FileAsyncClient client; + private final FileAsyncClient fileAsyncClient; + + /** + * Creates a FileClient that wraps a FileAsyncClient and blocks requests. + * + * @param fileAsyncClient FileAsyncClient that is used to send requests + */ + FileClient(FileAsyncClient fileAsyncClient) { + this.fileAsyncClient = fileAsyncClient; + } + + /** + * Get the getFileUrl of the storage file client. + * @return the URL of the storage file client + * @throws MalformedURLException if no protocol is specified, or an + * unknown protocol is found, or {@code spec} is {@code null}. + */ + public URL getFileUrl() throws MalformedURLException { + return fileAsyncClient.getFileUrl(); + } + + /** + * Creates a file in the storage account and returns a response of {@link FileInfo} to interact with it. + * + *Code Samples
+ * + *Create the file with length of 1024 bytes, some headers and metadata.
+ * + * {@codesnippet com.azure.storage.file.fileClient.create} + * + * @param maxSize The maximum size in bytes for the file, up to 1 TiB. + * @return A response containing the file info and the status of creating the file. + * @throws StorageErrorException If the file has already existed, the parent directory does not exist or fileName is an invalid resource name. + */ + public ResponseCode Samples
+ * + *Create the file with length of 1024 bytes, some headers and metadata.
+ * + *+ * FileHTTPHeaders httpHeaders = new FileHTTPHeaders().fileContentType("text/plain"); + * Response<FileInfo> response = client.create(1024, httpHeaders, Collections.singletonMap("file", "updatedMetadata")); + * System.out.printf("Creating the file completed with status code %d", response.statusCode()); + *+ * + * @param maxSize The maximum size in bytes for the file, up to 1 TiB. + * @param httpHeaders Additional parameters for the operation. + * @param metadata Optional. Name-value pairs associated with the file as metadata. Metadata names must adhere to the naming rules. + * @see C# identifiers + * @return A response containing the directory info and the status of creating the directory. + * @throws StorageErrorException If the directory has already existed, the parent directory does not exist or directory is an invalid resource name. + */ + public Response
Code Samples
+ * + *Copy the file from the source URL to the {@code filePath}
+ * + * {@codesnippet com.azure.storage.file.fileClient.startCopy#string-map} + * + * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. + * @param metadata Optional. Name-value pairs associated with the file as metadata. Metadata names must adhere to the naming rules. + * * @see C# identifiers + * @return A response containing the file copy info and the status of copying the file. + */ + public ResponseCode Samples
+ * + *Abort copy file from copy id("someCopyId")
+ * + *+ * VoidResponse response = client.abortCopy("someCopyId") + * System.out.printf("Abort copying the file completed with status code %d", response.statusCode()); + *+ * + * @param copyId Specifies the copy id which has copying pending status associate with it. + * @return A response containing the status of aborting copy the file. + */ + public VoidResponse abortCopy(String copyId) { + return fileAsyncClient.abortCopy(copyId).block(); + } + + /** + * Downloads a file from the system, including its metadata and properties + * + *
Code Samples
+ * + *Download the file to the current folder.
+ * + * {@codesnippet com.azure.storage.file.fileClient.downloadToFile} + * + * @param downloadFilePath The path where store the downloaded file + */ + public void downloadToFile(String downloadFilePath) { + downloadToFile(downloadFilePath, null); + } + + /** + * Downloads a file from the system, including its metadata and properties + * + *Code Samples
+ * + *Download the file range from 1024 to 2048 bytes to the current folder.
+ * + *+ * client.downloadToFile("someFilePath", new FileRange(1024, 2048)); + * if (Files.exist(Paths.get(downloadFilePath))) { + * System.out.println("Download the file completed"); + * } + *+ * + * @param downloadFilePath The path where store the downloaded file + * @param range Optional. Return file data only from the specified byte range. + */ + public void downloadToFile(String downloadFilePath, FileRange range) { + fileAsyncClient.downloadToFile(downloadFilePath, range).block(); + } + + /** + * Downloads a file from the system, including its metadata and properties + * + *
Code Samples
+ * + *Download the file with its metadata and properties.
+ * + * {@codesnippet com.azure.storage.file.fileClient.downloadWithProperties} + * + * @return A response that only contains headers and response status code + */ + public ResponseCode Samples
+ * + *Download the file range from 1024 to 2048 bytes, with its metadata and properties and without the contentMD5.
+ * + *+ * Response<FileDownloadInfo> response = client.downloadWithProperties() + * System.out.printf("Downloading the file completed with status code %d", response.statusCode()); + *+ * + * @param range Optional. Return file data only from the specified byte range. + * @param rangeGetContentMD5 Optional. When this header is set to true and specified together with the Range header, the service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB in size. + * @return A response that only contains headers and response status code + */ + public Response
Code Samples
+ * + *Delete the file
+ * + * {@codesnippet com.azure.storage.file.fileClient.delete} + * + * @return A response that only contains headers and response status code + * @throws StorageErrorException If the directory doesn't exist or the file doesn't exist. + */ + public VoidResponse delete() { + return fileAsyncClient.delete().block(); } - public MonoCode Samples
+ * + *Retrieve file properties
+ * + *+ * Response<FileProperties> response = client.getProperties() + * DirectoryProperties properties = response.value(); + * System.out.printf("File latest modified date is %s.", properties.lastModified()); + *+ * + * @return Storage file properties + */ + public Response
If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated with the file.
+ * + *Code Samples
+ * + *Set the httpHeaders with a contentType of "text/plain"
+ * + *+ * FileHTTPHeaders httpHeaders = new FileHTTPHeaders().fileContentType("text/plain"); + * Response<FileInfo> response = client.setHttpHeaders(1024, httpHeaders); + * System.out.printf("Setting the file httpHeaders completed with status code %d", response.statusCode()); + *+ * + *
Clear the httpHeaders of the file
+ * + *+ * Response<FileInfo> response = client.setHttpHeaders(1024, null) + * System.out.printf("Clearing the file httpHeaders completed with status code %d", response.statusCode()); + *+ * + * @param newFileSize New file size of the file + * @param httpHeaders Resizes a file to the specified size. If the specified byte value is less than the current size of the file, then all ranges above the specified byte value are cleared. + * @return Response of the information about the file + * @throws IllegalArgumentException thrown if parameters fail the validation. + */ + public Response
If {@code null} is passed for the metadata it will clear the metadata associated with the file.
+ * + *Code Samples
+ * + *Set the metadata to "file:updatedMetadata"
+ * + *+ * Response<FileMetadataInfo> response = client.setMetadata(Collections.singletonMap("file", "updatedMetadata")); + * System.out.printf("Setting the file metadata completed with status code %d", response.statusCode()); + *+ * + *
Clear the metadata of the file
+ * + *+ * client.setMetadata(null) + * .subscribe(response -> System.out.printf("Clearing the file metadata completed with status code %d", response.statusCode())); + *+ * + * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared + * @return information about the file + * @throws StorageErrorException If the file doesn't exist or the metadata contains invalid keys + */ + public Response
Code Samples
+ * + *Upload "default" to the file.
+ * + * {@codesnippet com.azure.storage.file.fileClient.upload} + * + * @param data The data which will upload to the storage file. + * @param length Specifies the number of bytes being transmitted in the request body. When the FileRangeWriteType is set to clear, the value of this header must be set to zero.. + * @return A response that only contains headers and response status code + * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) + */ + public ResponseCode Samples
+ * + *Upload data "default" to the specified range of the file.
+ * + *+ * ByteBuf defaultData = Unpooled.wrappedBuffer("default".getBytes(StandardCharsets.UTF_8)); + * Response<FileUploadInfo> response = client.upload(defaultData, defaultData.readableBytes()); + * System.out.printf("Upload the bytes to file range completed with status code %d", response.statusCode()); + *+ * + * @param data The data which will upload to the storage file. + * @param offset Optional. The starting point of the upload range. It will start from the beginning if it is {@code null} + * @param length Specifies the number of bytes being transmitted in the request body. When the FileRangeWriteType is set to clear, the value of this header must be set to zero. + * @param type You may specify one of the following options: + * - Update: Writes the bytes specified by the request body into the specified range. + * - Clear: Clears the specified range and releases the space used in storage for that range. To clear a range, set the Content-Length header to zero. + * @return A response that only contains headers and response status code + * @throws StorageErrorException If you attempt to upload a range that is larger than 4 MB, the service returns status code 413 (Request Entity Too Large) + */ + public Response
Code Samples
+ * + *Upload the file from the source file path.
+ * + * {@codesnippet com.azure.storage.file.fileClient.uploadFromFile} + * + * @param uploadFilePath The path where store the source file to upload + */ + public void uploadFromFile(String uploadFilePath) { + uploadFromFile(uploadFilePath, FileRangeWriteType.UPDATE); } - public MonoCode Samples
+ * + *Upload the file from the source file path.
+ * + *+ * client.uploadFromFile("someFilePath", FileRangeWriteType.UPDATE); + * if (client.getProperties() != null) { + * System.out.printf("Upload the file with length of %d completed", client.getProperties().block().value().contentLength()); + * }; + *+ * + * @param uploadFilePath The path where store the source file to upload + * @param type You may specify one of the following options: + * - Update: Writes the bytes specified by the request body into the specified range. + * - Clear: Clears the specified range and releases the space used in storage for that range. To clear a range, set the Content-Length header to zero. + */ + public void uploadFromFile(String uploadFilePath, FileRangeWriteType type) { + fileAsyncClient.uploadFromFile(uploadFilePath, type).block(); } - public Mono
Code Samples
+ * + *List all ranges for the file client.
+ * + *+ * Iterable<FileRange> ranges = client.listRanges(); + * ranges.forEach(range -> + * System.out.printf("List ranges completed with start: %d, end: %d", range.start(), range.end())); + *+ * + * @return {@link FileRange ranges} in the files. + */ + public Iterable
Code Samples
+ * + *List all ranges within the file range from 1 KB to 2 KB.
+ * + *+ * Iterable%lt;FileRange> ranges = client.listRanges(new FileRange(1024, 2048)); + * ranges.forEach(range -> + * System.out.printf("List ranges completed with start: %d, end: %d", range.start(), range.end())); + *+ * + * @param range Optional. Return file data only from the specified byte range. + * @return {@link FileRange ranges} in the files that satisfy the requirements + */ + public Iterable
Code Samples
+ * + *List all handles for the file client.
+ * + *+ * client.listHandles() + * .forEach(handleItem -> System.out.printf("List handles completed with handleId %d", handleItem.handleId())); + *+ * + * @return {@link HandleItem handles} in the files that satisfy the requirements + */ + public Iterable
Code Samples
+ * + *List 10 handles for the file client.
+ * + *+ * client.listHandles(10) + * .forEach(handleItem -> System.out.printf("List handles completed with handleId %d", handleItem.handleId())); + *+ * @param maxResults Optional. The number of results will return per page + * @return {@link HandleItem handles} in the file that satisfy the requirements + */ + public Iterable
Code Samples
+ * + *Force close the handles returned by list handles recursively.
+ * + *+ * client.listHandles(10) + * .forEach(result -> { + * client.forceCloseHandles(result.handleId(), true).subscribe(numOfClosedHandles -> + * System.out.printf("Close %d handles.", numOfClosedHandles) + * )}); + *+ * @param handleId Specifies the handle ID to be closed. Use an asterisk ('*') as a wildcard string to specify all handles. + * @return The counts of number of handles closed + */ + public Iterable
The client needs the endpoint of the Azure Storage File service, the name of the share, and an authorization credential. + * {@link FileClientBuilder#endpoint(String) endpoint} gives the builder the endpoint and may also give the builder the + * {@link FileClientBuilder#shareName(String) shareName}, the {@link FileClientBuilder#filePath(String) filePath} and a {@link SASTokenCredential} that authorizes the client.
+ * + *Instantiating a synchronous File Client with SAS token
+ * {@codesnippet com.azure.storage.file.fileClient.instantiation.sastoken} + * + *Instantiating an Asynchronous File Client with SAS token
+ * {@codesnippet com.azure.storage.file.fileAsyncClient.instantiation.sastoken} + * + *If the {@code endpoint} doesn't contain the query parameters to construct a {@code SASTokenCredential} they may + * be set using {@link FileClientBuilder#credential(SASTokenCredential) credential}.
+ * + *+ * FileClient client = FileClient.builder() + * .endpoint(endpointWithoutSASTokenQueryParams) + * .shareName(shareName) + * .filePath(filePath) + * .credential(SASTokenCredential.fromQuery(SASTokenQueryParams)) + * .buildClient(); + *+ * + *
+ * FileAsyncClient client = FileAsyncClient.builder() + * .endpoint(endpointWithoutSASTokenQueryParams) + * .shareName(shareName) + * .filePath(filePath) + * .credential(SASTokenCredential.fromQuery(SASTokenQueryParams)) + * .buildAsyncClient(); + *+ * + *
Another way to authenticate the client is using a {@link SharedKeyCredential}. To create a SharedKeyCredential + * a connection string from the Storage File service must be used. Set the SharedKeyCredential with + * {@link FileClientBuilder#connectionString(String) connectionString}. If the builder has both a SASTokenCredential and + * SharedKeyCredential the SharedKeyCredential will be preferred when authorizing requests sent to the service.
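The connection-string samples referenced below are snippet placeholders, so a concrete sketch may help. This is an editorial example in which connectionString, endpoint, shareName and filePath stand for the caller's own values.
<pre>
FileClient fileClient = new FileClientBuilder()
    .connectionString(connectionString)   // derives the SharedKeyCredential used to sign requests
    .endpoint(endpoint)
    .shareName(shareName)
    .filePath(filePath)
    .buildClient();
</pre>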
+ * + *Instantiating a synchronous File Client with connection string.
+ * {@codesnippet com.azure.storage.file.fileClient.instantiation.connectionstring} + * + *Instantiating an Asynchronous File Client with connection string.
+ * {@codesnippet com.azure.storage.file.directoryAsyncClient.instantiation.connectionstring} + * + * @see FileClient + * @see FileAsyncClient + * @see SASTokenCredential + * @see SharedKeyCredential + */ public class FileClientBuilder { + private static final String ACCOUNT_NAME = "accountname"; + private final List+ * If {@link FileClientBuilder#pipeline(HttpPipeline) pipeline} is set, then the {@code pipeline} and + * {@link FileClientBuilder#endpoint(String) endpoint} are used to create the + * {@link FileAsyncClient client}. All other builder settings are ignored. + *
+ * + * @return A ShareAsyncClient with the options set from the builder. + * @throws NullPointerException If {@code endpoint} or {@code shareName} is {@code null}. + * @throws IllegalArgumentException If neither a {@link SharedKeyCredential} or {@link SASTokenCredential} has been set. + */ + public FileAsyncClient buildAsyncClient() { + Objects.requireNonNull(endpoint); + + if (pipeline != null) { + return new FileAsyncClient(endpoint, pipeline, shareName, filePath, shareSnapshot); + } + + if (sasTokenCredential == null && sharedKeyCredential == null) { + throw new IllegalArgumentException("Credentials are required for authorization"); + } + + // Closest to API goes first, closest to wire goes last. + final List+ * If {@link FileClientBuilder#pipeline(HttpPipeline) pipeline} is set, then the {@code pipeline} and + * {@link FileClientBuilder#endpoint(String) endpoint} are used to create the + * {@link FileClient client}. All other builder settings are ignored. + *
+ * + * @return A FileClient with the options set from the builder. + * @throws NullPointerException If {@code endpoint}, {@code shareName} or {@code filePath} is {@code null}. + * @throws IllegalStateException If neither a {@link SharedKeyCredential} or {@link SASTokenCredential} has been set. + */ + public FileClient buildClient() { + return new FileClient(this.buildAsyncClient()); + } + + /** + * Sets the endpoint for the Azure Storage File instance that the client will interact with. + * + *The first path segment, if the endpoint contains path segments, will be assumed to be the name of the share + * that the client will interact with. Rest of the path segments should be the path of the file. + * It mush end up with the file name if more segments exist.
+ * + *Query parameters of the endpoint will be parsed using {@link SASTokenCredential#fromQuery(String)} in an + * attempt to generate a {@link SASTokenCredential} to authenticate requests sent to the service.
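As an editorial illustration of the parsing described above (the account, share and file names and the sasTokenQueryParams variable are assumptions), passing a full file URL lets the builder derive the share name, the file path and, when SAS query parameters are present, the credential in a single call:
<pre>
FileClient fileClient = new FileClientBuilder()
    .endpoint("https://myaccount.file.core.windows.net/myshare/mydirectory/myfile.txt?" + sasTokenQueryParams)
    .buildClient();
// shareName is parsed as "myshare" and filePath as "mydirectory/myfile.txt"
</pre>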
+ * + * @param endpoint The URL of the Azure Storage File instance to send service requests to and receive responses from. + * @return the updated FileClientBuilder object + * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is an invalid URL + */ + public FileClientBuilder endpoint(String endpoint) { + Objects.requireNonNull(endpoint); + try { + URL fullURL = new URL(endpoint); + this.endpoint = new URL(fullURL.getProtocol() + "://" + fullURL.getHost()); + + // Attempt to get the share name and file path from the URL passed + String[] pathSegments = fullURL.getPath().split("/"); + int length = pathSegments.length; + this.shareName = length >= 2 ? pathSegments[1] : this.shareName; + String[] filePathParams = length >= 3 ? Arrays.copyOfRange(pathSegments, 2, length) : null; + this.filePath = filePathParams != null ? String.join("/", filePathParams) : this.filePath; + + // Attempt to get the SAS token from the URL passed + SASTokenCredential credential = SASTokenCredential.fromQuery(fullURL.getQuery()); + if (credential != null) { + this.sasTokenCredential = credential; + } + } catch (MalformedURLException ex) { + throw new IllegalArgumentException("The Azure Storage File endpoint url is malformed."); + } + + return this; + } + + /** + * Sets the {@link SASTokenCredential} used to authenticate requests sent to the File service. + * + * @param credential SAS token credential generated from the Storage account that authorizes requests + * @return the updated FileClientBuilder object + * @throws NullPointerException If {@code credential} is {@code null}. + */ + public FileClientBuilder credential(SASTokenCredential credential) { + this.sasTokenCredential = credential; + return this; + } + + /** + * Creates a {@link SharedKeyCredential} from the {@code connectionString} used to authenticate requests sent to the + * File service. + * + * @param connectionString Connection string from the Access Keys section in the Storage account + * @return the updated FileClientBuilder object + * @throws NullPointerException If {@code connectionString} is {@code null}. + */ + public FileClientBuilder connectionString(String connectionString) { + Objects.requireNonNull(connectionString); + this.sharedKeyCredential = SharedKeyCredential.fromConnectionString(connectionString); + getEndPointFromConnectionString(connectionString); + return this; + } + + private void getEndPointFromConnectionString(String connectionString) { + MapIf {@code pipeline} is set, all other settings are ignored, aside from {@link FileClientBuilder#endpoint(String) endpoint}, + * {@link FileClientBuilder#shareName(String) shareName} @{link FileClientBuilder#filePath(String) filePath}, and {@link FileClientBuilder#shareSnapshot(String) snaphotShot} + * when building clients.
+ * + * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. + * @return The updated FileClientBuilder object. + * @throws NullPointerException If {@code pipeline} is {@code null}. + */ + public FileClientBuilder pipeline(HttpPipeline pipeline) { + this.pipeline = Objects.requireNonNull(pipeline); + return this; + } + + /** + * Sets the configuration object used to retrieve environment configuration values used to buildClient the client with + * when they are not set in the builder, defaults to Configuration.NONE + * @param configuration configuration store + * @return the updated FileClientBuilder object + */ + public FileClientBuilder configuration(Configuration configuration) { + this.configuration = configuration; + return this; + } - // fileUri, SharedKeyCredential, FileClientOptions ,withSnapshot + /** + * Sets the snapshot that the constructed clients will interact with. This snapshot must be linked to the share + * that has been specified in the builder. + * + * @param shareSnapshot Identifier of the snapshot + * @return the updated FileClientBuilder object + * @throws NullPointerException If {@code shareSnapshot} is {@code null}. + */ + public FileClientBuilder shareSnapshot(String shareSnapshot) { + this.shareSnapshot = shareSnapshot; + return this; + } } diff --git a/storage/client/file/src/main/java/com/azure/storage/file/FileConfiguration.java b/storage/client/file/src/main/java/com/azure/storage/file/FileConfiguration.java new file mode 100644 index 0000000000000..8cf9de8a7c250 --- /dev/null +++ b/storage/client/file/src/main/java/com/azure/storage/file/FileConfiguration.java @@ -0,0 +1,13 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.file; + +/* + * Gets the SDK information for this library component. 
+ */ +class FileConfiguration { + //TODO: Eventually remove these hardcoded strings with https://github.com/Azure/azure-sdk-for-java/issues/3141 + static final String NAME = "storage-file"; + static final String VERSION = "1.0.0-SNAPSHOT"; +} diff --git a/storage/client/file/src/main/java/com/azure/storage/file/FileServiceAsyncClient.java b/storage/client/file/src/main/java/com/azure/storage/file/FileServiceAsyncClient.java index 9c66716ca61c0..976be54d82381 100644 --- a/storage/client/file/src/main/java/com/azure/storage/file/FileServiceAsyncClient.java +++ b/storage/client/file/src/main/java/com/azure/storage/file/FileServiceAsyncClient.java @@ -3,51 +3,362 @@ package com.azure.storage.file; +import com.azure.core.http.HttpPipeline; import com.azure.core.http.rest.Response; +import com.azure.core.http.rest.SimpleResponse; import com.azure.core.http.rest.VoidResponse; +import com.azure.core.implementation.util.ImplUtils; +import com.azure.core.util.Context; +import com.azure.storage.common.credentials.SASTokenCredential; +import com.azure.storage.common.credentials.SharedKeyCredential; +import com.azure.storage.file.implementation.AzureFileStorageBuilder; +import com.azure.storage.file.implementation.AzureFileStorageImpl; +import com.azure.storage.file.models.CorsRule; +import com.azure.storage.file.models.DeleteSnapshotsOptionType; import com.azure.storage.file.models.FileServiceProperties; +import com.azure.storage.file.models.ListSharesIncludeType; import com.azure.storage.file.models.ListSharesOptions; +import com.azure.storage.file.models.ListSharesResponse; +import com.azure.storage.file.models.ServicesListSharesSegmentResponse; import com.azure.storage.file.models.ShareItem; +import com.azure.storage.file.models.StorageErrorException; +import java.net.URL; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import org.reactivestreams.Publisher; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; -import java.util.Map; - -public class FileServiceAsyncClient { +/** + * This class provides a client that contains all the operations for interacting with a file account in Azure Storage. + * Operations allowed by the client are creating, listing, and deleting shares and retrieving and updating properties + * of the account. + * + *Instantiating an Asynchronous File Service Client
+ * + *+ * FileServiceAsyncClient client = FileServiceAsyncClient.builder() + * .connectionString(connectionString) + * .endpoint(endpoint) + * .buildAsyncClient(); + *+ * + *
View {@link FileServiceClientBuilder this} for additional ways to construct the client.
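For orientation, the following editorial sketch (the share name is illustrative, and the client is assumed to be built as shown above) shows that createShare hands back the ShareAsyncClient for the new share, so no second lookup is needed.
<pre>
client.createShare("myshare")
    .subscribe(response -> {
        ShareAsyncClient share = response.value();
        System.out.printf("Created share at %s with status code %d", share.getShareUrl(), response.statusCode());
    });
</pre>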
+ * + * @see FileServiceClientBuilder + * @see FileServiceClient + * @see SharedKeyCredential + * @see SASTokenCredential + */ +public final class FileServiceAsyncClient { + private final AzureFileStorageImpl client; - FileServiceAsyncClient() { - throw new UnsupportedOperationException(); + /** + * Creates a FileServiceClient that sends requests to the storage account at {@code endpoint}. + * Each service call goes through the {@code httpPipeline}. + * + * @param endpoint URL for the Storage File service + * @param httpPipeline HttpPipeline that the HTTP requests and responses flow through + */ + FileServiceAsyncClient(URL endpoint, HttpPipeline httpPipeline) { + this.client = new AzureFileStorageBuilder().pipeline(httpPipeline) + .url(endpoint.toString()) + .build(); } - public static FileServiceClientBuilder asyncBuilder() { - throw new UnsupportedOperationException(); + /** + * @return the getFileServiceUrl of the Storage File service + */ + public String getFileServiceUrl() { + return client.url(); } - public String url() { - throw new UnsupportedOperationException(); + /** + * Constructs a ShareAsyncClient that interacts with the specified share. + * + *If the share doesn't exist in the storage account {@link ShareAsyncClient#create() create} in the client will + * need to be called before interaction with the share can happen.
+ * + * @param shareName Name of the share + * @return a ShareAsyncClient that interacts with the specified share + */ + public ShareAsyncClient getShareAsyncClient(String shareName) { + return new ShareAsyncClient(client, shareName); } - public ShareAsyncClient getShareClient(String shareName) { - throw new UnsupportedOperationException(); + /** + * Lists all shares in the storage account without their metadata or snapshots. + * + *Code Samples
+ * + *List all shares in the account
+ * + * {@codesnippet com.azure.storage.file.fileServiceAsyncClient.listShares} + * + * @return {@link ShareItem Shares} in the storage account without their metadata or snapshots + */ + public FluxSet starts with name filter using {@link ListSharesOptions#prefix(String) prefix} to filter shares that are + * listed.
+ * + *Pass true to {@link ListSharesOptions#includeMetadata(boolean) includeMetadata} to have metadata returned for + * the shares.
+ * + *Pass true to {@link ListSharesOptions#includeSnapshots(boolean) includeSnapshots} to have snapshots of the + * shares listed.
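The three options can also be combined. A hedged editorial sketch (the prefix is illustrative) that filters by prefix while returning both metadata and snapshots:
<pre>
client.listShares(new ListSharesOptions().prefix("azure").includeMetadata(true).includeSnapshots(true))
    .subscribe(result -> System.out.printf("Share %s, Is Snapshot? %b, Metadata: %s",
        result.name(), result.snapshot() != null, result.metadata()));
</pre>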
+ * + *Code Samples
+ * + *List all shares that begin with "azure"
+ * + *+ * client.listShares(new ListSharesOptions().prefix("azure")) + * .subscribe(result -> System.out.printf("Share %s exists in the account", result.name())); + *+ * + *
List all shares including their snapshots and metadata
+ * + *+ * client.listShares(new ListSharesOptions().includeMetadata(true).includeSnapshots(true)) + * .subscribe(result -> System.out.printf("Share %s, Is Snapshot? %b, Metadata: %s", result.name(), result.snapshot() != null, result.metadata())); + *+ * + * @param options Options for listing shares + * @return {@link ShareItem Shares} in the storage account that satisfy the filter requirements + */ public Flux
Code Samples
+ * + *Retrieve File service properties
+ * + *+ * client.getProperties() + * .subscribe(response -> { + * FileServiceProperties properties = response.value(); + * System.out.printf("Hour metrics enabled: %b, Minute metrics enabled: %b", properties.hourMetrics().enabled(), properties.minuteMetrics().enabled()); + * }); + *+ * + * @return Storage account File service properties + */ public Mono
Code Sample
+ * + *Clear CORS in the File service
+ * + *+ * FileServiceProperties properties = client.getProperties().block().value(); + * properties.cors(Collections.emptyList()); + * + * client.setProperties(properties) + * .subscribe(response -> System.out.printf("Setting File service properties completed with status code %d", response.statusCode())); + *+ * + *
Enable Minute and Hour Metrics
+ * + *+ * FileServiceProperties properties = client.getProperties().block().value(); + * properties.minuteMetrics().enabled(true); + * properties.hourMetrics().enabled(true); + * + * client.setProperties(properties) + * .subscribe(response -> System.out.printf("Setting File service properties completed with status code %d", response.statusCode())); + *+ * + * @param properties Storage account File service properties + * @return A response that only contains headers and response status code + * @throws StorageErrorException When one of the following is true + *
Code Samples
+ * + *Create the share "test"
+ * + * {@codesnippet com.azure.storage.file.fileServiceAsyncClient.createShare#string} + * + * @param shareName Name of the share + * @return A response containing the ShareAsyncClient and the status of creating the share. + * @throws StorageErrorException If a share with the same name already exists + */ + public MonoCode Samples
+ * + *Create the share "test" with metadata "share:metadata"
+ * + *+ * client.createShare("test", Collections.singletonMap("share", "metadata"), null) + * .subscribe(response -> System.out.printf("Creating the share completed with status code %d", response.statusCode())); + *+ * + *
Create the share "test" with a quota of 10 GB
+ * + *+ * client.createShare("test", null, 10) + * .subscribe(response -> System.out.printf("Creating the share completed with status code %d", response.statusCode())); + *+ * + * @param shareName Name of the share + * @param metadata Optional. Metadata to associate with the share + * @param quotaInGB Optional. Maximum size the share is allowed to grow to in GB. This must be greater than 0 and + * less than or equal to 5120. The default value is 5120. + * @return A response containing the ShareAsyncClient and the status of creating the share. + * @throws StorageErrorException If a share with the same name already exists or {@code quotaInGB} is outside the + * allowed range. + */ + public Mono
Code Samples
+ * + *Delete the share "test"
+ * + * {@codesnippet com.azure.storage.file.fileServiceAsyncClient.deleteShare#string} + * + * @param shareName Name of the share + * @return A response that only contains headers and response status code + * @throws StorageErrorException If the share doesn't exist + */ + public MonoCode Samples
+ * + *Delete the snapshot of share "test" that was created at midnight
+ * + *+ * OffsetDateTime midnight = OffsetDateTime.of(LocalTime.MIDNIGHT, ZoneOffset.UTC)); + * client.deleteShare("test", midnight.toString()) + * .subscribe(response -> System.out.printf("Deleting the snapshot completed with status code %d", response.statusCode())); + *+ * + * @param shareName Name of the share + * @param shareSnapshot Identifier of the snapshot + * @return A response that only contains headers and response status code + * @throws StorageErrorException If the share doesn't exist or the snapshot doesn't exist + */ public Mono
Instantiating a Synchronous File Service Client
+ * + *+ * FileServiceClient client = FileServiceClient.builder() + * .connectionString(connectionString) + * .endpoint(endpoint) + * .buildClient(); + *+ * + *
View {@link FileServiceClientBuilder this} for additional ways to construct the client.
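As an editorial sketch of a typical synchronous flow (the share name is illustrative, and the client is assumed to be built as shown above): create a share, use the ShareClient returned in the response, and delete the share when finished.
<pre>
Response<ShareClient> createResponse = client.createShare("myshare");
ShareClient share = createResponse.value();
System.out.printf("Created the share with status code %d", createResponse.statusCode());

// ... interact with the share ...

VoidResponse deleteResponse = client.deleteShare("myshare");
System.out.printf("Deleted the share with status code %d", deleteResponse.statusCode());
</pre>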
+ * + * @see FileServiceClientBuilder + * @see FileServiceAsyncClient + * @see SharedKeyCredential + * @see SASTokenCredential + */ +public final class FileServiceClient { private final FileServiceAsyncClient client; - FileServiceClient() { - throw new UnsupportedOperationException(); + /** + * Creates a FileServiceClient that wraps a FileServiceAsyncClient and blocks requests. + * + * @param client FileServiceAsyncClient that is used to send requests + */ + FileServiceClient(FileServiceAsyncClient client) { + this.client = client; } - public static FileServiceClientBuilder syncBuilder() { - throw new UnsupportedOperationException(); + /** + * @return the getFileServiceUrl of the Storage File service + */ + public String getFileServiceUrl() { + return client.getFileServiceUrl(); } - public String url() { - throw new UnsupportedOperationException(); + /** + * Constructs a ShareClient that interacts with the specified share. + * + *If the share doesn't exist in the storage account {@link ShareClient#create() create} in the client will + * need to be called before interaction with the share can happen.
+ * + * @param shareName Name of the share + * @return a ShareClient that interacts with the specified share + */ + public ShareClient getShareClient(String shareName) { + return new ShareClient(client.getShareAsyncClient(shareName)); } - public ShareClient getShareClient(String shareName) { - throw new UnsupportedOperationException(); + /** + * Lists all shares in the storage account without their metadata or snapshots. + * + *Code Samples
+ * + *List all shares in the account
+ * + * {@codesnippet com.azure.storage.file.fileServiceClient.listShares} + * + * @return {@link ShareItem Shares} in the storage account without their metadata or snapshots + */ + public IterableSet starts with name filter using {@link ListSharesOptions#prefix(String) prefix} to filter shares that are + * listed.
+ * + *Pass true to {@link ListSharesOptions#includeMetadata(boolean) includeMetadata} to have metadata returned for + * the shares.
+ * + *Pass true to {@link ListSharesOptions#includeSnapshots(boolean) includeSnapshots} to have snapshots of the + * shares listed.
+ * + *Code Samples
+ * + *List all shares that begin with "azure"
+ * + *+ * for (ShareItem result : client.listShares(new ListSharesOptions().prefix("azure"))) { + * System.out.printf("Share %s exists in the account", result.name()); + * } + *+ * + *
List all shares including their snapshots and metadata
+ * + *+ * for (ShareItem result : client.listShares(new ListSharesOptions().includeMetadata(true).includeSnapshots(true))) { + * System.out.printf("Share %s, Is Snapshot? %b, Metadata: %s", result.name(), result.snapshot() != null, result.metadata()); + * } + *+ * + * @param options Options for listing shares + * @return {@link ShareItem Shares} in the storage account that satisfy the filter requirements + */ + public Iterable
Code Samples
+ * + *Retrieve File service properties
+ * + *+ * FileServiceProperties properties = client.getProperties().value(); + * System.out.printf("Hour metrics enabled: %b, Minute metrics enabled: %b", properties.hourMetrics().enabled(), properties.minuteMetrics().enabled()); + *+ * + * @return Storage account File service properties + */ + public Response
Code Sample
+ * + *Clear CORS in the File service
+ * + *+ * FileServiceProperties properties = client.getProperties().value(); + * properties.cors(Collections.emptyList()); + * + * VoidResponse response = client.setProperties(properties); + * System.out.printf("Setting File service properties completed with status code %d", response.statusCode()); + *+ * + *
Enable Minute and Hour Metrics
+ * + *+ * FileServiceProperties properties = client.getProperties().value(); + * properties.minuteMetrics().enabled(true); + * properties.hourMetrics().enabled(true); + * + * VoidResponse respone = client.setProperties(properties); + * System.out.printf("Setting File service properties completed with status code %d", response.statusCode()); + *+ * + * @param properties Storage account File service properties + * @return A response that only contains headers and response status code + * @throws StorageErrorException When one of the following is true + *
Code Samples
+ * + *Create the share with the share name "myshare"
+ * {@codesnippet com.azure.storage.file.fileServiceClient.createShare#string} + * + * @param shareName Name of the share + * @return A response containing the ShareClient and the status of creating the share. + * @throws StorageErrorException If a share with the same name already exists + */ + public ResponseCode Samples
+ * + *Create the share "test" with metadata "share:metadata"
+ * + *+ * Response<ShareClient> response = client.createShare("test", Collections.singletonMap("share", "metadata"), null); + * System.out.printf("Creating the share completed with status code %d", response.statusCode()); + *+ * + *
Create the share "test" with a quota of 10 GB
+ * + *+ * Response<ShareClient> response = client.createShare("test", null, 10) + * System.out.printf("Creating the share completed with status code %d", response.statusCode()); + *+ * + * @param shareName Name of the share + * @param metadata Optional. Metadata to associate with the share + * @param quotaInGB Optional. Maximum size the share is allowed to grow to in GB. This must be greater than 0 and + * less than or equal to 5120. The default value is 5120. + * @return A response containing the ShareClient and the status of creating the share. + * @throws StorageErrorException If a share with the same name already exists or {@code quotaInGB} is outside the + * allowed range. + */ + public Response
Code Samples
+ * + *Delete the share "test"
+ * + * {@codesnippet com.azure.storage.file.fileServiceClient.deleteShare#string} + * + * @param shareName Name of the share + * @return A response that only contains headers and response status code + * @throws StorageErrorException If the share doesn't exist + */ + public VoidResponse deleteShare(String shareName) { + return deleteShare(shareName, null); } - public MonoCode Samples
+ * + *Delete the snapshot of share "test" that was created at midnight
+ * + *+ * OffsetDateTime midnight = OffsetDateTime.of(LocalTime.MIDNIGHT, ZoneOffset.UTC)); + * VoidResponse response = client.deleteShare("test", midnight.toString()); + * System.out.printf("Deleting the snapshot completed with status code %d", response.statusCode()); + *+ * + * @param shareName Name of the share + * @param shareSnapshot Identifier of the snapshot + * @return A response that only contains headers and response status code + * @throws StorageErrorException If the share doesn't exist or the snapshot doesn't exist + */ + public VoidResponse deleteShare(String shareName, String shareSnapshot) { + return client.deleteShare(shareName, shareSnapshot).block(); } } diff --git a/storage/client/file/src/main/java/com/azure/storage/file/FileServiceClientBuilder.java b/storage/client/file/src/main/java/com/azure/storage/file/FileServiceClientBuilder.java index 01148e8af57f5..d909c95295002 100644 --- a/storage/client/file/src/main/java/com/azure/storage/file/FileServiceClientBuilder.java +++ b/storage/client/file/src/main/java/com/azure/storage/file/FileServiceClientBuilder.java @@ -3,7 +3,309 @@ package com.azure.storage.file; -public class FileServiceClientBuilder { +import com.azure.core.http.HttpClient; +import com.azure.core.http.HttpPipeline; +import com.azure.core.http.policy.AddDatePolicy; +import com.azure.core.http.policy.HttpLogDetailLevel; +import com.azure.core.http.policy.HttpLoggingPolicy; +import com.azure.core.http.policy.HttpPipelinePolicy; +import com.azure.core.http.policy.RequestIdPolicy; +import com.azure.core.http.policy.RetryPolicy; +import com.azure.core.http.policy.UserAgentPolicy; +import com.azure.core.implementation.http.policy.spi.HttpPolicyProviders; +import com.azure.core.util.configuration.Configuration; +import com.azure.core.util.configuration.ConfigurationManager; +import com.azure.storage.common.credentials.SASTokenCredential; +import com.azure.storage.common.credentials.SharedKeyCredential; +import com.azure.storage.common.policy.SASTokenCredentialPolicy; +import com.azure.storage.common.policy.SharedKeyCredentialPolicy; +import java.net.MalformedURLException; +import java.net.URL; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; - // connection string, FileClientOptions, Uri, SharedKeyCredential +/** + * This class provides a fluent builder API to help aid the configuration and instantiation of the {@link FileServiceClient FileServiceClients} + * and {@link FileServiceAsyncClient FileServiceAsyncClients}, calling {@link FileServiceClientBuilder#buildClient() buildClient} + * constructs an instance of FileServiceClient and calling {@link FileServiceClientBuilder#buildAsyncClient() buildAsyncClient} + * constructs an instance of FileServiceAsyncClient. + * + *
The client needs the endpoint of the Azure Storage File service and an authorization credential. + * {@link FileServiceClientBuilder#endpoint(String) endpoint} gives the builder the endpoint and may also give the builder a + * {@link SASTokenCredential} that authorizes the client.
+ * + *Instantiating a synchronous FileService Client with SAS token
+ * {@codesnippet com.azure.storage.file.fileServiceClient.instantiation.sastoken} + * + *Instantiating an Asynchronous FileService Client with SAS token
+ * {@codesnippet com.azure.storage.file.fileServiceAsyncClient.instantiation.sastoken} + * + *If the {@code endpoint} doesn't contain the query parameters to construct a {@code SASTokenCredential} they may + * be set using {@link FileServiceClientBuilder#credential(SASTokenCredential) credential}.
+ * + *+ * FileServiceClient client = FileServiceClient.builder() + * .endpoint(endpointWithoutSASTokenQueryParams) + * .credential(SASTokenCredential.fromQuery(SASTokenQueryParams)) + * .buildClient(); + *+ * + *
+ * FileServiceAsyncClient client = FileServiceAsyncClient.builder() + * .endpoint(endpointWithoutSASTokenQueryParams) + * .credential(SASTokenCredential.fromQuery(SASTokenQueryParams)) + * .buildAsyncClient(); + *+ * + *
Another way to authenticate the client is using a {@link SharedKeyCredential}. To create a SharedKeyCredential + * a connection string from the Storage File service must be used. Set the SharedKeyCredential with + * {@link FileServiceClientBuilder#connectionString(String) connectionString}. If the builder has both a SASTokenCredential and + * SharedKeyCredential the SharedKeyCredential will be preferred when authorizing requests sent to the service.
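A short editorial sketch of the precedence described above; endpointWithSASTokenQueryParams and connectionString are placeholders for the caller's own values.
<pre>
FileServiceClient client = new FileServiceClientBuilder()
    .endpoint(endpointWithSASTokenQueryParams)   // also yields a SASTokenCredential from the query string
    .connectionString(connectionString)          // yields a SharedKeyCredential, which is preferred for signing
    .buildClient();
</pre>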
+ * + *Instantiating a synchronous FileService Client with connection string.
+ * {@codesnippet com.azure.storage.file.fileServiceClient.instantiation.connectionstring} + * + *Instantiating an Asynchronous FileService Client with connection string.
+ * {@codesnippet com.azure.storage.file.fileServiceAsyncClient.instantiation.connectionstring} + * + * @see FileServiceClient + * @see FileServiceAsyncClient + * @see SASTokenCredential + * @see SharedKeyCredential + */ +public final class FileServiceClientBuilder { + private static final String ACCOUNT_NAME = "accountname"; + private final List+ * If {@link FileServiceClientBuilder#pipeline(HttpPipeline) pipeline} is set, then the {@code pipeline} and + * {@link FileServiceClientBuilder#endpoint(String) endpoint} are used to create the + * {@link FileServiceAsyncClient client}. All other builder settings are ignored. + *
+ * + * @return A FileServiceAsyncClient with the options set from the builder. + * @throws NullPointerException If {@code endpoint} is {@code null}. + * @throws IllegalArgumentException If neither a {@link SharedKeyCredential} or {@link SASTokenCredential} has been set. + */ + public FileServiceAsyncClient buildAsyncClient() { + Objects.requireNonNull(endpoint); + + if (pipeline != null) { + return new FileServiceAsyncClient(endpoint, pipeline); + } + + if (sasTokenCredential == null && sharedKeyCredential == null) { + throw new IllegalArgumentException("Credentials are required for authorization"); + } + + // Closest to API goes first, closest to wire goes last. + final List+ * If {@link FileServiceClientBuilder#pipeline(HttpPipeline) pipeline} is set, then the {@code pipeline} and + * {@link FileServiceClientBuilder#endpoint(String) endpoint} are used to create the + * {@link FileServiceClient client}. All other builder settings are ignored. + *
+ * + * @return A FileServiceClient with the options set from the builder. + * @throws NullPointerException If {@code endpoint} is {@code null}. + * @throws IllegalStateException If neither a {@link SharedKeyCredential} or {@link SASTokenCredential} has been set. + */ + public FileServiceClient buildClient() { + return new FileServiceClient(buildAsyncClient()); + } + + /** + * Sets the endpoint for the Azure Storage File instance that the client will interact with. + * + *Query parameters of the endpoint will be parsed using {@link SASTokenCredential#fromQuery(String) fromQuery} in an + * attempt to generate a {@link SASTokenCredential} to authenticate requests sent to the service.
+ * + * @param endpoint The URL of the Azure Storage File instance to send service requests to and receive responses from. + * @return the updated FileServiceClientBuilder object + * @throws IllegalArgumentException If {@code endpoint} isn't a proper URL + */ + public FileServiceClientBuilder endpoint(String endpoint) { + Objects.requireNonNull(endpoint); + try { + URL fullURL = new URL(endpoint); + this.endpoint = new URL(fullURL.getProtocol() + "://" + fullURL.getHost()); + + // Attempt to get the SAS token from the URL passed + SASTokenCredential credential = SASTokenCredential.fromQuery(fullURL.getQuery()); + if (credential != null) { + this.sasTokenCredential = credential; + } + } catch (MalformedURLException ex) { + throw new IllegalArgumentException("The Azure Storage File Service endpoint url is malformed."); + } + + return this; + } + + /** + * Sets the {@link SASTokenCredential} used to authenticate requests sent to the Queue service. + * + * @param credential SAS token credential generated from the Storage account that authorizes requests + * @return the updated FileServiceClientBuilder object + * @throws NullPointerException If {@code credential} is {@code null}. + */ + public FileServiceClientBuilder credential(SASTokenCredential credential) { + this.sasTokenCredential = Objects.requireNonNull(credential); + return this; + } + + /** + * Creates a {@link SharedKeyCredential} from the {@code connectionString} used to authenticate requests sent to the + * File service. + * + * @param connectionString Connection string from the Access Keys section in the Storage account + * @return the updated FileServiceClientBuilder object + * @throws NullPointerException If {@code connectionString} is {@code null}. + */ + public FileServiceClientBuilder connectionString(String connectionString) { + Objects.requireNonNull(connectionString); + this.sharedKeyCredential = SharedKeyCredential.fromConnectionString(connectionString); + getEndPointFromConnectionString(connectionString); + return this; + } + + private void getEndPointFromConnectionString(String connectionString) { + MapInstantiating an Asynchronous Share Client
+ * + *+ * ShareAsyncClient client = ShareAsyncClient.builder() + * .connectionString(connectionString) + * .endpoint(endpoint) + * .buildAsyncClient(); + *+ * + *
View {@link ShareClientBuilder this} for additional ways to construct the client.
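For orientation, an editorial sketch (the directory name is illustrative, and the client is assumed to be built as shown above) that creates the share and then a directory inside it:
<pre>
client.create()
    .then(client.createDirectory("mydirectory"))
    .subscribe(response -> System.out.printf("Created the directory with status code %d", response.statusCode()));
</pre>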
+ * + * @see ShareClientBuilder + * @see ShareClient + * @see SharedKeyCredential + * @see SASTokenCredential + */ public class ShareAsyncClient { + private final AzureFileStorageImpl client; + private final String shareName; + private final String shareSnapshot; + + /** + * Creates a ShareAsyncClient that sends requests to the storage share at {@link AzureFileStorageImpl#url() endpoint}. + * Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. + * + * @param client Client that interacts with the service interfaces + * @param shareName Name of the share + */ + ShareAsyncClient(AzureFileStorageImpl client, String shareName) { + this.shareName = shareName; + this.shareSnapshot = null; - ShareAsyncClient() { - throw new UnsupportedOperationException(); + this.client = new AzureFileStorageBuilder().pipeline(client.httpPipeline()) + .url(client.url()) + .version(client.version()) + .build(); } - public static ShareClientBuilder asyncBuilder() { - throw new UnsupportedOperationException(); + /** + * Creates a ShareAsyncClient that sends requests to the storage share at {@code endpoint}. + * Each service call goes through the {@code httpPipeline}. + * + * @param endpoint URL for the Storage File service + * @param httpPipeline HttpPipeline that the HTTP requests and response flow through + * @param shareName Name of the share + * @param shareSnapshot Optional. Specific snapshot of the share + */ + ShareAsyncClient(URL endpoint, HttpPipeline httpPipeline, String shareName, String shareSnapshot) { + this.shareName = shareName; + this.shareSnapshot = shareSnapshot; + + this.client = new AzureFileStorageBuilder().pipeline(httpPipeline) + .url(endpoint.toString()) + .build(); } - public String url() { - throw new UnsupportedOperationException(); + /** + * @return the getShareUrl of the storage file service + */ + public String getShareUrl() { + return client.url(); } + + /** + * Constructs a {@link DirectoryAsyncClient} that interacts with the root directory in the share. + * + *If the directory doesn't exist in the share {@link DirectoryAsyncClient#create(Map) create} in the client will + * need to be called before interaction with the directory can happen.
+ * + * @return a {@link DirectoryAsyncClient} that interacts with the root directory in the share + */ public DirectoryAsyncClient getRootDirectoryClient() { - throw new UnsupportedOperationException(); + return getDirectoryClient(""); } + /** + * Constructs a {@link DirectoryAsyncClient} that interacts with the specified directory. + * + *If the directory doesn't exist in the share {@link DirectoryAsyncClient#create(Map) create} in the client will + * need to be called before interaction with the directory can happen.
+ * + * @param directoryName Name of the directory + * @return a {@link DirectoryAsyncClient} that interacts with the directory in the share + */ public DirectoryAsyncClient getDirectoryClient(String directoryName) { - throw new UnsupportedOperationException(); + return new DirectoryAsyncClient(client, shareName, directoryName, shareSnapshot); } - public MonoCode Samples
+ * + *Create the share
+ * + * {@codesnippet com.azure.storage.file.shareAsyncClient.create} + * + * @return A response containing information about the share and the status its creation. + * @throws StorageErrorException If the share already exists with different metadata + */ + public MonoCode Samples
+ * + *Create the share with metadata "share:metadata"
+ * + *+ * client.createShare(Collections.singletonMap("share", "metadata"), null) + * .subscribe(response -> System.out.printf("Creating the share completed with status code %d", response.statusCode())); + *+ * + *
Create the share with a quota of 10 GB
+ * + *+ * client.createShare(null, 10) + * .subscribe(response -> System.out.printf("Creating the share completed with status code %d", response.statusCode())); + *+ * + * @param metadata Optional. Metadata to associate with the share + * @param quotaInGB Optional. Maximum size the share is allowed to grow to in GB. This must be greater than 0 and + * less than or equal to 5120. The default value is 5120. + * @return A response containing information about the share and the status its creation. + * @throws StorageErrorException If the share already exists with different metadata or {@code quotaInGB} is outside the + * allowed range. + */ + public Mono
Code Samples
+ * + *Create a snapshot
+ * + * {@codesnippet com.azure.storage.file.shareAsyncClient.createSnapshot} + * + * @return A response containing information about the snapshot of share. + * @throws StorageErrorException If the share doesn't exist, there are 200 snapshots of the share, or a snapshot is + * in progress for the share + */ + public MonoCode Samples
+ * + *Create a snapshot with metadata "snapshot:metadata"
+ * + * {@codesnippet com.azure.storage.file.shareAsyncClient.createSnapshot#map} + * + * @param metadata Optional. Metadata to associate with the snapshot. If {@code null} the metadata of the share + * will be copied to the snapshot. + * @return A response containing information about the snapshot of share. + * @throws StorageErrorException If the share doesn't exist, there are 200 snapshots of the share, or a snapshot is + * in progress for the share + */ public MonoCode Samples
+ * + *Delete the share
+ * + * {@codesnippet com.azure.storage.file.shareAsyncClient.delete} + * + * @return A response that only contains headers and response status code + * @throws StorageErrorException If the share doesn't exist + */ + public MonoCode Samples
+ * + *Delete the snapshot of share that was created at midnight
+ * + *+ * OffsetDateTime midnight = OffsetDateTime.of(LocalTime.MIDNIGHT, ZoneOffset.UTC)); + * client.deleteShare(midnight.toString()) + * .subscribe(response -> System.out.printf("Deleting the snapshot completed with status code %d", response.statusCode())); + *+ * + * @param shareSnapshot Identifier of the snapshot + * @return A response that only contains headers and response status code + * @throws StorageErrorException If the share doesn't exist or the snapshot doesn't exist + */ public Mono
Code Samples
+ * + *Retrieve the share properties
+ * + *+ * client.getProperties() + * .subscribe(response -> { + * ShareProperties properties = response.value(); + * System.out.printf("Share quota: %d, Metadata: %s", properties.quota(), properties.metadata()); + * }); + *+ * + * @return the properties of the share + * @throws StorageErrorException If the share doesn't exist + */ + public Mono
Code Samples
+ * + *Retrieve the properties from the snapshot at midnight
+ * + *+ * OffsetDateTime midnight = OffsetDateTime.of(LocalTime.MIDNIGHT, ZoneOffset.UTC)); + * client.getProperties(midnight.toString()) + * .subscribe(response -> { + * ShareProperties properties = response.value(); + * System.out.printf("Share quota: %d, Metadata: %s", properties.quota(), properties.metadata()); + * }); + *+ * + * @param shareSnapshot Identifier of the snapshot + * @return the properties of the share snapshot + * @throws StorageErrorException If the share or snapshot doesn't exist + */ public Mono
Code Samples
+ * + *Set the quota to 1024 GB
+ * + *+ * client.setQuota(1024) + * .subscribe(response -> System.out.printf("Setting the share quota completed with status code %d", response.statusCode())); + *+ * + * @param quotaInGB Size in GB to limit the share's growth. The quota in GB must be between 1 and 5120. + * @return information about the share + * @throws StorageErrorException If the share doesn't exist or {@code quotaInGB} is outside the allowed bounds + */ public Mono
If {@code null} is passed for the metadata it will clear the metadata associated with the share.
+ * + *Code Samples
+ * + *Set the metadata to "share:updatedMetadata"
+ * + *+ * client.setMetadata(Collections.singletonMap("share", "updatedMetadata")) + * .subscribe(response -> System.out.printf("Setting the share metadata completed with status code %d", response.statusCode())); + *+ * + *
Clear the metadata of the share
+ * + *+ * client.setMetadata(null) + * .subscribe(response -> System.out.printf("Clearing the share metadata completed with status code %d", response.statusCode())); + *+ * + * @param metadata Metadata to set on the share, if null is passed the metadata for the share is cleared + * @return information about the share + * @throws StorageErrorException If the share doesn't exist or the metadata contains invalid keys + */ public Mono
Code Samples
+ * + *List the stored access policies
+ * + *+ * client.getAccessPolicy() + * .subscribe(result -> System.out.printf("Access policy %s allows these permissions: %s", result.id(), result.accessPolicy().permission())); + *+ * + * @return The stored access policies specified on the queue. + * @throws StorageErrorException If the share doesn't exist + */ + public Flux
Code Samples
+ * + *Set a read only stored access policy
+ * + *+ * AccessPolicy policy = new AccessPolicy().permission("r") + * .start(OffsetDateTime.now(ZoneOffset.UTC)) + * .expiry(OffsetDateTime.now(ZoneOffset.UTC).addDays(10)); + * + * SignedIdentifier permission = new SignedIdentifier().id("mypolicy").accessPolicy(accessPolicy); + * + * client.setAccessPolicy(Collections.singletonList(permission)) + * .subscribe(response -> System.out.printf("Setting access policies completed with status code %d", response.statusCode())); + *+ * + * @param permissions Access policies to set on the queue + * @return A response that only contains headers and response status code + * @throws StorageErrorException If the share doesn't exist, a stored access policy doesn't have all fields filled out, + * or the share will have more than five policies. + */ public Mono
Code Samples
+ * + *Retrieve the storage statistics
+ * + *+ * client.getStatistics() + * .subscribe(response -> System.out.printf("The share is using %d GB", response.value().getShareUsageInGB())); + *+ * + * @return the storage statistics of the share + */ public Mono
Code Samples
+ * + *Create the directory "mydirectory"
+ * + * {@codesnippet com.azure.storage.file.shareAsyncClient.createDirectory#string} + * * + * @param directoryName Name of the directory + * @return A response containing a {@link DirectoryAsyncClient} to interact with the created directory and the + * status of its creation. + * @throws StorageErrorException If the share doesn't exist, the directory already exists or is in the process of + * being deleted, or the parent directory for the new directory doesn't exist + */ + public MonoCode Samples
+ * + *Create the directory "documents" with metadata "directory:metadata"
+ * + *+ * client.createDirectory("documents", Collections.singletonMap("directory", "metadata")) + * .subscribe(response -> System.out.printf("Creating the directory completed with status code %d", response.statusCode())); + *+ * + * @param directoryName Name of the directory + * @param metadata Optional. Metadata to associate with the directory + * @return A response containing a {@link DirectoryAsyncClient} to interact with the created directory and the + * status of its creation. + * @throws StorageErrorException If the share doesn't exist, the directory already exists or is in the process of + * being deleted, the parent directory for the new directory doesn't exist, or the metadata is using an illegal + * key name + */ public Mono
Code Samples
+ * + *Delete the directory "empty"
+ * + * {@codesnippet com.azure.storage.file.shareAsyncClient.deleteDirectory#string} + * + * @param directoryName Name of the directory + * @return A response that only contains headers and response status code + * @throws StorageErrorException If the share doesn't exist or the directory isn't empty + */ public MonoInstantiating a Synchronous Share Client
+ * + *+ * ShareClient client = ShareClient.builder() + * .connectionString(connectionString) + * .endpoint(endpoint) + * .buildClient(); + *+ * + *
View {@link ShareClientBuilder this} for additional ways to construct the client.
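For orientation, an editorial sketch (the directory name and metadata are illustrative, and the client is assumed to be built as shown above) that creates the share and then a directory inside it synchronously:
<pre>
Response<ShareInfo> response = client.create();
System.out.printf("Created the share with status code %d", response.statusCode());

DirectoryClient directoryClient = client.getDirectoryClient("mydirectory");
directoryClient.create(Collections.singletonMap("directory", "metadata"));   // the directory must be created before use
</pre>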
+ * + * @see ShareClientBuilder + * @see ShareAsyncClient + * @see SharedKeyCredential + * @see SASTokenCredential + */ public class ShareClient { - private final ShareAsyncClient client; - ShareClient() { - throw new UnsupportedOperationException(); - } - - public static ShareClientBuilder syncBuilder() { - throw new UnsupportedOperationException(); + ShareClient(ShareAsyncClient client) { + this.client = client; } - public String url() { - throw new UnsupportedOperationException(); + /** + * @return the getShareUrl of the storage file service + */ + public String getShareUrl() { + return client.getShareUrl(); } + /** + * Constructs a {@link DirectoryClient} that interacts with the root directory in the share. + * + *If the directory doesn't exist in the share {@link DirectoryClient#create(Map) create} in the client will + * need to be called before interaction with the directory can happen.
+ * + * @return a {@link DirectoryClient} that interacts with the root directory in the share + */ public DirectoryClient getRootDirectoryClient() { - throw new UnsupportedOperationException(); + return getDirectoryClient(""); } + /** + * Constructs a {@link DirectoryClient} that interacts with the specified directory. + * + *If the directory doesn't exist in the share {@link DirectoryClient#create(Map) create} in the client will + * need to be called before interaction with the directory can happen.
+ * + * @param directoryName Name of the directory + * @return a {@link DirectoryClient} that interacts with the directory in the share + */ public DirectoryClient getDirectoryClient(String directoryName) { - throw new UnsupportedOperationException(); + return new DirectoryClient(client.getDirectoryClient(directoryName)); } - public MonoCode Samples
+ * + *Create the share
+ * + * {@codesnippet com.azure.storage.file.shareClient.create} + * + * @return A response containing information about the share and the status its creation. + * @throws StorageErrorException If the share already exists with different metadata + */ + public ResponseCode Samples
+ * + *Create the share with metadata "share:metadata"
+ * + *+ * Response<ShareInfo> response = client.createShare(Collections.singletonMap("share", "metadata"), null); + * System.out.printf("Creating the share completed with status code %d", response.statusCode()); + *+ * + *
Create the share with a quota of 10 GB
+ * + *+ * Response<ShareInfo> response = client.createShare(null, 10); + * System.out.printf("Creating the share completed with status code %d", response.statusCode()); + *+ * + * @param metadata Optional. Metadata to associate with the share + * @param quotaInGB Optional. Maximum size the share is allowed to grow to in GB. This must be greater than 0 and + * less than or equal to 5120. The default value is 5120. + * @return A response containing information about the share and the status its creation. + * @throws StorageErrorException If the share already exists with different metadata or {@code quotaInGB} is outside the + * allowed range. + */ + public Response