+ * This client is instantiated through {@link DataLakePathClientBuilder} or retrieved via + * {@link DataLakeFileSystemAsyncClient#getDirectoryAsyncClient(String) getDirectoryAsyncClient}. + * + *
+ * Please refer to the Azure + * Docs for more information. + */ +public final class DataLakeDirectoryAsyncClient extends DataLakePathAsyncClient { + + private final ClientLogger logger = new ClientLogger(DataLakeDirectoryAsyncClient.class); + + /** + * Package-private constructor for use by {@link DataLakePathClientBuilder}. + * + * @param pipeline The pipeline used to send and receive service requests. + * @param url The endpoint where service requests are sent. + * @param serviceVersion The version of the service to be used when making requests. + * @param accountName The storage account name. + * @param fileSystemName The file system name. + * @param directoryName The directory name. + * @param blockBlobAsyncClient The underlying {@link BlockBlobAsyncClient}. + */ + DataLakeDirectoryAsyncClient(HttpPipeline pipeline, String url, DataLakeServiceVersion serviceVersion, + String accountName, String fileSystemName, String directoryName, BlockBlobAsyncClient blockBlobAsyncClient) { + super(pipeline, url, serviceVersion, accountName, fileSystemName, directoryName, blockBlobAsyncClient); + } + + DataLakeDirectoryAsyncClient(DataLakePathAsyncClient dataLakePathAsyncClient) { + super(dataLakePathAsyncClient.getHttpPipeline(), dataLakePathAsyncClient.getPathUrl(), + dataLakePathAsyncClient.getServiceVersion(), dataLakePathAsyncClient.getAccountName(), + dataLakePathAsyncClient.getFileSystemName(), dataLakePathAsyncClient.getObjectPath(), + dataLakePathAsyncClient.getBlockBlobAsyncClient()); + } + + /** + * Gets the URL of the directory represented by this client on the Data Lake service. + * + * @return the URL. + */ + public String getDirectoryUrl() { + return getPathUrl(); + } + + /** + * Gets the path of this directory, not including the name of the resource itself. + * + * @return The path of the directory. + */ + public String getDirectoryPath() { + return getObjectPath(); + } + + /** + * Gets the name of this directory, not including its full path. + * + * @return The name of the directory. + */ + public String getDirectoryName() { + return getObjectName(); + } + + /** + * Creates a directory. + * + *
Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.create} + * + *For more information see the + * Azure + * Docs
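As a concrete companion to the snippet placeholder above, here is a minimal usage sketch. The endpoint format, credential, and variable names are illustrative assumptions, and the builder setters mirror other Azure Storage client builders:

import com.azure.storage.common.StorageSharedKeyCredential;
import com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient;
import com.azure.storage.file.datalake.DataLakePathClientBuilder;

public class CreateDirectorySample {
    public static void main(String[] args) {
        // Build an async directory client (hypothetical account name and key).
        DataLakeDirectoryAsyncClient dirClient = new DataLakePathClientBuilder()
            .endpoint("https://<account>.dfs.core.windows.net")
            .credential(new StorageSharedKeyCredential("<account>", "<key>"))
            .fileSystemName("myfilesystem")
            .pathName("mydir")
            .buildDirectoryAsyncClient();

        // No network call happens until subscribe() is invoked on the returned Mono.
        dirClient.create().subscribe(pathInfo ->
            System.out.printf("Created directory, eTag: %s%n", pathInfo.getETag()));
    }
}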
+ * + * @return A reactive response containing information about the created directory. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createWithResponse#PathHttpHeaders-Map-DataLakeRequestConditions-String-String} + * + *For more information see the + * Azure + * Docs
+ * + * @param headers {@link PathHttpHeaders} + * @param metadata Metadata to associate with the resource. + * @param accessConditions {@link DataLakeRequestConditions} + * @param permissions POSIX access permissions for the directory owner, the directory owning group, and others. + * @param umask Restricts permissions of the directory to be created. + * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} contains a {@link + * PathInfo}. + */ + public Mono + + /** + * Deletes a directory. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.delete} + * + *For more information see the + * Azure + * Docs
+ * + * @return A reactive response signalling completion. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteWithResponse#boolean-DataLakeRequestConditions} + * + *For more information see the + * Azure + * Docs
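A sketch of a recursive, conditional delete, assuming an existing client and an active lease id (both hypothetical here):

import com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient;
import com.azure.storage.file.datalake.models.DataLakeRequestConditions;

class DeleteDirectorySample {
    // Deletes the directory and everything beneath it, guarded by a lease.
    static void deleteRecursively(DataLakeDirectoryAsyncClient dirClient, String leaseId) {
        DataLakeRequestConditions accessConditions = new DataLakeRequestConditions().setLeaseId(leaseId);
        dirClient.deleteWithResponse(true, accessConditions) // recursive = true
            .subscribe(response ->
                System.out.printf("Delete returned status %d%n", response.getStatusCode()));
    }
}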
+ * + * @param recursive Whether or not to delete all paths beneath the directory. + * @param accessConditions {@link DataLakeRequestConditions} + * + * @return A reactive response signalling completion. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.getFileAsyncClient#String} + * + * @param fileName A {@code String} representing the name of the file. + * @return A new {@link DataLakeFileAsyncClient} object which references the file with the specified name in this + * directory. + */ + public DataLakeFileAsyncClient getFileAsyncClient(String fileName) { + if (ImplUtils.isNullOrEmpty(fileName)) { + throw logger.logExceptionAsError(new IllegalArgumentException("'fileName' cannot be set to null or empty")); + } + BlockBlobAsyncClient blockBlobAsyncClient = prepareBuilderAppendPath(fileName).buildBlockBlobAsyncClient(); + + return new DataLakeFileAsyncClient(getHttpPipeline(), + StorageImplUtils.appendToUrlPath(getPathUrl(), Utility.urlEncode(Utility.urlDecode(fileName))).toString(), + getServiceVersion(), getAccountName(), getFileSystemName(), getObjectPath() + "/" + + Utility.urlDecode(fileName), blockBlobAsyncClient); + } + + /** + * Creates a new file within a directory. If a file with the same name already exists, the file will be + * overwritten. For more information, see the + * Azure + * Docs. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFile#String} + * + * @param fileName Name of the file to create. + * @return A {@link Mono} containing a {@link DataLakeFileAsyncClient} used to interact with the file created. + */ + public MonoCode Samples
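A sketch of the createFileWithResponse overload documented below, supplying headers, metadata, and POSIX permissions; the file name and option values are assumptions:

import java.util.Collections;
import java.util.Map;
import com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient;
import com.azure.storage.file.datalake.models.PathHttpHeaders;

class CreateFileSample {
    static void createFile(DataLakeDirectoryAsyncClient dirClient) {
        PathHttpHeaders httpHeaders = new PathHttpHeaders().setContentType("text/plain");
        Map<String, String> metadata = Collections.singletonMap("origin", "sample");
        // permissions "0744" and umask "0022" are illustrative POSIX values.
        dirClient.createFileWithResponse("report.txt", httpHeaders, metadata, null, "0744", "0022")
            .subscribe(response ->
                System.out.printf("Created file at %s%n", response.getValue().getFilePath()));
    }
}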
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFileWithResponse#String-PathHttpHeaders-Map-DataLakeRequestConditions-String-String} + * + * @param fileName Name of the file to create. + * @param headers {@link PathHttpHeaders} + * @param metadata Metadata to associate with the file. + * @param accessConditions {@link DataLakeRequestConditions} + * @param permissions POSIX access permissions for the file owner, the file owning group, and others. + * @param umask Restricts permissions of the file to be created. + * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} contains a {@link + * DataLakeFileAsyncClient} used to interact with the file created. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteFile#String} + * + * @param fileName Name of the file to delete. + * @return A reactive response signalling completion. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteFileWithResponse#String-DataLakeRequestConditions} + * + * @param fileName Name of the file to delete. + * @param accessConditions {@link DataLakeRequestConditions} + * @return A {@link Mono} containing status code and HTTP headers. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.getSubDirectoryAsyncClient#String} + * + * @param subDirectoryName A {@code String} representing the name of the sub-directory. + * @return A new {@link DataLakeDirectoryAsyncClient} object which references the sub-directory with the specified + * name in this directory. + */ + public DataLakeDirectoryAsyncClient getSubDirectoryAsyncClient(String subDirectoryName) { + if (ImplUtils.isNullOrEmpty(subDirectoryName)) { + throw logger.logExceptionAsError(new IllegalArgumentException("'subDirectoryName' cannot be set to null or empty")); + } + BlockBlobAsyncClient blockBlobAsyncClient = prepareBuilderAppendPath(subDirectoryName) + .buildBlockBlobAsyncClient(); + + return new DataLakeDirectoryAsyncClient(getHttpPipeline(), + StorageImplUtils.appendToUrlPath(getPathUrl(), Utility.urlEncode(Utility.urlDecode(subDirectoryName))) + .toString(), getServiceVersion(), getAccountName(), getFileSystemName(), getObjectPath() + "/" + + Utility.urlDecode(subDirectoryName), blockBlobAsyncClient); + } + + /** + * Creates a new sub-directory within a directory. If a sub-directory with the same name already exists, the + * sub-directory will be overwritten. For more information, see the + * Azure Docs. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubDirectory#String} + * + * @param subDirectoryName Name of the sub-directory to create. + * @return A {@link Mono} containing a {@link DataLakeDirectoryAsyncClient} used to interact with the directory + * created. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubDirectoryWithResponse#String-PathHttpHeaders-Map-DataLakeRequestConditions-String-String} + * + * @param subDirectoryName Name of the sub-directory to create. + * @param headers {@link PathHttpHeaders} + * @param metadata Metadata to associate with the sub-directory. + * @param accessConditions {@link DataLakeRequestConditions} + * @param permissions POSIX access permissions for the sub-directory owner, the sub-directory owning group, and + * others. + * @param umask Restricts permissions of the sub-directory to be created. + * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} contains a {@link + * DataLakeDirectoryAsyncClient} used to interact with the sub-directory created. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteSubDirectory#String} + * + * @param subDirectoryName Name of the sub-directory to delete. + * @return A reactive response signalling completion. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteSubDirectoryWithResponse#String-boolean-DataLakeRequestConditions} + * + * @param subDirectoryName Name of the sub-directory to delete. + * @param recursive Whether or not to delete all paths beneath the sub-directory. + * @param accessConditions {@link DataLakeRequestConditions} + * @return A {@link Mono} containing status code and HTTP headers. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename#String} + * + * @param destinationPath Relative path from the file system to rename the directory to, excludes the file system + * name. For example if you want to move a directory with fileSystem = "myfilesystem", path = "mydir/mysubdir" to + * another path in myfilesystem (ex: newdir) then set the destinationPath = "newdir" + * @return A {@link Mono} containing a {@link DataLakeDirectoryAsyncClient} used to interact with the new directory + * created. + */ + public MonoCode Samples
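A sketch of a guarded rename, assuming the destination "newdir" must not already exist (enforced here with an ifNoneMatch="*" condition against the destination):

import com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient;
import com.azure.storage.file.datalake.models.DataLakeRequestConditions;

class RenameDirectorySample {
    static void rename(DataLakeDirectoryAsyncClient dirClient) {
        DataLakeRequestConditions destConditions = new DataLakeRequestConditions().setIfNoneMatch("*");
        dirClient.renameWithResponse("newdir", null, destConditions)
            .subscribe(response ->
                System.out.printf("New directory URL: %s%n", response.getValue().getDirectoryUrl()));
    }
}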
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.renameWithResponse#String-DataLakeRequestConditions-DataLakeRequestConditions} + * + * @param destinationPath Relative path from the file system to rename the directory to, excludes the file system + * name. For example if you want to move a directory with fileSystem = "myfilesystem", path = "mydir/mysubdir" to + * another path in myfilesystem (ex: newdir) then set the destinationPath = "newdir" + * @param sourceAccessConditions {@link DataLakeRequestConditions} against the source. + * @param destAccessConditions {@link DataLakeRequestConditions} against the destination. + * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} contains a {@link + * DataLakeDirectoryAsyncClient} used to interact with the directory created. + */ + public Mono+ * This client is instantiated through {@link DataLakePathClientBuilder} or retrieved via + * {@link DataLakeFileSystemClient#getDirectoryClient(String) getDirectoryClient}. + * + *
+ * Please refer to the Azure + * Docs for more information. + */ +public class DataLakeDirectoryClient extends DataLakePathClient { + private final ClientLogger logger = new ClientLogger(DataLakeDirectoryClient.class); + + private final DataLakeDirectoryAsyncClient dataLakeDirectoryAsyncClient; + + DataLakeDirectoryClient(DataLakeDirectoryAsyncClient pathAsyncClient, BlockBlobClient blockBlobClient) { + super(pathAsyncClient, blockBlobClient); + this.dataLakeDirectoryAsyncClient = pathAsyncClient; + } + + private DataLakeDirectoryClient(DataLakePathClient dataLakePathClient) { + super(dataLakePathClient.dataLakePathAsyncClient, dataLakePathClient.blockBlobClient); + this.dataLakeDirectoryAsyncClient = new DataLakeDirectoryAsyncClient( + dataLakePathClient.dataLakePathAsyncClient); + } + + /** + * Gets the URL of the directory represented by this client on the Data Lake service. + * + * @return the URL. + */ + public String getDirectoryUrl() { + return getPathUrl(); + } + + /** + * Gets the path of this directory, not including the name of the resource itself. + * + * @return The path of the directory. + */ + public String getDirectoryPath() { + return getObjectPath(); + } + + /** + * Gets the name of this directory, not including its full path. + * + * @return The name of the directory. + */ + public String getDirectoryName() { + return getObjectName(); + } + + /** + * Creates a directory. + * + *
Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.create} + * + *For more information see the + * Azure + * Docs
+ * + * @return Information about the created directory. + */ + public PathInfo create() { + return createWithResponse(null, null, null, null, null, null, Context.NONE).getValue(); + } + + /** + * Creates a directory. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.createWithResponse#PathHttpHeaders-Map-DataLakeRequestConditions-String-String-Duration-Context} + * + *For more information see the + * Azure + * Docs
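A sketch of the synchronous overload below, bounding the call with a timeout; null option arguments accept service defaults, and the permission values are illustrative:

import java.time.Duration;
import com.azure.core.http.rest.Response;
import com.azure.core.util.Context;
import com.azure.storage.file.datalake.DataLakeDirectoryClient;
import com.azure.storage.file.datalake.models.PathInfo;

class CreateDirectoryWithResponseSample {
    static void create(DataLakeDirectoryClient dirClient) {
        // Raises a RuntimeException if the service does not respond within 30 seconds.
        Response<PathInfo> response = dirClient.createWithResponse(
            null, null, null, "0755", "0022", Duration.ofSeconds(30), Context.NONE);
        System.out.printf("Status %d, eTag %s%n",
            response.getStatusCode(), response.getValue().getETag());
    }
}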
+ * + * @param headers {@link PathHttpHeaders} + * @param metadata Metadata to associate with the resource. + * @param accessConditions {@link DataLakeRequestConditions} + * @param permissions POSIX access permissions for the directory owner, the directory owning group, and others. + * @param umask Restricts permissions of the directory to be created. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * + * @return A response containing information about the created directory. + */ + public Response + + /** + * Deletes a directory. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.delete} + * + *For more information see the + * Azure + * Docs
+ */ + public void delete() { + deleteWithResponse(false, null, null, null).getValue(); + } + + /** + * Deletes a directory. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.deleteWithResponse#boolean-DataLakeRequestConditions-Duration-Context} + * + *For more information see the + * Azure + * Docs
+ * + * @param recursive Whether or not to delete all paths beneath the directory. + * @param accessConditions {@link DataLakeRequestConditions} + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * + * @return A response containing status code and HTTP headers. + */ + public ResponseCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.getFileClient#String} + * + * @param fileName A {@code String} representing the name of the file. + * @return A new {@link DataLakeFileClient} object which references the file with the specified name in this + * directory. + */ + public DataLakeFileClient getFileClient(String fileName) { + if (ImplUtils.isNullOrEmpty(fileName)) { + throw logger.logExceptionAsError(new IllegalArgumentException("'fileName' cannot be set to null or empty")); + } + return new DataLakeFileClient(dataLakeDirectoryAsyncClient.getFileAsyncClient(fileName), + dataLakeDirectoryAsyncClient.prepareBuilderAppendPath(fileName).buildBlockBlobClient()); + } + + /** + * Creates a new file within a directory. If a file with the same name already exists, the file will be + * overwritten. For more information, see the + * Azure Docs. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.createFile#String} + * + * @param fileName Name of the file to create. + * @return A {@link DataLakeFileClient} used to interact with the file created. + */ + public DataLakeFileClient createFile(String fileName) { + return createFileWithResponse(fileName, null, null, null, null, null, null, null).getValue(); + } + + /** + * Creates a new file within a directory. If a file with the same name already exists, the file will be + * overwritten. For more information, see the + * Azure Docs. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.createFileWithResponse#String-PathHttpHeaders-Map-DataLakeRequestConditions-String-String-Duration-Context} + * + * @param fileName Name of the file to create. + * @param headers {@link PathHttpHeaders} + * @param metadata Metadata to associate with the file. + * @param accessConditions {@link DataLakeRequestConditions} + * @param permissions POSIX access permissions for the file owner, the file owning group, and others. + * @param umask Restricts permissions of the file to be created. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * + * @return A {@link Response} whose {@link Response#getValue() value} contains the {@link DataLakeFileClient} used + * to interact with the file created. + */ + public ResponseCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.deleteFile#String} + * + * @param fileName Name of the file to delete. + */ + public void deleteFile(String fileName) { + deleteFileWithResponse(fileName, null, null, null); + } + + /** + * Deletes the specified file in the directory. If the file doesn't exist the operation fails. + * For more information see the Azure + * Docs. + * + *Code Samples
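A sketch of a conditional delete that only succeeds while the file still carries an expected ETag (the client, file name, and ETag are assumed inputs):

import java.time.Duration;
import com.azure.core.util.Context;
import com.azure.storage.file.datalake.DataLakeDirectoryClient;
import com.azure.storage.file.datalake.models.DataLakeRequestConditions;

class DeleteFileSample {
    static void deleteIfUnchanged(DataLakeDirectoryClient dirClient, String fileName, String eTag) {
        DataLakeRequestConditions accessConditions = new DataLakeRequestConditions().setIfMatch(eTag);
        int status = dirClient
            .deleteFileWithResponse(fileName, accessConditions, Duration.ofSeconds(10), Context.NONE)
            .getStatusCode();
        System.out.printf("Delete returned %d%n", status);
    }
}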
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.deleteFileWithResponse#String-DataLakeRequestConditions-Duration-Context} + * + * @param fileName Name of the file to delete. + * @param accessConditions {@link DataLakeRequestConditions} + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * @return A response containing status code and HTTP headers + */ + public ResponseCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.getSubDirectoryClient#String} + * + * @param subDirectoryName A {@code String} representing the name of the sub-directory. + * @return A new {@link DataLakeDirectoryClient} object which references the sub-directory with the specified name + * in this directory. + */ + public DataLakeDirectoryClient getSubDirectoryClient(String subDirectoryName) { + if (ImplUtils.isNullOrEmpty(subDirectoryName)) { + throw logger.logExceptionAsError(new IllegalArgumentException("'subDirectoryName' cannot be set to null or empty")); + } + return new DataLakeDirectoryClient(dataLakeDirectoryAsyncClient.getSubDirectoryAsyncClient(subDirectoryName), + dataLakeDirectoryAsyncClient.prepareBuilderAppendPath(subDirectoryName).buildBlockBlobClient()); + } + + /** + * Creates a new sub-directory within a directory. If a sub-directory with the same name already exists, the + * sub-directory will be overwritten. For more information, see the + * Azure Docs. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.createSubDirectory#String} + * + * @param subDirectoryName Name of the sub-directory to create. + * @return A {@link DataLakeDirectoryClient} used to interact with the sub-directory created. + */ + public DataLakeDirectoryClient createSubDirectory(String subDirectoryName) { + return createSubDirectoryWithResponse(subDirectoryName, null, null, null, null, null, null, null).getValue(); + } + + /** + * Creates a new sub-directory within a directory. If a sub-directory with the same name already exists, the + * sub-directory will be overwritten. For more information, see the + * Azure Docs. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.createSubDirectoryWithResponse#String-PathHttpHeaders-Map-DataLakeRequestConditions-String-String-Duration-Context} + * + * @param subDirectoryName Name of the sub-directory to create. + * @param headers {@link PathHttpHeaders} + * @param metadata Metadata to associate with the sub-directory. + * @param accessConditions {@link DataLakeRequestConditions} + * @param permissions POSIX access permissions for the sub-directory owner, the sub-directory owning group, and + * others. + * @param umask Restricts permissions of the sub-directory to be created. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * + * @return A {@link Response} whose {@link Response#getValue() value} contains a {@link DataLakeDirectoryClient} + * used to interact with the sub-directory created. + */ + public ResponseCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.deleteSubDirectory#String} + * + * @param subDirectoryName Name of the sub-directory to delete. + */ + public void deleteSubDirectory(String subDirectoryName) { + deleteSubDirectoryWithResponse(subDirectoryName, false, null, null, null); + } + + /** + * Deletes the specified sub-directory in the directory. If the sub-directory doesn't exist or is not empty the + * operation fails. + * For more information see the Azure + * Docs. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.deleteSubDirectoryWithResponse#String-boolean-DataLakeRequestConditions-Duration-Context} + * + * @param subDirectoryName Name of the sub-directory to delete. + * @param recursive Whether or not to delete all paths beneath the sub-directory. + * @param accessConditions {@link DataLakeRequestConditions} + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * @return A response containing status code and HTTP headers + */ + public ResponseCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.rename#String} + * + * @param destinationPath Relative path from the file system to rename the directory to, excludes the file system + * name. For example if you want to move a directory with fileSystem = "myfilesystem", path = "mydir/mysubdir" to + * another path in myfilesystem (ex: newdir) then set the destinationPath = "newdir" + * @return A {@link DataLakeDirectoryClient} used to interact with the new directory created. + */ + public DataLakeDirectoryClient rename(String destinationPath) { + return renameWithResponse(destinationPath, null, null, null, null).getValue(); + } + + /** + * Moves the directory to another location within the file system. + * For more information, see the + * Azure Docs. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.renameWithResponse#String-DataLakeRequestConditions-DataLakeRequestConditions-Duration-Context} + * + * @param destinationPath Relative path from the file system to rename the directory to, excludes the file system + * name. For example if you want to move a directory with fileSystem = "myfilesystem", path = "mydir/mysubdir" to + * another path in myfilesystem (ex: newdir) then set the destinationPath = "newdir" + * @param sourceAccessConditions {@link DataLakeRequestConditions} against the source. + * @param destAccessConditions {@link DataLakeRequestConditions} against the destination. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * + * @return A {@link Response} whose {@link Response#getValue() value} contains a + * {@link DataLakeDirectoryClient} used to interact with the directory created. + */ + public Response+ * This client is instantiated through {@link DataLakePathClientBuilder} or retrieved via + * {@link DataLakeFileSystemAsyncClient#getFileAsyncClient(String)}. + * + *
+ * Please refer to the Azure + * Docs for more information. + */ +public class DataLakeFileAsyncClient extends DataLakePathAsyncClient { + + private final ClientLogger logger = new ClientLogger(DataLakeFileAsyncClient.class); + + /** + * Package-private constructor for use by {@link DataLakePathClientBuilder}. + * + * @param pipeline The pipeline used to send and receive service requests. + * @param url The endpoint where service requests are sent. + * @param serviceVersion The version of the service to be used when making requests. + * @param accountName The storage account name. + * @param fileSystemName The file system name. + * @param fileName The file name. + * @param blockBlobAsyncClient The underlying {@link BlockBlobAsyncClient}. + */ + DataLakeFileAsyncClient(HttpPipeline pipeline, String url, DataLakeServiceVersion serviceVersion, + String accountName, String fileSystemName, String fileName, BlockBlobAsyncClient blockBlobAsyncClient) { + super(pipeline, url, serviceVersion, accountName, fileSystemName, fileName, blockBlobAsyncClient); + } + + DataLakeFileAsyncClient(DataLakePathAsyncClient pathAsyncClient) { + super(pathAsyncClient.getHttpPipeline(), pathAsyncClient.getPathUrl(), pathAsyncClient.getServiceVersion(), + pathAsyncClient.getAccountName(), pathAsyncClient.getFileSystemName(), pathAsyncClient.getObjectPath(), + pathAsyncClient.getBlockBlobAsyncClient()); + } + + /** + * Gets the URL of the file represented by this client on the Data Lake service. + * + * @return the URL. + */ + public String getFileUrl() { + return getPathUrl(); + } + + /** + * Gets the path of this file, not including the name of the resource itself. + * + * @return The path of the file. + */ + public String getFilePath() { + return getObjectPath(); + } + + /** + * Gets the name of this file, not including its full path. + * + * @return The name of the file. + */ + public String getFileName() { + return getObjectName(); + } + + /** + * Creates a file. + * + *
Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.create} + * + *For more information see the + * Azure + * Docs
+ * + * @return A reactive response containing information about the created file. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.createWithResponse#PathHttpHeaders-Map-DataLakeRequestConditions-String-String} + * + *For more information see the + * Azure + * Docs
+ * + * @param headers {@link PathHttpHeaders} + * @param metadata Metadata to associate with the resource. + * @param accessConditions {@link DataLakeRequestConditions} + * @param permissions POSIX access permissions for the file owner, the file owning group, and others. + * @param umask Restricts permissions of the file to be created. + * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} contains a {@link + * PathInfo}. + */ + public Mono + + /** + * Deletes a file. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.delete} + * + *For more information see the + * Azure + * Docs
+ * + * @return A reactive response signalling completion. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.deleteWithResponse#DataLakeRequestConditions} + * + *For more information see the + * Azure + * Docs
+ * + * @param accessConditions {@link DataLakeRequestConditions} + * + * @return A reactive response signalling completion. + */ + public Mono + + /** + * Appends data to the specified resource to later be flushed (written) by a call to flush. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.append#Flux-long-long} + * + *For more information, see the + * Azure + * Docs
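A sketch of the append-then-flush pattern this method participates in; the length passed to append and the position passed to flush must line up with the bytes actually emitted:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import reactor.core.publisher.Flux;
import com.azure.storage.file.datalake.DataLakeFileAsyncClient;

class AppendFlushSample {
    static void upload(DataLakeFileAsyncClient fileClient) {
        byte[] bytes = "hello data lake".getBytes(StandardCharsets.UTF_8);
        // Append at offset 0, then flush at position == total bytes written.
        fileClient.append(Flux.just(ByteBuffer.wrap(bytes)), 0, bytes.length)
            .then(fileClient.flush(bytes.length))
            .subscribe(pathInfo ->
                System.out.printf("Flushed; last modified %s%n", pathInfo.getLastModified()));
    }
}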
+ * + * @param data The data to write to the file. + * @param fileOffset The position where the data is to be appended. + * @param length The exact length of the data. It is important that this value match precisely the length of the + * data emitted by the {@code Flux}. + * + * @return A reactive response signalling completion. + */ + public Mono + + /** + * Appends data to the specified resource to later be flushed (written) by a call to flush. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.appendWithResponse#Flux-long-long-byte-String} + * + *For more information, see the + * Azure + * Docs
+ * + * @param data The data to write to the file. + * @param fileOffset The position where the data is to be appended. + * @param length The exact length of the data. It is important that this value match precisely the length of the + * data emitted by the {@code Flux}. + * @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the + * received data and fail the request if it does not match the provided MD5. + * @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on + * the file. + * + * @return A reactive response signalling completion. + */ + public Mono + + /** + * Flushes (writes) data previously appended to the file through a call to append. + * The previously uploaded data must be contiguous. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.flush#long} + * + *For more information, see the + * Azure + * Docs
+ * + * @param position The length of the file after all data has been written. + * + * @return A reactive response containing the information of the created resource. + */ + public Mono + + /** + * Flushes (writes) data previously appended to the file through a call to append. + * The previously uploaded data must be contiguous. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.flushWithResponse#long-boolean-boolean-PathHttpHeaders-DataLakeRequestConditions} + * + *For more information, see the + * Azure + * Docs
+ * + * @param position The length of the file after all data has been written. + * @param retainUncommittedData Whether or not uncommitted data is to be retained after the operation. + * @param close Whether or not a file changed event raised indicates completion (true) or modification (false). + * @param httpHeaders {@link PathHttpHeaders httpHeaders} + * @param accessConditions {@link DataLakeRequestConditions accessConditions} + * + * @return A reactive response containing the information of the created resource. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.read} + * + *For more information, see the + * Azure Docs
+ * + * @return A reactive response containing the file data. + */ + public FluxCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.readWithResponse#FileRange-DownloadRetryOptions-DataLakeRequestConditions-boolean} + * + *For more information, see the + * Azure Docs
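A sketch of a ranged read with bounded retries, assuming (as with the blob download APIs) that the response value is the content as a Flux of ByteBuffer that FluxUtil can collect:

import com.azure.core.util.FluxUtil;
import com.azure.storage.file.datalake.DataLakeFileAsyncClient;
import com.azure.storage.file.datalake.models.DownloadRetryOptions;
import com.azure.storage.file.datalake.models.FileRange;

class ReadRangeSample {
    static void readFirstKb(DataLakeFileAsyncClient fileClient) {
        // First 1024 bytes; up to 3 retries on dropped connections.
        fileClient.readWithResponse(new FileRange(0, 1024L),
                new DownloadRetryOptions().setMaxRetryRequests(3), null, false)
            .flatMap(response -> FluxUtil.collectBytesInByteBufferStream(response.getValue()))
            .subscribe(bytes -> System.out.printf("Read %d bytes%n", bytes.length));
    }
}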
+ * + * @param range {@link FileRange} + * @param options {@link DownloadRetryOptions} + * @param accessConditions {@link DataLakeRequestConditions} + * @param rangeGetContentMD5 Whether the contentMD5 for the specified file range should be returned. + * @return A reactive response containing the file data. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.rename#String} + * + * @param destinationPath Relative path from the file system to rename the file to, excludes the file system name. + * For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path + * in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt" + * @return A {@link Mono} containing a {@link DataLakeFileAsyncClient} used to interact with the new file created. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.renameWithResponse#String-DataLakeRequestConditions-DataLakeRequestConditions} + * + * @param destinationPath Relative path from the file system to rename the file to, excludes the file system name. + * For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path + * in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt" + * @param sourceAccessConditions {@link DataLakeRequestConditions} against the source. + * @param destAccessConditions {@link DataLakeRequestConditions} against the destination. + * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} contains a {@link + * DataLakeFileAsyncClient} used to interact with the file created. + */ + public Mono+ * This client is instantiated through {@link DataLakePathClientBuilder} or retrieved via + * {@link DataLakeFileSystemClient#getFileClient(String) getFileClient}. + * + *
+ * Please refer to the Azure + * Docs for more information. + */ +public class DataLakeFileClient extends DataLakePathClient { + + private final ClientLogger logger = new ClientLogger(DataLakeFileClient.class); + + private final DataLakeFileAsyncClient dataLakeFileAsyncClient; + + DataLakeFileClient(DataLakeFileAsyncClient pathAsyncClient, BlockBlobClient blockBlobClient) { + super(pathAsyncClient, blockBlobClient); + this.dataLakeFileAsyncClient = pathAsyncClient; + } + + private DataLakeFileClient(DataLakePathClient dataLakePathClient) { + super(dataLakePathClient.dataLakePathAsyncClient, dataLakePathClient.blockBlobClient); + this.dataLakeFileAsyncClient = new DataLakeFileAsyncClient(dataLakePathClient.dataLakePathAsyncClient); + } + + /** + * Gets the URL of the file represented by this client on the Data Lake service. + * + * @return the URL. + */ + public String getFileUrl() { + return getPathUrl(); + } + + /** + * Gets the path of this file, not including the name of the resource itself. + * + * @return The path of the file. + */ + public String getFilePath() { + return getObjectPath(); + } + + /** + * Gets the name of this file, not including its full path. + * + * @return The name of the file. + */ + public String getFileName() { + return getObjectName(); + } + + /** + * Creates a file. + * + *
Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.create} + * + *For more information see the + * Azure + * Docs
+ * + * @return Information about the created file. + */ + public PathInfo create() { + return createWithResponse(null, null, null, null, null, null, Context.NONE).getValue(); + } + + /** + * Creates a file. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.createWithResponse#PathHttpHeaders-Map-DataLakeRequestConditions-String-String-Duration-Context} + * + *For more information see the + * Azure + * Docs
+ * + * @param headers {@link PathHttpHeaders} + * @param metadata Metadata to associate with the resource. + * @param accessConditions {@link DataLakeRequestConditions} + * @param permissions POSIX access permissions for the file owner, the file owning group, and others. + * @param umask Restricts permissions of the file to be created. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * @return A response containing information about the created file. + */ + public Response + + /** + * Deletes a file. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.delete} + * + *For more information see the + * Azure + * Docs
+ */ + public void delete() { + deleteWithResponse(null, null, Context.NONE).getValue(); + } + + /** + * Deletes a file. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.deleteWithResponse#DataLakeRequestConditions-Duration-Context} + * + *For more information see the + * Azure + * Docs
+ * + * @param accessConditions {@link DataLakeRequestConditions} + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * + * @return A response containing status code and HTTP headers. + */ + public Response + + /** + * Appends data to the specified resource to later be flushed (written) by a call to flush. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.append#InputStream-long-long} + * + *For more information, see the + * Azure + * Docs
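A sketch of the synchronous append/flush pair using an in-memory stream; the length argument must equal the exact number of bytes in the stream:

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import com.azure.storage.file.datalake.DataLakeFileClient;
import com.azure.storage.file.datalake.models.PathInfo;

class SyncAppendFlushSample {
    static void upload(DataLakeFileClient fileClient) {
        byte[] bytes = "hello data lake".getBytes(StandardCharsets.UTF_8);
        InputStream data = new ByteArrayInputStream(bytes);
        fileClient.append(data, 0, bytes.length); // write at offset 0
        PathInfo info = fileClient.flush(bytes.length); // commit the appended range
        System.out.printf("Flushed; eTag %s%n", info.getETag());
    }
}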
+ * + * @param data The data to write to the file. + * @param fileOffset The position where the data is to be appended. + * @param length The exact length of the data. + */ + public void append(InputStream data, long fileOffset, long length) { + appendWithResponse(data, fileOffset, length, null, null, null, Context.NONE); + } + + /** + * Appends data to the specified resource to later be flushed (written) by a call to flush. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse#InputStream-long-long-byte-String-Duration-Context} + * + *For more information, see the + * Azure + * Docs
+ * + * @param data The data to write to the file. + * @param fileOffset The position where the data is to be appended. + * @param length The exact length of the data. + * @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the + * received data and fail the request if it does not match the provided MD5. + * @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on + * the file. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * + * @return A response containing status code and HTTP headers. + */ + public Response + + /** + * Flushes (writes) data previously appended to the file through a call to append. + * The previously uploaded data must be contiguous. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.flush#long} + * + *For more information, see the + * Azure + * Docs
+ * + * @param position The length of the file after all data has been written. + * + * @return Information about the created resource. + */ + public PathInfo flush(long position) { + return flushWithResponse(position, false, false, null, null, null, Context.NONE).getValue(); + } + + /** + * Flushes (writes) data previously appended to the file through a call to append. + * The previously uploaded data must be contiguous. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse#long-boolean-boolean-PathHttpHeaders-DataLakeRequestConditions-Duration-Context} + * + *For more information, see the + * Azure + * Docs
+ * + * @param position The length of the file after all data has been written. + * @param retainUncommittedData Whether or not uncommitted data is to be retained after the operation. + * @param close Whether or not a file changed event raised indicates completion (true) or modification (false). + * @param httpHeaders {@link PathHttpHeaders httpHeaders} + * @param accessConditions {@link DataLakeRequestConditions accessConditions} + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * + * @return A response containing the information of the created resource. + */ + public ResponseCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.read#OutputStream} + * + *For more information, see the + * Azure Docs
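A sketch that streams the whole file to local disk through the read(OutputStream) overload; the target path is an assumption:

import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import com.azure.storage.file.datalake.DataLakeFileClient;

class ReadToFileSample {
    static void download(DataLakeFileClient fileClient) throws IOException {
        try (OutputStream out = Files.newOutputStream(Paths.get("downloaded.txt"))) {
            fileClient.read(out); // blocks until the full content is written
        }
    }
}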
+ * + * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. + * @throws UncheckedIOException If an I/O error occurs. + * @throws NullPointerException if {@code stream} is null + */ + public void read(OutputStream stream) { + readWithResponse(stream, null, null, null, false, null, Context.NONE); + } + + /** + * Reads a range of bytes from a file into an output stream. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.readWithResponse#OutputStream-FileRange-DownloadRetryOptions-DataLakeRequestConditions-boolean-Duration-Context} + * + *For more information, see the + * Azure Docs
+ * + * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. + * @param range {@link FileRange} + * @param options {@link DownloadRetryOptions} + * @param accessConditions {@link DataLakeRequestConditions} + * @param rangeGetContentMD5 Whether the contentMD5 for the specified file range should be returned. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * + * @return A response containing status code and HTTP headers. + * @throws UncheckedIOException If an I/O error occurs. + * @throws NullPointerException if {@code stream} is null + */ + public FileReadResponse readWithResponse(OutputStream stream, FileRange range, DownloadRetryOptions options, + DataLakeRequestConditions accessConditions, boolean rangeGetContentMD5, Duration timeout, Context context) { + BlobDownloadResponse response = blockBlobClient.downloadWithResponse(stream, Transforms.toBlobRange(range), + Transforms.toBlobDownloadRetryOptions(options), Transforms.toBlobRequestConditions(accessConditions), + rangeGetContentMD5, timeout, context); + return Transforms.toFileReadResponse(response); + } + + /** + * Moves the file to another location within the file system. + * For more information see the + * Azure + * Docs. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.rename#String} + * + * @param destinationPath Relative path from the file system to rename the file to, excludes the file system name. + * For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path + * in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt" + * @return A {@link DataLakeFileClient} used to interact with the new file created. + */ + public DataLakeFileClient rename(String destinationPath) { + return renameWithResponse(destinationPath, null, null, null, null).getValue(); + } + + /** + * Moves the file to another location within the file system. + * For more information, see the + * Azure Docs. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.renameWithResponse#String-DataLakeRequestConditions-DataLakeRequestConditions-Duration-Context} + * + * @param destinationPath Relative path from the file system to rename the file to, excludes the file system name. + * For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path + * in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt" + * @param sourceAccessConditions {@link DataLakeRequestConditions} against the source. + * @param destAccessConditions {@link DataLakeRequestConditions} against the destination. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * + * @return A {@link Response} whose {@link Response#getValue() value} contains a {@link DataLakeFileClient} + * used to interact with the file created. + */ + public Response+ * This client contains operations on a file system. Operations on a path are available on + * {@link DataLakeFileAsyncClient} and {@link DataLakeDirectoryAsyncClient} through {@link #getFileAsyncClient(String)} + * and {@link #getDirectoryAsyncClient(String)} respectively, and operations on the service are available on + * {@link DataLakeServiceAsyncClient}. + * + *
+ * Please refer to the + * Azure Docs for more information on file systems. + * + *
+ * Note this client is an async client that returns reactive responses from Project Reactor + * (https://projectreactor.io/). Calling the methods in this client will NOT start the actual network + * operation until {@code .subscribe()} is called on the reactive response. You can simply convert one of these + * responses to a {@link java.util.concurrent.CompletableFuture} object through {@link Mono#toFuture()}. + */ +@ServiceClient(builder = DataLakeFileSystemClientBuilder.class, isAsync = true) +public class DataLakeFileSystemAsyncClient { + + public static final String ROOT_FILESYSTEM_NAME = "$root"; + +// public static final String STATIC_WEBSITE_FILESYSTEM_NAME = "$web"; + +// public static final String LOG_FILESYSTEM_NAME = "$logs"; + + private final ClientLogger logger = new ClientLogger(DataLakeFileSystemAsyncClient.class); + private final DataLakeStorageClientImpl azureDataLakeStorage; + private final BlobContainerAsyncClient blobContainerAsyncClient; + + private final String accountName; + private final String fileSystemName; + private final DataLakeServiceVersion serviceVersion; + + /** + * Package-private constructor for use by {@link DataLakeFileSystemClientBuilder}. + * + * @param pipeline The pipeline used to send and receive service requests. + * @param url The endpoint where service requests are sent. + * @param serviceVersion The version of the service to be used when making requests. + * @param accountName The storage account name. + * @param fileSystemName The file system name. + * @param blobContainerAsyncClient The underlying {@link BlobContainerAsyncClient}. + */ + DataLakeFileSystemAsyncClient(HttpPipeline pipeline, String url, DataLakeServiceVersion serviceVersion, + String accountName, String fileSystemName, BlobContainerAsyncClient blobContainerAsyncClient) { + this.azureDataLakeStorage = new DataLakeStorageClientBuilder() + .pipeline(pipeline) + .url(url) + .version(serviceVersion.getVersion()) + .build(); + this.serviceVersion = serviceVersion; + + this.accountName = accountName; + this.fileSystemName = fileSystemName; + this.blobContainerAsyncClient = blobContainerAsyncClient; + } + + /** + * Creates a new DataLakeFileAsyncClient object by concatenating fileName to the end of + * DataLakeFileSystemAsyncClient's URL. The new DataLakeFileAsyncClient uses the same request policy pipeline as + * the DataLakeFileSystemAsyncClient. + * + *
Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.getFileAsyncClient#String} + * + * @param fileName A {@code String} representing the name of the file. + * @return A new {@link DataLakeFileAsyncClient} object which references the file with the specified name in this + * file system. + */ + public DataLakeFileAsyncClient getFileAsyncClient(String fileName) { + if (ImplUtils.isNullOrEmpty(fileName)) { + throw logger.logExceptionAsError(new IllegalArgumentException("'fileName' can not be set to null")); + } + BlockBlobAsyncClient blockBlobAsyncClient = blobContainerAsyncClient.getBlobAsyncClient(fileName, + null).getBlockBlobAsyncClient(); + + return new DataLakeFileAsyncClient(getHttpPipeline(), + StorageImplUtils.appendToUrlPath(getFileSystemUrl(), Utility.urlEncode(Utility.urlDecode(fileName))) + .toString(), getServiceVersion(), getAccountName(), getFileSystemName(), fileName, + blockBlobAsyncClient); + } + + /** + * Creates a new DataLakeDirectoryAsyncClient object by concatenating directoryName to the end of + * DataLakeFileSystemAsyncClient's URL. The new DataLakeDirectoryAsyncClient uses the same request policy pipeline + * as the DataLakeFileSystemAsyncClient. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.getDirectoryAsyncClient#String} + * + * @param directoryName A {@code String} representing the name of the directory. + * @return A new {@link DataLakeDirectoryAsyncClient} object which references the directory with the specified name + * in this file system. + */ + public DataLakeDirectoryAsyncClient getDirectoryAsyncClient(String directoryName) { + if (ImplUtils.isNullOrEmpty(directoryName)) { + throw logger.logExceptionAsError(new IllegalArgumentException("'directoryName' can not be set to null")); + } + BlockBlobAsyncClient blockBlobAsyncClient = blobContainerAsyncClient.getBlobAsyncClient(directoryName, + null).getBlockBlobAsyncClient(); + return new DataLakeDirectoryAsyncClient(getHttpPipeline(), + StorageImplUtils.appendToUrlPath(getFileSystemUrl(), Utility.urlEncode(Utility.urlDecode(directoryName))) + .toString(), getServiceVersion(), getAccountName(), getFileSystemName(), directoryName, + blockBlobAsyncClient); + } + + /** + * Gets the URL of the file system represented by this client. + * + * @return the URL. + */ + public String getFileSystemUrl() { + return azureDataLakeStorage.getUrl(); + } + + /** + * Get the file system name. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.getFileSystemName} + * + * @return The name of file system. + */ + public String getFileSystemName() { + return fileSystemName; + } + + /** + * Get associated account name. + * + * @return account name associated with this storage resource. + */ + public String getAccountName() { + return this.accountName; + } + + /** + * Gets the service version the client is using. + * + * @return the service version the client is using. + */ + public DataLakeServiceVersion getServiceVersion() { + return serviceVersion; + } + + /** + * Gets the {@link HttpPipeline} powering this client. + * + * @return The pipeline. + */ + public HttpPipeline getHttpPipeline() { + return azureDataLakeStorage.getHttpPipeline(); + } + + /** + * Creates a new file system within a storage account. If a file system with the same name already exists, the + * operation fails. For more information, see the + * Azure Docs. + * + *Code Samples
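A sketch of building a file system client and creating the file system; the endpoint, credential, and names are illustrative, and the builder setters mirror other Azure Storage builders:

import com.azure.storage.common.StorageSharedKeyCredential;
import com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient;
import com.azure.storage.file.datalake.DataLakeFileSystemClientBuilder;

public class CreateFileSystemSample {
    public static void main(String[] args) {
        DataLakeFileSystemAsyncClient fsClient = new DataLakeFileSystemClientBuilder()
            .endpoint("https://<account>.dfs.core.windows.net")
            .credential(new StorageSharedKeyCredential("<account>", "<key>"))
            .fileSystemName("myfilesystem")
            .buildAsyncClient();

        fsClient.create().subscribe(
            ignored -> { }, // Mono<Void> emits no value
            error -> System.err.println("Create failed: " + error),
            () -> System.out.println("File system created"));
    }
}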
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.create} + * + * @return A reactive response signalling completion. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.createWithResponse#Map-PublicAccessType} + * + * @param metadata Metadata to associate with the file system. + * @param accessType Specifies how the data in this file system is available to the public. See the + * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access. + * @return A reactive response signalling completion. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.delete} + * + * @return A reactive response signalling completion. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.deleteWithResponse#DataLakeRequestConditions} + * + * @param accessConditions {@link DataLakeRequestConditions} + * @return A reactive response signalling completion. + * @throws UnsupportedOperationException If either {@link DataLakeRequestConditions#getIfMatch()} or + * {@link DataLakeRequestConditions#getIfNoneMatch()} is set. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.getProperties} + * + * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} contains the + * file system properties. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.getPropertiesWithResponse#String} + * + * @param leaseId The lease ID the active lease on the file system must match. + * @return A reactive response containing the file system properties. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.setMetadata#Map} + * + * @param metadata Metadata to associate with the file system. + * @return A reactive response signalling completion. + */ + public MonoCode Samples
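A sketch of replacing the file system's metadata under an active lease (the lease id is an assumed input; note that setting metadata replaces, rather than merges, any existing metadata):

import java.util.Collections;
import java.util.Map;
import com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient;
import com.azure.storage.file.datalake.models.DataLakeRequestConditions;

class SetMetadataSample {
    static void tag(DataLakeFileSystemAsyncClient fsClient, String leaseId) {
        Map<String, String> metadata = Collections.singletonMap("environment", "test");
        DataLakeRequestConditions accessConditions = new DataLakeRequestConditions().setLeaseId(leaseId);
        fsClient.setMetadataWithResponse(metadata, accessConditions)
            .subscribe(response ->
                System.out.printf("Metadata set, status %d%n", response.getStatusCode()));
    }
}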
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.setMetadataWithResponse#Map-DataLakeRequestConditions} + * + * @param metadata Metadata to associate with the file system. + * @param accessConditions {@link DataLakeRequestConditions} + * @return A {@link Mono} containing a {@link Response} signalling completion. + * @throws UnsupportedOperationException If one of {@link DataLakeRequestConditions#getIfMatch()}, + * {@link DataLakeRequestConditions#getIfNoneMatch()}, or {@link DataLakeRequestConditions#getIfUnmodifiedSince()} + * is set. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.listPaths} + * + * @return A reactive response emitting the list of files/directories. + */ + public PagedFluxCode Samples
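A sketch of a filtered listing, assuming ListPathsOptions exposes fluent setPath/setRecursive setters in the style of the blob listing options:

import com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient;
import com.azure.storage.file.datalake.models.ListPathsOptions;

class ListPathsSample {
    static void list(DataLakeFileSystemAsyncClient fsClient) {
        // Everything under "mydir", descending into sub-directories.
        ListPathsOptions options = new ListPathsOptions().setPath("mydir").setRecursive(true);
        fsClient.listPaths(options)
            .subscribe(pathItem -> System.out.println(pathItem.getName()));
    }
}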
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.listPaths#ListPathsOptions} + * + * @param options A {@link ListPathsOptions} which specifies what data should be returned by the service. + * @return A reactive response emitting the list of files/directories. + */ + public PagedFluxCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.createFile#String} + * + * @param fileName Name of the file to create. + * @return A {@link Mono} containing a {@link DataLakeFileAsyncClient} used to interact with the file created. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.createFileWithResponse#String-PathHttpHeaders-Map-DataLakeRequestConditions-String-String} + * + * @param fileName Name of the file to create. + * @param headers {@link PathHttpHeaders} + * @param metadata Metadata to associate with the file. + * @param accessConditions {@link DataLakeRequestConditions} + * @param permissions POSIX access permissions for the file owner, the file owning group, and others. + * @param umask Restricts permissions of the file to be created. + * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} contains a {@link + * DataLakeFileAsyncClient} used to interact with the file created. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.deleteFile#String} + * + * @param fileName Name of the file to delete. + * @return A reactive response signalling completion. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.deleteFileWithResponse#String-DataLakeRequestConditions} + * + * @param fileName Name of the file to delete. + * @param accessConditions {@link DataLakeRequestConditions} + * @return A {@link Mono} containing status code and HTTP headers. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.createDirectory#String} + * + * @param directoryName Name of the directory to create. + * @return A {@link Mono} containing a {@link DataLakeDirectoryAsyncClient} used to interact with the directory + * created. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.createDirectoryWithResponse#String-PathHttpHeaders-Map-DataLakeRequestConditions-String-String} + * + * @param directoryName Name of the directory to create. + * @param headers {@link PathHttpHeaders} + * @param metadata Metadata to associate with the directory. + * @param accessConditions {@link DataLakeRequestConditions} + * @param permissions POSIX access permissions for the directory owner, the directory owning group, and others. + * @param umask Restricts permissions of the directory to be created. + * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} contains a {@link + * DataLakeDirectoryAsyncClient} used to interact with the directory created. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.deleteDirectory#String} + * + * @param directoryName Name of the directory to delete. + * @return A reactive response signalling completion. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.deleteDirectoryWithResponse#String-boolean-DataLakeRequestConditions} + * + * @param directoryName Name of the directory to delete. + * @param recursive Whether or not to delete all paths beneath the directory. + * @param accessConditions {@link DataLakeRequestConditions} + * @return A {@link Mono} containing status code and HTTP headers. + */ + public Mono+ * This client contains operations on a file system. Operations on a path are available on {@link DataLakeFileClient} + * and {@link DataLakeDirectoryClient} through {@link #getFileClient(String)} and {@link #getDirectoryClient(String)} + * respectively, and operations on the service are available on {@link DataLakeServiceClient}. + * + *
+ * Please refer to the + * Azure Docs for more information on file systems. + */ +@ServiceClient(builder = DataLakeFileSystemClientBuilder.class) +public class DataLakeFileSystemClient { + private final ClientLogger logger = new ClientLogger(DataLakeFileSystemClient.class); + + private final DataLakeFileSystemAsyncClient dataLakeFileSystemAsyncClient; + private final BlobContainerClient blobContainerClient; + + public static final String ROOT_FILESYSTEM_NAME = DataLakeFileSystemAsyncClient.ROOT_FILESYSTEM_NAME; + +// public static final String STATIC_WEBSITE_FILESYSTEM_NAME = +// DataLakeFileSystemAsyncClient.STATIC_WEBSITE_FILESYSTEM_NAME; + +// public static final String LOG_FILESYSTEM_NAME = DataLakeFileSystemAsyncClient.LOG_FILESYSTEM_NAME; + + /** + * Package-private constructor for use by {@link DataLakeFileSystemClientBuilder}. + * + * @param dataLakeFileSystemAsyncClient the async file system client. + * @param blobContainerClient the sync blob container client. + */ + DataLakeFileSystemClient(DataLakeFileSystemAsyncClient dataLakeFileSystemAsyncClient, + BlobContainerClient blobContainerClient) { + this.dataLakeFileSystemAsyncClient = dataLakeFileSystemAsyncClient; + this.blobContainerClient = blobContainerClient; + } + + /** + * Initializes a new DataLakeFileClient object by concatenating fileName to the end of DataLakeFileSystemClient's + * URL. The new DataLakeFileClient uses the same request policy pipeline as the DataLakeFileSystemClient. + * + * @param fileName A {@code String} representing the name of the file. + * + *
Code Samples
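+ * <p>A minimal illustrative sketch (the endpoint, SAS token, and file name are placeholders):</p>
+ * <pre>{@code
+ * DataLakeFileSystemClient fileSystemClient = new DataLakeFileSystemClientBuilder()
+ *     .endpoint("https://myaccount.dfs.core.windows.net/my-file-system")
+ *     .sasToken("<sas-token>")
+ *     .buildClient();
+ * DataLakeFileClient fileClient = fileSystemClient.getFileClient("my-file.txt");
+ * }</pre>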
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.getFileClient#String} + * + * @return A new {@link DataLakeFileClient} object which references the file with the specified name in this file + * system. + */ + public DataLakeFileClient getFileClient(String fileName) { + if (ImplUtils.isNullOrEmpty(fileName)) { + throw logger.logExceptionAsError(new IllegalArgumentException("'fileName' cannot be null or empty.")); + } + return new DataLakeFileClient(dataLakeFileSystemAsyncClient.getFileAsyncClient(fileName), + blobContainerClient.getBlobClient(fileName).getBlockBlobClient()); + } + + /** + * Initializes a new DataLakeDirectoryClient object by concatenating directoryName to the end of + * DataLakeFileSystemClient's URL. The new DataLakeDirectoryClient uses the same request policy pipeline as the + * DataLakeFileSystemClient. + * + * @param directoryName A {@code String} representing the name of the directory. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.getDirectoryClient#String} + * + * @return A new {@link DataLakeDirectoryClient} object which references the directory with the specified name in + * this file system. + */ + public DataLakeDirectoryClient getDirectoryClient(String directoryName) { + return new DataLakeDirectoryClient(dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(directoryName), + blobContainerClient.getBlobClient(directoryName).getBlockBlobClient()); + } + + /** + * Get the file system name. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.getFileSystemName} + * + * @return The name of the file system. + */ + public String getFileSystemName() { + return dataLakeFileSystemAsyncClient.getFileSystemName(); + } + + /** + * Gets the URL of the file system represented by this client. + * + * @return the URL. + */ + public String getFileSystemUrl() { + return dataLakeFileSystemAsyncClient.getFileSystemUrl(); + } + + /** + * Get associated account name. + * + * @return account name associated with this storage resource. + */ + public String getAccountName() { + return dataLakeFileSystemAsyncClient.getAccountName(); + } + + /** + * Gets the service version the client is using. + * + * @return the service version the client is using. + */ + public DataLakeServiceVersion getServiceVersion() { + return dataLakeFileSystemAsyncClient.getServiceVersion(); + } + + + /** + * Gets the {@link HttpPipeline} powering this client. + * + * @return The pipeline. + */ + public HttpPipeline getHttpPipeline() { + return dataLakeFileSystemAsyncClient.getHttpPipeline(); + } + + + /** + * Creates a new file system within a storage account. If a file system with the same name already exists, the + * operation fails. For more information, see the + * Azure Docs. + * + *Code Samples
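+ * <p>A minimal illustrative sketch (assumes {@code fileSystemClient} was built for a file system
+ * that does not exist yet):</p>
+ * <pre>{@code
+ * fileSystemClient.create();
+ * System.out.println("File system created");
+ * }</pre>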
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.create} + */ + public void create() { + createWithResponse(null, null, null, Context.NONE); + } + + /** + * Creates a new file system within a storage account. If a file system with the same name already exists, the + * operation fails. For more information, see the + * Azure Docs. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.createWithResponse#Map-PublicAccessType-Duration-Context} + * + * @param metadata Metadata to associate with the file system. + * @param accessType Specifies how the data in this file system is available to the public. See the + * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * @return A response containing status code and HTTP headers + */ + public ResponseCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.delete} + */ + public void delete() { + deleteWithResponse(null, null, Context.NONE); + } + + /** + * Marks the specified file system for deletion. The file system and any files/directories contained within it are + * later deleted during garbage collection. For more information, see the + * Azure Docs. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.deleteWithResponse#DataLakeRequestConditions-Duration-Context} + * + * @param accessConditions {@link DataLakeRequestConditions} + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * @return A response containing status code and HTTP headers + */ + public ResponseCode Samples
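+ * <p>A minimal illustrative sketch (the {@code getLastModified} accessor on
+ * {@link FileSystemProperties} is assumed):</p>
+ * <pre>{@code
+ * FileSystemProperties properties = fileSystemClient.getProperties();
+ * System.out.println("Last modified: " + properties.getLastModified());
+ * }</pre>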
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.getProperties} + * + * @return The file system properties. + */ + public FileSystemProperties getProperties() { + return getPropertiesWithResponse(null, null, Context.NONE).getValue(); + } + + /** + * Returns the file system's metadata and system properties. For more information, see the + * Azure Docs. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.getPropertiesWithResponse#String-Duration-Context} + * + * @param leaseId The lease ID the active lease on the file system must match. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * @return A response containing the file system properties. + */ + public ResponseCode Samples
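+ * <p>A minimal illustrative sketch (metadata keys and values are arbitrary):</p>
+ * <pre>{@code
+ * Map<String, String> metadata = new HashMap<>();
+ * metadata.put("department", "finance");
+ * fileSystemClient.setMetadata(metadata);
+ * }</pre>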
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.setMetadata#Map} + * + * @param metadata Metadata to associate with the file system. + */ + public void setMetadata(MapCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.setMetadataWithResponse#Map-DataLakeRequestConditions-Duration-Context} + * + * @param metadata Metadata to associate with the file system. + * @param accessConditions {@link DataLakeRequestConditions} + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * @return A response containing status code and HTTP headers + */ + public Response

Code Samples
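+ * <p>A minimal illustrative sketch (assumes {@code fileSystemClient} is an already-built
+ * {@link DataLakeFileSystemClient}):</p>
+ * <pre>{@code
+ * for (PathItem item : fileSystemClient.listPaths()) {
+ *     System.out.println(item.getName());
+ * }
+ * }</pre>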
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.listPaths} + * + * @return The list of files/directories. + */ + public PagedIterableCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.listPaths#ListPathsOptions-Duration} + * + * @param options A {@link ListPathsOptions} which specifies what data should be returned by the service. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @return The list of files/directories. + */ + public PagedIterableCode Samples
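+ * <p>A minimal illustrative sketch (the file name is hypothetical):</p>
+ * <pre>{@code
+ * DataLakeFileClient fileClient = fileSystemClient.createFile("my-file.txt");
+ * System.out.println("File created");
+ * }</pre>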
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.createFile#String} + * + * @param fileName Name of the file to create. + * @return A {@link DataLakeFileClient} used to interact with the file created. + */ + public DataLakeFileClient createFile(String fileName) { + return createFileWithResponse(fileName, null, null, null, null, null, null, Context.NONE).getValue(); + } + + /** + * Creates a new file within a file system. If a file with the same name already exists, the file will be + * overwritten. For more information, see the Azure Docs. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.createFileWithResponse#String-PathHttpHeaders-Map-DataLakeRequestConditions-String-String-Duration-Context} + * + * @param fileName Name of the file to create. + * @param headers {@link PathHttpHeaders} + * @param metadata Metadata to associate with the file. + * @param accessConditions {@link DataLakeRequestConditions} + * @param permissions POSIX access permissions for the file owner, the file owning group, and others. + * @param umask Restricts permissions of the file to be created. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * + * @return A {@link Response} whose {@link Response#getValue() value} contains the {@link DataLakeFileClient} used + * to interact with the file created. + */ + public ResponseCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.deleteFile#String} + * + * @param fileName Name of the file to delete. + */ + public void deleteFile(String fileName) { + deleteFileWithResponse(fileName, null, null, Context.NONE).getValue(); + } + + /** + * Deletes the specified file in the file system. If the file doesn't exist the operation fails. + * For more information see the Azure + * Docs. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.deleteFileWithResponse#String-DataLakeRequestConditions-Duration-Context} + * + * @param fileName Name of the file to delete. + * @param accessConditions {@link DataLakeRequestConditions} + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * @return A response containing status code and HTTP headers + */ + public ResponseCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.createDirectory#String} + * + * @param directoryName Name of the directory to create. + * @return A {@link DataLakeDirectoryClient} used to interact with the directory created. + */ + public DataLakeDirectoryClient createDirectory(String directoryName) { + return createDirectoryWithResponse(directoryName, null, null, null, null, null, null, Context.NONE).getValue(); + } + + /** + * Creates a new directory within a file system. If a directory with the same name already exists, the directory + * will be overwritten. For more information, see the + * Azure Docs. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.createDirectoryWithResponse#String-PathHttpHeaders-Map-DataLakeRequestConditions-String-String-Duration-Context} + * + * @param directoryName Name of the directory to create. + * @param headers {@link PathHttpHeaders} + * @param metadata Metadata to associate with the directory. + * @param accessConditions {@link DataLakeRequestConditions} + * @param permissions POSIX access permissions for the directory owner, the directory owning group, and others. + * @param umask Restricts permissions of the directory to be created. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * + * @return A {@link Response} whose {@link Response#getValue() value} contains a {@link DataLakeDirectoryClient} + * used to interact with the directory created. + */ + public ResponseCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.deleteDirectory#String} + * + * @param directoryName Name of the directory to delete. + */ + public void deleteDirectory(String directoryName) { + deleteDirectoryWithResponse(directoryName, false, null, null, Context.NONE).getValue(); + } + + /** + * Deletes the specified directory in the file system. If the directory doesn't exist the operation fails. + * For more information see the Azure + * Docs. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.deleteDirectoryWithResponse#String-boolean-DataLakeRequestConditions-Duration-Context} + * + * @param directoryName Name of the directory to delete. + * @param recursive Whether or not to delete all paths beneath the directory. + * @param accessConditions {@link DataLakeRequestConditions} + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * @return A response containing status code and HTTP headers + */ + public Response+ * The following information must be provided on this builder: + * + *
Code Samples
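+ * <p>A minimal illustrative sketch (account name, key, and file system name are placeholders):</p>
+ * <pre>{@code
+ * DataLakeFileSystemClient client = new DataLakeFileSystemClientBuilder()
+ *     .endpoint("https://myaccount.dfs.core.windows.net/my-file-system")
+ *     .credential(new StorageSharedKeyCredential("myaccount", "<account-key>"))
+ *     .buildClient();
+ * }</pre>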
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClientBuilder.buildClient} + * + * @return a {@link DataLakeFileSystemClient} created from the configurations in this builder. + */ + public DataLakeFileSystemClient buildClient() { + return new DataLakeFileSystemClient(buildAsyncClient(), blobContainerClientBuilder.buildClient()); + } + + /** + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClientBuilder.buildAsyncClient} + * + * @return a {@link DataLakeFileSystemAsyncClient} created from the configurations in this builder. + */ + public DataLakeFileSystemAsyncClient buildAsyncClient() { + /* + Implicit and explicit root file system access are functionally equivalent, but explicit references are easier + to read and debug. + */ + String dataLakeFileSystemName = ImplUtils.isNullOrEmpty(fileSystemName) + ? DataLakeFileSystemAsyncClient.ROOT_FILESYSTEM_NAME + : fileSystemName; + + DataLakeServiceVersion serviceVersion = version != null ? version : DataLakeServiceVersion.getLatest(); + + HttpPipeline pipeline = (httpPipeline != null) ? httpPipeline : BuilderHelper.buildPipeline(() -> { + if (storageSharedKeyCredential != null) { + return new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential); + } else if (tokenCredential != null) { + return new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint)); + } else if (sasTokenCredential != null) { + return new SasTokenCredentialPolicy(sasTokenCredential); + } else { + return null; + } + }, retryOptions, logOptions, httpClient, additionalPolicies, configuration, serviceVersion); + + return new DataLakeFileSystemAsyncClient(pipeline, String.format("%s/%s", endpoint, dataLakeFileSystemName), + serviceVersion, accountName, dataLakeFileSystemName, blobContainerClientBuilder.buildAsyncClient()); + } + + /** + * Sets the service endpoint, additionally parses it for information (SAS token, file system name) + * + * @param endpoint URL of the service + * @return the updated DataLakeFileSystemClientBuilder object + * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. + */ + public DataLakeFileSystemClientBuilder endpoint(String endpoint) { + blobContainerClientBuilder.endpoint(Transforms.endpointToDesiredEndpoint(endpoint, "blob", "dfs")); + try { + URL url = new URL(endpoint); + BlobUrlParts parts = BlobUrlParts.parse(url); + + this.endpoint = parts.getScheme() + "://" + parts.getHost(); + this.accountName = parts.getAccountName(); + this.fileSystemName = parts.getBlobContainerName(); + + String sasToken = parts.getSasQueryParameters().encode(); + if (!ImplUtils.isNullOrEmpty(sasToken)) { + this.sasToken(sasToken); + } + } catch (MalformedURLException ex) { + throw logger.logExceptionAsError( + new IllegalArgumentException("The Azure Storage Datalake endpoint url is malformed.")); + } + + return this; + } + + /** + * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. + * + * @param credential The credential to use for authenticating request. + * @return the updated DataLakeFileSystemClientBuilder + * @throws NullPointerException If {@code credential} is {@code null}. + */ + public DataLakeFileSystemClientBuilder credential(StorageSharedKeyCredential credential) { + blobContainerClientBuilder.credential(credential); + this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); + this.tokenCredential = null; + this.sasTokenCredential = null; + return this; + } + + /** + * Sets the {@link TokenCredential} used to authorize requests sent to the service. + * + * @param credential The credential to use for authenticating request. + * @return the updated DataLakeFileSystemClientBuilder + * @throws NullPointerException If {@code credential} is {@code null}. 
+ */ + public DataLakeFileSystemClientBuilder credential(TokenCredential credential) { + blobContainerClientBuilder.credential(credential); + this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); + this.storageSharedKeyCredential = null; + this.sasTokenCredential = null; + return this; + } + + /** + * Sets the SAS token used to authorize requests sent to the service. + * + * @param sasToken The SAS token to use for authenticating requests. + * @return the updated DataLakeFileSystemClientBuilder + * @throws NullPointerException If {@code sasToken} is {@code null}. + */ + public DataLakeFileSystemClientBuilder sasToken(String sasToken) { + blobContainerClientBuilder.sasToken(sasToken); + this.sasTokenCredential = new SasTokenCredential(Objects.requireNonNull(sasToken, + "'sasToken' cannot be null.")); + this.storageSharedKeyCredential = null; + this.tokenCredential = null; + return this; + } + + /** + * Clears the credential used to authorize the request. + * + *This is for file systems that are publicly accessible.
+ * + * @return the updated DataLakeFileSystemClientBuilder + */ + public DataLakeFileSystemClientBuilder setAnonymousAccess() { + blobContainerClientBuilder.setAnonymousAccess(); + this.storageSharedKeyCredential = null; + this.tokenCredential = null; + this.sasTokenCredential = null; + return this; + } + + /** + * Sets the name of the file system. + * + * @param fileSystemName Name of the file system. If the value is {@code null} or empty, the root file system, + * {@code $root}, will be used. + * @return the updated DataLakeFileSystemClientBuilder object + */ + public DataLakeFileSystemClientBuilder fileSystemName(String fileSystemName) { + blobContainerClientBuilder.containerName(fileSystemName); + this.fileSystemName = fileSystemName; + return this; + } + + /** + * Sets the {@link HttpClient} to use for sending and receiving requests to and from the service. + * + * @param httpClient HttpClient to use for requests. + * @return the updated DataLakeFileSystemClientBuilder object + */ + public DataLakeFileSystemClientBuilder httpClient(HttpClient httpClient) { + blobContainerClientBuilder.httpClient(httpClient); + if (this.httpClient != null && httpClient == null) { + logger.info("'httpClient' is being set to 'null' when it was previously configured."); + } + + this.httpClient = httpClient; + return this; + } + + /** + * Gets the default Storage whitelist log headers and query parameters. + * + * @return the default http log options. + */ + public static HttpLogOptions getDefaultHttpLogOptions() { + return BuilderHelper.getDefaultHttpLogOptions(); + } + + /** + * Adds a pipeline policy to apply on each request sent. + * + * @param pipelinePolicy a pipeline policy + * @return the updated DataLakeFileSystemClientBuilder object + * @throws NullPointerException If {@code pipelinePolicy} is {@code null}. + */ + public DataLakeFileSystemClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { + blobContainerClientBuilder.addPolicy(pipelinePolicy); + this.additionalPolicies.add(Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null")); + return this; + } + + /** + * Sets the {@link HttpLogOptions} for service requests. + * + * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. + * @return the updated DataLakeFileSystemClientBuilder object + * @throws NullPointerException If {@code logOptions} is {@code null}. + */ + public DataLakeFileSystemClientBuilder httpLogOptions(HttpLogOptions logOptions) { + blobContainerClientBuilder.httpLogOptions(logOptions); + this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null."); + return this; + } + + /** + * Sets the configuration object used to retrieve environment configuration values during building of the client. + * + * @param configuration Configuration store used to retrieve environment configurations. + * @return the updated DataLakeFileSystemClientBuilder object + */ + public DataLakeFileSystemClientBuilder configuration(Configuration configuration) { + blobContainerClientBuilder.configuration(configuration); + this.configuration = configuration; + return this; + } + + /** + * Sets the request retry options for all the requests made through the client. + * + * @param retryOptions The options used to configure retry behavior. + * @return the updated DataLakeFileSystemClientBuilder object + * @throws NullPointerException If {@code retryOptions} is {@code null}.
+ */ + public DataLakeFileSystemClientBuilder retryOptions(RequestRetryOptions retryOptions) { + blobContainerClientBuilder.retryOptions(retryOptions); + this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); + return this; + } + + /** + * Sets the {@link HttpPipeline} to use for the service client. + * + * If {@code pipeline} is set, all other settings are ignored, aside from {@link #endpoint(String) endpoint}. + * + * @param httpPipeline HttpPipeline to use for sending service requests and receiving responses. + * @return the updated DataLakeFileSystemClientBuilder object + */ + public DataLakeFileSystemClientBuilder pipeline(HttpPipeline httpPipeline) { + blobContainerClientBuilder.pipeline(httpPipeline); + if (this.httpPipeline != null && httpPipeline == null) { + logger.info("HttpPipeline is being set to 'null' when it was previously configured."); + } + + this.httpPipeline = httpPipeline; + return this; + } + + // TODO (gapra) : Determine how to set the blob service version here + /** + * Sets the {@link DataLakeServiceVersion} that is used when making API requests. + *

+ * If a service version is not provided, the service version that will be used will be the latest known service + * version based on the version of the client library being used. If no service version is specified, updating to a + * newer version of the client library will have the result of potentially moving to a newer service version. + * + * @param version {@link DataLakeServiceVersion} of the service to be used when making requests. + * @return the updated DataLakeFileSystemClientBuilder object + */ + public DataLakeFileSystemClientBuilder serviceVersion(DataLakeServiceVersion version) { + this.version = version; + return this; + } +} diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeLeaseAsyncClient.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeLeaseAsyncClient.java new file mode 100644 index 0000000000000..d2764f3a1908e --- /dev/null +++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeLeaseAsyncClient.java @@ -0,0 +1,247 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.file.datalake; + +import com.azure.core.annotation.ReturnType; +import com.azure.core.annotation.ServiceClient; +import com.azure.core.annotation.ServiceMethod; +import com.azure.core.http.RequestConditions; +import com.azure.core.http.rest.Response; +import com.azure.core.implementation.util.FluxUtil; +import com.azure.storage.blob.specialized.BlobLeaseAsyncClient; +import reactor.core.publisher.Mono; + +import java.net.URL; + + +/** + * This class provides a client that contains all the leasing operations for {@link DataLakeFileSystemAsyncClient + * file systems}, {@link DataLakeFileAsyncClient files} and {@link DataLakeDirectoryAsyncClient directories}. + * This client acts as a supplement to those clients and only handles leasing operations. + * + *
Instantiating a DataLakeLeaseAsyncClient
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseClientBuilder.asyncInstantiationWithFile} + * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseClientBuilder.asyncInstantiationWithDirectory} + * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseClientBuilder.asyncInstantiationWithFileSystem} + * + *View {@link DataLakeLeaseClientBuilder this} for additional ways to construct the client.
+ * + *For more information about leasing see the + * file system leasing or + * path leasing documentation.
+ * + * @see DataLakeLeaseClientBuilder + */ +@ServiceClient(builder = DataLakeLeaseClientBuilder.class, isAsync = true) +public final class DataLakeLeaseAsyncClient { + + private final BlobLeaseAsyncClient blobLeaseAsyncClient; + + DataLakeLeaseAsyncClient(BlobLeaseAsyncClient blobLeaseAsyncClient) { + this.blobLeaseAsyncClient = blobLeaseAsyncClient; + } + + /** + * Gets the {@link URL} of the lease client. + * + *The lease will either be a file system or path URL depending on which resource the lease client is associated with.
+ * + * @return URL of the lease client. + */ + public String getResourceUrl() { + return this.blobLeaseAsyncClient.getResourceUrl(); + } + + /** + * Get the lease ID for this lease. + * + * @return the lease ID. + */ + public String getLeaseId() { + return this.blobLeaseAsyncClient.getLeaseId(); + } + + /** + * Acquires a lease for write and delete operations. The lease duration must be between 15 and 60 seconds or + * -1 for an infinite duration. + * + *Code Samples
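+ * <p>A minimal illustrative sketch (assumes {@code leaseAsyncClient} was built through
+ * {@link DataLakeLeaseClientBuilder}):</p>
+ * <pre>{@code
+ * leaseAsyncClient.acquireLease(30).subscribe(leaseId ->
+ *     System.out.println("Acquired lease " + leaseId));
+ * }</pre>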
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseAsyncClient.acquireLease#int} + * + * @param duration The duration of the lease between 15 and 60 seconds or -1 for an infinite duration. + * @return A reactive response containing the lease ID. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono

Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseAsyncClient.acquireLeaseWithResponse#int-RequestConditions} + * + * @param duration The duration of the lease between 15 and 60 seconds or -1 for an infinite duration. + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the resource was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. + * @return A reactive response containing the lease ID. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono

Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseAsyncClient.renewLease} + * + * @return A reactive response containing the renewed lease ID. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseAsyncClient.renewLeaseWithResponse#RequestConditions} + * + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the resource was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. + * @return A reactive response containing the renewed lease ID. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseAsyncClient.releaseLease} + * + * @return A reactive response signalling completion. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseAsyncClient.releaseLeaseWithResponse#RequestConditions} + * + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the resource was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. + * @return A reactive response signalling completion. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseAsyncClient.breakLease} + * + * @return A reactive response containing the remaining time in the broken lease in seconds. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public MonoIf {@code null} is passed for {@code breakPeriodInSeconds} a fixed duration lease will break after the + * remaining lease period elapses and an infinite lease will break immediately.
+ * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseAsyncClient.breakLeaseWithResponse#Integer-RequestConditions} + * + * @param breakPeriodInSeconds An optional duration, between 0 and 60 seconds, that the lease should continue before + * it is broken. If the break period is longer than the time remaining on the lease the remaining time on the lease + * is used. A new lease will not be available before the break period has expired, but the lease may be held for + * longer than the break period. + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the resource was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. + * @return A reactive response containing the remaining time in the broken lease in seconds. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseAsyncClient.changeLease#String} + * + * @param proposedId A new lease ID in a valid GUID format. + * @return A reactive response containing the new lease ID. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseAsyncClient.changeLeaseWithResponse#String-RequestConditions} + * + * @param proposedId A new lease ID in a valid GUID format. + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the resource was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. + * @return A reactive response containing the new lease ID. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public MonoInstantiating a DataLakeLeaseClient
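+ * <p>A minimal illustrative sketch (assumes {@code fileSystemClient} is an already-built
+ * {@link DataLakeFileSystemClient}):</p>
+ * <pre>{@code
+ * DataLakeLeaseClient leaseClient = new DataLakeLeaseClientBuilder()
+ *     .fileSystemClient(fileSystemClient)
+ *     .buildClient();
+ * }</pre>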
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseClientBuilder.syncInstantiationWithFile} + * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseClientBuilder.syncInstantiationWithDirectory} + * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseClientBuilder.syncInstantiationWithFileSystem} + * + *View {@link DataLakeLeaseClientBuilder this} for additional ways to construct the client.
+ * + *For more information about leasing see the + * file system leasing or + * path leasing documentation.
+ * + * @see DataLakeLeaseClientBuilder + */ +@ServiceClient(builder = DataLakeLeaseClientBuilder.class) +public final class DataLakeLeaseClient { + private final BlobLeaseClient blobLeaseClient; + + DataLakeLeaseClient(BlobLeaseClient blobLeaseClient) { + this.blobLeaseClient = blobLeaseClient; + } + + /** + * Gets the {@link URL} of the lease client. + * + *The lease will either be a file system or path URL depending on which resource the lease client is associated with.
+ * + * @return URL of the lease client. + */ + public String getResourceUrl() { + return blobLeaseClient.getResourceUrl(); + } + + /** + * Get the lease ID for this lease. + * + * @return the lease ID. + */ + public String getLeaseId() { + return blobLeaseClient.getLeaseId(); + } + + /** + * Acquires a lease for write and delete operations. The lease duration must be between 15 and 60 seconds or + * -1 for an infinite duration. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseClient.acquireLease#int} + * + * @param duration The duration of the lease between 15 and 60 seconds or -1 for an infinite duration. + * @return The lease ID. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public String acquireLease(int duration) { + return acquireLeaseWithResponse(duration, null, null, Context.NONE).getValue(); + } + + /** + * Acquires a lease for write and delete operations. The lease duration must be between 15 and 60 seconds or + * -1 for an infinite duration. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseClient.acquireLeaseWithResponse#int-RequestConditions-Duration-Context} + * + * @param duration The duration of the lease between 15 and 60 seconds or -1 for an infinite duration. + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the resource was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * @return The lease ID. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Response

Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseClient.renewLease} + * + * @return The renewed lease ID. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public String renewLease() { + return renewLeaseWithResponse(null, null, Context.NONE).getValue(); + } + + /** + * Renews the previously-acquired lease. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseClient.renewLeaseWithResponse#RequestConditions-Duration-Context} + * + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the resource was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * @return The renewed lease ID. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public ResponseCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseClient.releaseLease} + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public void releaseLease() { + releaseLeaseWithResponse(null, null, Context.NONE); + } + + /** + * Releases the previously acquired lease. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseClient.releaseLeaseWithResponse#RequestConditions-Duration-Context} + * + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the resource was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * @return A response containing status code and HTTP headers. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public ResponseCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseClient.breakLease} + * + * @return The remaining time in the broken lease in seconds. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Integer breakLease() { + return breakLeaseWithResponse(null, null, null, Context.NONE).getValue(); + } + + /** + * Breaks the previously acquired lease, if it exists. + * + *If {@code null} is passed for {@code breakPeriodInSeconds} a fixed duration lease will break after the + * remaining lease period elapses and an infinite lease will break immediately.
+ * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseClient.breakLeaseWithResponse#Integer-RequestConditions-Duration-Context} + * + * @param breakPeriodInSeconds An optional duration, between 0 and 60 seconds, that the lease should continue before + * it is broken. If the break period is longer than the time remaining on the lease the remaining time on the lease + * is used. A new lease will not be available before the break period has expired, but the lease may be held for + * longer than the break period. + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the resource was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * @return The remaining time in the broken lease in seconds. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public ResponseCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseClient.changeLease#String} + * + * @param proposedId A new lease ID in a valid GUID format. + * @return The new lease ID. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public String changeLease(String proposedId) { + return changeLeaseWithResponse(proposedId, null, null, Context.NONE).getValue(); + } + + /** + * Changes the lease ID. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseClient.changeLeaseWithResponse#String-RequestConditions-Duration-Context} + * + * @param proposedId A new lease ID in a valid GUID format. + * @param modifiedAccessConditions Standard HTTP Access conditions related to the modification of data. ETag and + * LastModifiedTime are used to construct conditions related to when the resource was changed relative to the given + * request. The request will fail if the specified condition is not satisfied. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * @return The new lease ID. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public ResponseWhen a client is instantiated and a {@link #leaseId(String) leaseId} hasn't been set a {@link UUID} will be used + * as the lease identifier.
+ * + *Instantiating LeaseClients
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseClientBuilder.syncInstantiationWithFileAndLeaseId} + * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseClientBuilder.syncInstantiationWithDirectoryAndLeaseId} + * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseClientBuilder.syncInstantiationWithFileSystemAndLeaseId} + * + *Instantiating LeaseAsyncClients
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseClientBuilder.asyncInstantiationWithFileAndLeaseId} + * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseClientBuilder.asyncInstantiationWithDirectoryAndLeaseId} + * + * {@codesnippet com.azure.storage.file.datalake.DataLakeLeaseClientBuilder.asyncInstantiationWithFileSystemAndLeaseId} + * + * @see DataLakeLeaseClient + * @see DataLakeLeaseAsyncClient + */ +@ServiceClientBuilder(serviceClients = { DataLakeLeaseClient.class, DataLakeLeaseAsyncClient.class }) +public final class DataLakeLeaseClientBuilder { + + final BlobLeaseClientBuilder blobLeaseClientBuilder; + + public DataLakeLeaseClientBuilder() { + blobLeaseClientBuilder = new BlobLeaseClientBuilder(); + } + + /** + * Creates a {@link DataLakeLeaseClient} based on the configurations set in the builder. + * + * @return a {@link DataLakeLeaseClient} based on the configurations in this builder. + */ + public DataLakeLeaseClient buildClient() { + return new DataLakeLeaseClient(blobLeaseClientBuilder.buildClient()); + } + + /** + * Creates a {@link DataLakeLeaseAsyncClient} based on the configurations set in the builder. + * + * @return a {@link DataLakeLeaseAsyncClient} based on the configurations in this builder. + */ + public DataLakeLeaseAsyncClient buildAsyncClient() { + return new DataLakeLeaseAsyncClient(blobLeaseClientBuilder.buildAsyncClient()); + } + + /** + * Configures the builder based on the passed {@link DataLakePathClient}. This will set the {@link HttpPipeline} and + * {@link URL} that are used to interact with the service. + * + * @param dataLakePathClient Client used to configure the builder. + * @return the updated DataLakeLeaseClientBuilder object + * @throws NullPointerException If {@code dataLakePathClient} is {@code null}. + */ + public DataLakeLeaseClientBuilder pathClient(DataLakePathClient dataLakePathClient) { + blobLeaseClientBuilder.blobClient(dataLakePathClient.getBlockBlobClient()); + return this; + } + + /** + * Configures the builder based on the passed {@link DataLakePathAsyncClient}. This will set the + * {@link HttpPipeline} and {@link URL} that are used to interact with the service. + * + * @param dataLakePathAsyncClient DataLakePathAsyncClient used to configure the builder. + * @return the updated DataLakeLeaseClientBuilder object + * @throws NullPointerException If {@code dataLakePathAsyncClient} is {@code null}. + */ + public DataLakeLeaseClientBuilder pathAsyncClient(DataLakePathAsyncClient dataLakePathAsyncClient) { + blobLeaseClientBuilder.blobAsyncClient(dataLakePathAsyncClient.getBlockBlobAsyncClient()); + return this; + } + + /** + * Configures the builder based on the passed {@link DataLakeFileSystemClient}. This will set the + * {@link HttpPipeline} and {@link URL} that are used to interact with the service. + * + * @param dataLakeFileSystemClient DataLakeFileSystemClient used to configure the builder. + * @return the updated DataLakeLeaseClientBuilder object + * @throws NullPointerException If {@code dataLakeFileSystemClient} is {@code null}. + */ + public DataLakeLeaseClientBuilder fileSystemClient(DataLakeFileSystemClient dataLakeFileSystemClient) { + blobLeaseClientBuilder.containerClient(dataLakeFileSystemClient.getBlobContainerClient()); + return this; + } + + /** + * Configures the builder based on the passed {@link DataLakeFileSystemAsyncClient}. This will set the {@link + * HttpPipeline} and {@link URL} that are used to interact with the service. 
+ * + * @param dataLakeFileSystemAsyncClient DataLakeFileSystemAsyncClient used to configure the builder. + * @return the updated DataLakeLeaseClientBuilder object + * @throws NullPointerException If {@code dataLakeFileSystemAsyncClient} is {@code null}. + */ + public DataLakeLeaseClientBuilder fileSystemAsyncClient( + DataLakeFileSystemAsyncClient dataLakeFileSystemAsyncClient) { + blobLeaseClientBuilder.containerAsyncClient(dataLakeFileSystemAsyncClient.getBlobContainerAsyncClient()); + return this; + } + + /** + * Sets the identifier for the lease. + * + *If a lease ID isn't set then a {@link UUID} will be used.
+ * + * @param leaseId Identifier for the lease. + * @return the updated DataLakeLeaseClientBuilder object + */ + public DataLakeLeaseClientBuilder leaseId(String leaseId) { + blobLeaseClientBuilder.leaseId(leaseId); + return this; + } +} diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakePathAsyncClient.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakePathAsyncClient.java new file mode 100644 index 0000000000000..5cec3ccfaffd5 --- /dev/null +++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakePathAsyncClient.java @@ -0,0 +1,581 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.file.datalake; + +import com.azure.core.http.HttpPipeline; +import com.azure.core.http.rest.Response; +import com.azure.core.http.rest.SimpleResponse; +import com.azure.core.implementation.util.FluxUtil; +import com.azure.core.implementation.util.ImplUtils; +import com.azure.core.util.Context; +import com.azure.core.util.logging.ClientLogger; +import com.azure.storage.blob.BlobContainerAsyncClient; +import com.azure.storage.blob.BlobServiceVersion; +import com.azure.storage.blob.BlobUrlParts; +import com.azure.storage.blob.specialized.BlockBlobAsyncClient; +import com.azure.storage.blob.specialized.SpecializedBlobClientBuilder; +import com.azure.storage.common.Utility; +import com.azure.storage.file.datalake.implementation.DataLakeStorageClientBuilder; +import com.azure.storage.file.datalake.implementation.DataLakeStorageClientImpl; +import com.azure.storage.file.datalake.implementation.models.LeaseAccessConditions; +import com.azure.storage.file.datalake.implementation.models.ModifiedAccessConditions; +import com.azure.storage.file.datalake.implementation.models.PathGetPropertiesAction; +import com.azure.storage.file.datalake.implementation.models.PathRenameMode; +import com.azure.storage.file.datalake.implementation.models.PathResourceType; +import com.azure.storage.file.datalake.implementation.models.SourceModifiedAccessConditions; +import com.azure.storage.file.datalake.models.DataLakeRequestConditions; +import com.azure.storage.file.datalake.models.PathAccessControl; +import com.azure.storage.file.datalake.models.PathHttpHeaders; +import com.azure.storage.file.datalake.models.PathInfo; +import com.azure.storage.file.datalake.models.PathItem; +import com.azure.storage.file.datalake.models.PathProperties; +import reactor.core.publisher.Mono; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.Map; +import java.util.Objects; + +import static com.azure.core.implementation.util.FluxUtil.monoError; +import static com.azure.core.implementation.util.FluxUtil.withContext; + +/** + * This class provides a client that contains all operations that apply to any path object. + */ +public class DataLakePathAsyncClient { + + private final ClientLogger logger = new ClientLogger(DataLakePathAsyncClient.class); + + protected final DataLakeStorageClientImpl dataLakeStorage; + private final String accountName; + private final String fileSystemName; + private final String pathName; + protected final BlockBlobAsyncClient blockBlobAsyncClient; + private final DataLakeServiceVersion serviceVersion; + + /** + * Package-private constructor for use by {@link DataLakePathClientBuilder}. + * + * @param pipeline The pipeline used to send and receive service requests. 
+ * @param url The endpoint where to send service requests. + * @param serviceVersion The version of the service to receive requests. + * @param accountName The storage account name. + * @param fileSystemName The file system name. + * @param pathName The path name. + * @param blockBlobAsyncClient The underlying {@link BlockBlobAsyncClient} + */ + DataLakePathAsyncClient(HttpPipeline pipeline, String url, DataLakeServiceVersion serviceVersion, + String accountName, String fileSystemName, String pathName, BlockBlobAsyncClient blockBlobAsyncClient) { + this.dataLakeStorage = new DataLakeStorageClientBuilder() + .pipeline(pipeline) + .url(url) + .version(serviceVersion.getVersion()) + .build(); + this.serviceVersion = serviceVersion; + + this.accountName = accountName; + this.fileSystemName = fileSystemName; + this.pathName = pathName; + this.blockBlobAsyncClient = blockBlobAsyncClient; + } + + /** + * Converts the metadata into a string of format "key1=value1, key2=value2" and Base64 encodes the values. + * + * @param metadata The metadata. + * + * @return The metadata represented as a String. + */ + static String buildMetadataString(Map

Code Samples
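+ * <p>A minimal illustrative sketch (assumes {@code pathAsyncClient} is an already-built
+ * {@link DataLakePathAsyncClient}, e.g. a {@link DataLakeFileAsyncClient}; the metadata is arbitrary):</p>
+ * <pre>{@code
+ * pathAsyncClient.setMetadata(Collections.singletonMap("category", "reports")).subscribe(
+ *     unused -> { },
+ *     error -> System.err.println("Failed to set metadata: " + error),
+ *     () -> System.out.println("Metadata set"));
+ * }</pre>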
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.setMetadata#Map} + * + *For more information, see the + * Azure Docs
+ * + * @param metadata Metadata to associate with the resource. + * @return A reactive response signalling completion. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.setMetadata#Map-DataLakeRequestConditions} + * + *For more information, see the + * Azure Docs
+ * + * @param metadata Metadata to associate with the resource. + * @param accessConditions {@link DataLakeRequestConditions} + * @return A reactive response signalling completion. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.setHttpHeaders#PathHttpHeaders} + * + *For more information, see the + * Azure Docs
+ * + * @param headers {@link PathHttpHeaders} + * @return A reactive response signalling completion. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.setHttpHeadersWithResponse#PathHttpHeaders-DataLakeRequestConditions} + * + *For more information, see the + * Azure Docs
+ * + * @param headers {@link PathHttpHeaders} + * @param accessConditions {@link DataLakeRequestConditions} + * @return A reactive response signalling completion. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.getProperties} + * + *For more information, see the + * Azure Docs
+ * + * @return A reactive response containing the resource's properties and metadata. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.getPropertiesWithResponse#DataLakeRequestConditions} + * + *For more information, see the + * Azure Docs
+ * + * @param accessConditions {@link DataLakeRequestConditions} + * @return A reactive response containing the resource's properties and metadata. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.setAccessControl#PathAccessControl} + * + *For more information, see the + * Azure Docs
+ * + * @param accessControl {@link PathAccessControl} + * @return A reactive response containing the resource info. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.setAccessControlWithResponse#PathAccessControl-DataLakeRequestConditions} + * + *For more information, see the + * Azure Docs
+ * + * @param accessControl {@link PathAccessControl} + * @param accessConditions {@link DataLakeRequestConditions} + * @return A reactive response containing the resource info. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.getAccessControl} + * + *For more information, see the + * Azure Docs
+ * + * @return A reactive response containing the resource access control. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.getAccessControlWithResponse#boolean-DataLakeRequestConditions} + * + *For more information, see the + * Azure Docs
+ * + * @param returnUpn When true, user identity values are returned as User Principal Names. When false, user identity + * values are returned as Azure Active Directory Object IDs. Default value is false. + * @param accessConditions {@link DataLakeRequestConditions} + * @return A reactive response containing the resource access control. + */ + public Mono

Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.setMetadata#Map} + * + *For more information, see the + * Azure Docs
+ * + * @param metadata Metadata to associate with the resource. + */ + public void setMetadata(MapCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.setMetadata#Map-DataLakeRequestConditions-Duration-Context} + * + *For more information, see the + * Azure Docs
+ * + * @param metadata Metadata to associate with the resource. + * @param accessConditions {@link DataLakeRequestConditions} + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * @return A response containing status code and HTTP headers. + */ + public ResponseCode Samples
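+ * <p>A minimal illustrative sketch (the fluent setters on {@link PathHttpHeaders} are assumed;
+ * values are placeholders):</p>
+ * <pre>{@code
+ * PathHttpHeaders headers = new PathHttpHeaders()
+ *     .setContentType("application/json")
+ *     .setContentLanguage("en-US");
+ * pathClient.setHttpHeaders(headers);
+ * }</pre>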
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.setHttpHeaders#PathHttpHeaders} + * + *For more information, see the + * Azure Docs
+ * + * @param headers {@link PathHttpHeaders} + */ + public void setHttpHeaders(PathHttpHeaders headers) { + setHttpHeadersWithResponse(headers, null, null, Context.NONE); + } + + /** + * Changes a resource's HTTP header properties. If only one HTTP header is updated, the others will all be erased. + * In order to preserve existing values, they must be passed alongside the header being changed. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.setHttpHeadersWithResponse#PathHttpHeaders-DataLakeRequestConditions-Duration-Context} + * + *For more information, see the + * Azure Docs
+ * + * @param headers {@link PathHttpHeaders} + * @param accessConditions {@link DataLakeRequestConditions} + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * @return A response containing status code and HTTP headers. + */ + public ResponseCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.setAccessControl#PathAccessControl} + * + *For more information, see the + * Azure Docs
+ * + * @param accessControl {@link PathAccessControl} + * @return The resource info. + */ + public PathInfo setAccessControl(PathAccessControl accessControl) { + return setAccessControlWithResponse(accessControl, null, null, Context.NONE).getValue(); + } + + /** + * Changes the access control for a resource. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.setAccessControlWithResponse#PathAccessControl-DataLakeRequestConditions-Duration-Context} + * + *For more information, see the + * Azure Docs
+ * + * @param accessControl {@link PathAccessControl} + * @param accessConditions {@link DataLakeRequestConditions} + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * @return A response containing the resource info. + */ + public ResponseCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.getAccessControl} + * + *For more information, see the + * Azure Docs
+ * + * @return The resource access control. + */ + public PathAccessControl getAccessControl() { + return getAccessControlWithResponse(false, null, null, Context.NONE).getValue(); + } + + /** + * Returns the access control for a resource. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.getAccessControlWithResponse#boolean-DataLakeRequestConditions-Duration-Context} + * + *For more information, see the + * Azure Docs
+ * + * @param returnUpn When true, user identity values are returned as User Principal Names. When false, user identity + * values are returned as Azure Active Directory Object IDs. Default value is false. + * @param accessConditions {@link DataLakeRequestConditions} + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * @return A response containing the resource access control. + */ + public ResponseCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.getProperties} + * + *For more information, see the + * Azure Docs
+ * + * @return The resource properties and metadata. + */ + public PathProperties getProperties() { + return getPropertiesWithResponse(null, null, Context.NONE).getValue(); + } + + /** + * Returns the resource's metadata and properties. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.getPropertiesWithResponse#DataLakeRequestConditions-Duration-Context} + * + *For more information, see the + * Azure Docs
+ * + * @param accessConditions {@link DataLakeRequestConditions} + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * @return A response containing the resource properties and metadata. + */ + public Response+ * The following information must be provided on this builder: + * + *
Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakePathClientBuilder.buildFileClient} + * + * @return a {@link DataLakeFileClient} created from the configurations in this builder. + * @throws NullPointerException If {@code endpoint} or {@code pathName} is {@code null}. + */ + public DataLakeFileClient buildFileClient() { + return new DataLakeFileClient(buildFileAsyncClient(), blobClientBuilder.buildClient().getBlockBlobClient()); + } + + /** + * Creates a {@link DataLakeFileAsyncClient} based on options set in the builder. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakePathClientBuilder.buildFileAsyncClient} + * + * @return a {@link DataLakeFileAsyncClient} created from the configurations in this builder. + * @throws NullPointerException If {@code endpoint} or {@code pathName} is {@code null}. + */ + public DataLakeFileAsyncClient buildFileAsyncClient() { + Objects.requireNonNull(pathName, "'pathName' cannot be null."); + Objects.requireNonNull(endpoint, "'endpoint' cannot be null"); + + /* + Implicit and explicit root container access are functionally equivalent, but explicit references are easier + to read and debug. + */ + String dataLakeFileSystemName = ImplUtils.isNullOrEmpty(fileSystemName) + ? DataLakeFileSystemAsyncClient.ROOT_FILESYSTEM_NAME + : fileSystemName; + + DataLakeServiceVersion serviceVersion = version != null ? version : DataLakeServiceVersion.getLatest(); + + HttpPipeline pipeline = (httpPipeline != null) ? httpPipeline : BuilderHelper.buildPipeline(() -> { + if (storageSharedKeyCredential != null) { + return new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential); + } else if (tokenCredential != null) { + return new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint)); + } else if (sasTokenCredential != null) { + return new SasTokenCredentialPolicy(sasTokenCredential); + } else { + return null; + } + }, retryOptions, logOptions, httpClient, additionalPolicies, configuration, serviceVersion); + + return new DataLakeFileAsyncClient(pipeline, String.format("%s/%s/%s", endpoint, dataLakeFileSystemName, + pathName), serviceVersion, accountName, dataLakeFileSystemName, pathName, + blobClientBuilder.buildAsyncClient().getBlockBlobAsyncClient()); + } + + /** + * Creates a {@link DataLakeDirectoryClient} based on options set in the builder. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakePathClientBuilder.buildDirectoryClient} + * + * @return a {@link DataLakeDirectoryClient} created from the configurations in this builder. + * @throws NullPointerException If {@code endpoint} or {@code pathName} is {@code null}. + */ + public DataLakeDirectoryClient buildDirectoryClient() { + return new DataLakeDirectoryClient(buildDirectoryAsyncClient(), + blobClientBuilder.buildClient().getBlockBlobClient()); + } + + /** + * Creates a {@link DataLakeDirectoryAsyncClient} based on options set in the builder. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakePathClientBuilder.buildDirectoryAsyncClient} + * + * @return a {@link DataLakeDirectoryAsyncClient} created from the configurations in this builder. + * @throws NullPointerException If {@code endpoint} or {@code pathName} is {@code null}. + */ + public DataLakeDirectoryAsyncClient buildDirectoryAsyncClient() { + Objects.requireNonNull(pathName, "'pathName' cannot be null."); + Objects.requireNonNull(endpoint, "'endpoint' cannot be null"); + + /* + Implicit and explicit root container access are functionally equivalent, but explicit references are easier + to read and debug. + */ + String dataLakeFileSystemName = ImplUtils.isNullOrEmpty(fileSystemName) + ? DataLakeFileSystemAsyncClient.ROOT_FILESYSTEM_NAME + : fileSystemName; + + DataLakeServiceVersion serviceVersion = version != null ? version : DataLakeServiceVersion.getLatest(); + + HttpPipeline pipeline = (httpPipeline != null) ? httpPipeline : BuilderHelper.buildPipeline(() -> { + if (storageSharedKeyCredential != null) { + return new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential); + } else if (tokenCredential != null) { + return new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint)); + } else if (sasTokenCredential != null) { + return new SasTokenCredentialPolicy(sasTokenCredential); + } else { + return null; + } + }, retryOptions, logOptions, httpClient, additionalPolicies, configuration, serviceVersion); + + return new DataLakeDirectoryAsyncClient(pipeline, String.format("%s/%s/%s", endpoint, dataLakeFileSystemName, + pathName), serviceVersion, accountName, dataLakeFileSystemName, pathName, + blobClientBuilder.buildAsyncClient().getBlockBlobAsyncClient()); + } + + /** + * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. + * + * @param credential The credential to use for authenticating requests. + * @return the updated DataLakePathClientBuilder + * @throws NullPointerException If {@code credential} is {@code null}. + */ + public DataLakePathClientBuilder credential(StorageSharedKeyCredential credential) { + blobClientBuilder.credential(credential); + this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); + this.tokenCredential = null; + this.sasTokenCredential = null; + return this; + } + + /** + * Sets the {@link TokenCredential} used to authorize requests sent to the service. + * + * @param credential The credential to use for authenticating requests. + * @return the updated DataLakePathClientBuilder + * @throws NullPointerException If {@code credential} is {@code null}. + */ + public DataLakePathClientBuilder credential(TokenCredential credential) { + blobClientBuilder.credential(credential); + this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); + this.storageSharedKeyCredential = null; + this.sasTokenCredential = null; + return this; + } + + /** + * Sets the SAS token used to authorize requests sent to the service. + * + * @param sasToken The SAS token to use for authenticating requests. + * @return the updated DataLakePathClientBuilder + * @throws NullPointerException If {@code sasToken} is {@code null}. 
+ */ + public DataLakePathClientBuilder sasToken(String sasToken) { + blobClientBuilder.sasToken(sasToken); + this.sasTokenCredential = new SasTokenCredential(Objects.requireNonNull(sasToken, + "'sasToken' cannot be null.")); + this.storageSharedKeyCredential = null; + this.tokenCredential = null; + return this; + } + + /** + * Clears the credential used to authorize the request. + * + *This is for paths that are publicly accessible.
+ * + * @return the updated DataLakePathClientBuilder + */ + public DataLakePathClientBuilder setAnonymousAccess() { + blobClientBuilder.setAnonymousAccess(); + this.storageSharedKeyCredential = null; + this.tokenCredential = null; + this.sasTokenCredential = null; + return this; + } + + /** + * Sets the service endpoint and additionally parses it for information (SAS token, file system name, path name). + * + *If the endpoint is to a file/directory in the root file system, this method will fail as it will interpret the + * path name as the file system name. With only one path element, it is impossible to distinguish between a file + * system name and a path in the root file system, so it is assumed to be the file system name as this is much more + * common. When working with paths in the root file system, it is best to set the endpoint to the account URL and + * specify the path name separately using the {@link DataLakePathClientBuilder#pathName(String) pathName} method. + *
+ * + * @param endpoint URL of the service + * @return the updated DataLakePathClientBuilder object + * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. + */ + public DataLakePathClientBuilder endpoint(String endpoint) { + blobClientBuilder.endpoint(Transforms.endpointToDesiredEndpoint(endpoint, "blob", "dfs")); + try { + URL url = new URL(endpoint); + BlobUrlParts parts = BlobUrlParts.parse(url); + + this.accountName = parts.getAccountName(); + this.endpoint = parts.getScheme() + "://" + parts.getHost(); + this.fileSystemName = parts.getBlobContainerName(); + this.pathName = parts.getBlobName(); + + String sasToken = parts.getSasQueryParameters().encode(); + if (!ImplUtils.isNullOrEmpty(sasToken)) { + this.sasToken(sasToken); + } + } catch (MalformedURLException ex) { + throw logger.logExceptionAsError( + new IllegalArgumentException("The Azure Storage DataLake endpoint url is malformed.")); + } + return this; + } + + /** + * Sets the name of the file system that contains the path. + * + * @param fileSystemName Name of the file system. If the value is {@code null} or empty, the root file system, + * {@code $root}, will be used. + * @return the updated DataLakePathClientBuilder object + */ + public DataLakePathClientBuilder fileSystemName(String fileSystemName) { + blobClientBuilder.containerName(fileSystemName); + this.fileSystemName = fileSystemName; + return this; + } + + /** + * Sets the name of the file/directory. + * + * @param pathName Name of the path. + * @return the updated DataLakePathClientBuilder object + * @throws NullPointerException If {@code pathName} is {@code null}. + */ + public DataLakePathClientBuilder pathName(String pathName) { + blobClientBuilder.blobName(pathName); + this.pathName = Objects.requireNonNull(pathName, "'pathName' cannot be null."); + return this; + } + + /** + * Sets the {@link HttpClient} to use for sending and receiving requests to and from the service. + * + * @param httpClient HttpClient to use for requests. + * @return the updated DataLakePathClientBuilder object + */ + public DataLakePathClientBuilder httpClient(HttpClient httpClient) { + blobClientBuilder.httpClient(httpClient); + if (this.httpClient != null && httpClient == null) { + logger.info("'httpClient' is being set to 'null' when it was previously configured."); + } + + this.httpClient = httpClient; + return this; + } + + /** + * Adds a pipeline policy to apply on each request sent. + * + * @param pipelinePolicy a pipeline policy + * @return the updated DataLakePathClientBuilder object + * @throws NullPointerException If {@code pipelinePolicy} is {@code null}. + */ + public DataLakePathClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { + blobClientBuilder.addPolicy(pipelinePolicy); + this.additionalPolicies.add(Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null")); + return this; + } + + /** + * Gets the default Storage whitelist log headers and query parameters. + * + * @return the default http log options. + */ + public static HttpLogOptions getDefaultHttpLogOptions() { + return BuilderHelper.getDefaultHttpLogOptions(); + } + + /** + * Sets the {@link HttpLogOptions} for service requests. + * + * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. + * @return the updated DataLakePathClientBuilder object + * @throws NullPointerException If {@code logOptions} is {@code null}. 
+ */ + public DataLakePathClientBuilder httpLogOptions(HttpLogOptions logOptions) { + blobClientBuilder.httpLogOptions(logOptions); + this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null."); + return this; + } + + /** + * Sets the configuration object used to retrieve environment configuration values during building of the client. + * + * @param configuration Configuration store used to retrieve environment configurations. + * @return the updated DataLakePathClientBuilder object + */ + public DataLakePathClientBuilder configuration(Configuration configuration) { + blobClientBuilder.configuration(configuration); + this.configuration = configuration; + return this; + } + + /** + * Sets the request retry options for all the requests made through the client. + * + * @param retryOptions The options used to configure retry behavior. + * @return the updated DataLakePathClientBuilder object + * @throws NullPointerException If {@code retryOptions} is {@code null}. + */ + public DataLakePathClientBuilder retryOptions(RequestRetryOptions retryOptions) { + blobClientBuilder.retryOptions(retryOptions); + this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); + return this; + } + + /** + * Sets the {@link HttpPipeline} to use for the service client. + * + * If {@code pipeline} is set, all other settings are ignored, aside from {@link #endpoint(String) endpoint}. + * + * @param httpPipeline HttpPipeline to use for sending service requests and receiving responses. + * @return the updated DataLakePathClientBuilder object + */ + public DataLakePathClientBuilder pipeline(HttpPipeline httpPipeline) { + blobClientBuilder.pipeline(httpPipeline); + if (this.httpPipeline != null && httpPipeline == null) { + logger.info("HttpPipeline is being set to 'null' when it was previously configured."); + } + + this.httpPipeline = httpPipeline; + return this; + } + + // TODO (gapra) : Determine how to set blob version as well + /** + * Sets the {@link DataLakeServiceVersion} that is used when making API requests. + * + * If a service version is not provided, the service version that will be used will be the latest known service + * version based on the version of the client library being used. If no service version is specified, updating to a + * newer version of the client library will have the result of potentially moving to a newer service version. + * + * @param version {@link DataLakeServiceVersion} of the service to be used when making requests. + * @return the updated DataLakePathClientBuilder object + */ + public DataLakePathClientBuilder serviceVersion(DataLakeServiceVersion version) { + this.version = version; + return this; + } + +} diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeServiceAsyncClient.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeServiceAsyncClient.java new file mode 100644 index 0000000000000..4f77e1a4ece9d --- /dev/null +++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeServiceAsyncClient.java @@ -0,0 +1,321 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.storage.file.datalake; + +import com.azure.core.annotation.ServiceClient; +import com.azure.core.credential.TokenCredential; +import com.azure.core.http.HttpPipeline; +import com.azure.core.http.rest.PagedFlux; +import com.azure.core.http.rest.Response; +import com.azure.core.http.rest.SimpleResponse; +import com.azure.core.implementation.util.FluxUtil; +import com.azure.core.implementation.util.ImplUtils; +import com.azure.core.util.logging.ClientLogger; +import com.azure.storage.blob.BlobServiceAsyncClient; +import com.azure.storage.common.Utility; +import com.azure.storage.common.implementation.StorageImplUtils; +import com.azure.storage.file.datalake.implementation.DataLakeStorageClientBuilder; +import com.azure.storage.file.datalake.implementation.DataLakeStorageClientImpl; +import com.azure.storage.file.datalake.models.DataLakeRequestConditions; +import com.azure.storage.file.datalake.models.FileSystemItem; +import com.azure.storage.file.datalake.models.ListFileSystemsOptions; +import com.azure.storage.file.datalake.models.PublicAccessType; +import com.azure.storage.file.datalake.models.UserDelegationKey; +import reactor.core.publisher.Mono; + +import java.time.OffsetDateTime; +import java.util.Map; + +import static com.azure.core.implementation.util.FluxUtil.monoError; +import static com.azure.core.implementation.util.FluxUtil.pagedFluxError; + + +/** + * Client to a storage account. It may only be instantiated through a {@link DataLakeServiceClientBuilder}. This class + * does not hold any state about a particular storage account but is instead a convenient way of sending off appropriate + * requests to the resource on the service. It may also be used to construct URLs to file systems, files and + * directories. + * + *
+ * This client contains operations on the main data lake service account. Operations on a file system are available on + * {@link DataLakeFileSystemAsyncClient} through {@link #getFileSystemAsyncClient(String)}, and operations on a file or + * directory are available on {@link DataLakeFileAsyncClient} or {@link DataLakeDirectoryAsyncClient}. + * + *
+ * + * Note this client is an async client that returns reactive responses from the Project Reactor library + * (https://projectreactor.io/). Calling the methods in this client will NOT start the actual network + * operation until {@code .subscribe()} is called on the reactive response. You can simply convert one of these + * responses to a {@link java.util.concurrent.CompletableFuture} object through {@link Mono#toFuture()}. + */ +@ServiceClient(builder = DataLakeServiceClientBuilder.class, isAsync = true) +public class DataLakeServiceAsyncClient { + private final ClientLogger logger = new ClientLogger(DataLakeServiceAsyncClient.class); + + private final DataLakeStorageClientImpl azureDataLakeStorage; + + private final String accountName; + private final DataLakeServiceVersion serviceVersion; + + private final BlobServiceAsyncClient blobServiceAsyncClient; + + /** + * Package-private constructor for use by {@link DataLakeServiceClientBuilder}. + * + * @param pipeline The pipeline used to send and receive service requests. + * @param url The endpoint where to send service requests. + * @param serviceVersion The version of the service to receive requests. + * @param accountName The storage account name. + * @param blobServiceAsyncClient The underlying {@link BlobServiceAsyncClient} + */ + DataLakeServiceAsyncClient(HttpPipeline pipeline, String url, DataLakeServiceVersion serviceVersion, + String accountName, BlobServiceAsyncClient blobServiceAsyncClient) { + this.azureDataLakeStorage = new DataLakeStorageClientBuilder() + .pipeline(pipeline) + .url(url) + .version(serviceVersion.getVersion()) + .build(); + this.serviceVersion = serviceVersion; + + this.accountName = accountName; + + this.blobServiceAsyncClient = blobServiceAsyncClient; + } + + /** + * Initializes a {@link DataLakeFileSystemAsyncClient} object pointing to the specified file system. This method + * does not create a file system. It simply constructs the URL to the file system and offers access to methods + * relevant to file systems. + * + *
Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.getFileSystemAsyncClient#String} + * + * @param fileSystemName The name of the file system to point to. A value of null or empty string will be + * interpreted as pointing to the root file system and will be replaced by "$root". + * @return A {@link DataLakeFileSystemAsyncClient} object pointing to the specified file system + */ + public DataLakeFileSystemAsyncClient getFileSystemAsyncClient(String fileSystemName) { + if (ImplUtils.isNullOrEmpty(fileSystemName)) { + fileSystemName = DataLakeFileSystemAsyncClient.ROOT_FILESYSTEM_NAME; + } + return new DataLakeFileSystemAsyncClient(getHttpPipeline(), + StorageImplUtils.appendToUrlPath(getAccountUrl(), Utility.urlEncode(Utility.urlDecode(fileSystemName))) + .toString(), getServiceVersion(), getAccountName(), fileSystemName, + blobServiceAsyncClient.getBlobContainerAsyncClient(fileSystemName) + ); + } + + /** + * Gets the {@link HttpPipeline} powering this client. + * + * @return The pipeline. + */ + public HttpPipeline getHttpPipeline() { + return azureDataLakeStorage.getHttpPipeline(); + } + + /** + * Gets the service version the client is using. + * + * @return the service version the client is using. + */ + public DataLakeServiceVersion getServiceVersion() { + return serviceVersion; + } + + /** + * Creates a new file system within a storage account. If a file system with the same name already exists, the + * operation fails. For more information, see the + * Azure Docs. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.createFileSystem#String} + * + * @param fileSystemName Name of the file system to create + * @return A {@link Mono} containing a {@link DataLakeFileSystemAsyncClient} used to interact with the file system + * created. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.createFileSystemWithResponse#String-Map-PublicAccessType} + * + * @param fileSystemName Name of the file system to create + * @param metadata Metadata to associate with the file system + * @param accessType Specifies how the data in this file system is available to the public. See the + * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access. + * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} contains a {@link + * DataLakeFileSystemAsyncClient} used to interact with the file system created. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.deleteFileSystem#String} + * + * @param fileSystemName Name of the file system to delete + * @return A reactive response signalling completion. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.deleteFileSystemWithResponse#String-DataLakeRequestConditions} + * + * @param fileSystemName Name of the file system to delete + * @param accessConditions {@link DataLakeRequestConditions} + * @return A {@link Mono} containing status code and HTTP headers + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.listFileSystems} + * + * @return A reactive response emitting the list of file systems. + */ + public PagedFluxCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.listFileSystems#ListFileSystemsOptions} + * + * @param options A {@link ListFileSystemsOptions} which specifies what data should be returned by the service. + * @return A reactive response emitting the list of file systems. + */ + public PagedFluxCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.getUserDelegationKey#OffsetDateTime-OffsetDateTime} + * + * @param start Start time for the key's validity. Null indicates immediate start. + * @param expiry Expiration of the key's validity. + * @return A {@link Mono} containing the user delegation key. + * @throws IllegalArgumentException If {@code start} isn't null and is after {@code expiry}. + * @throws NullPointerException If {@code expiry} is null. + */ + public MonoCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.getUserDelegationKeyWithResponse#OffsetDateTime-OffsetDateTime} + * + * @param start Start time for the key's validity. Null indicates immediate start. + * @param expiry Expiration of the key's validity. + * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} contains the user + * delegation key. + * @throws IllegalArgumentException If {@code start} isn't null and is after {@code expiry}. + * @throws NullPointerException If {@code expiry} is null. + */ + public Mono+ * This client contains operations on a data lake service account. Operations on a file system are available on + * {@link DataLakeFileSystemClient} through {@link #getFileSystemClient(String)}, and operations on a file or directory + * are available on {@link DataLakeFileClient} and {@link DataLakeDirectoryClient} respectively. + */ +@ServiceClient(builder = DataLakeServiceClientBuilder.class) +public class DataLakeServiceClient { + + private final DataLakeServiceAsyncClient dataLakeServiceAsyncClient; + private final BlobServiceClient blobServiceClient; + + /** + * Package-private constructor for use by {@link DataLakeServiceClientBuilder}. + * + * @param dataLakeServiceAsyncClient the async data lake service client. + * @param blobServiceClient the sync blob service client. + */ + DataLakeServiceClient(DataLakeServiceAsyncClient dataLakeServiceAsyncClient, BlobServiceClient blobServiceClient) { + this.dataLakeServiceAsyncClient = dataLakeServiceAsyncClient; + this.blobServiceClient = blobServiceClient; + } + + /** + * Initializes a {@link DataLakeFileSystemClient} object pointing to the specified file system. This method does + * not create a file system. It simply constructs the URL to the file system and offers access to methods relevant + * to file systems. + * + *
Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.getFileSystemClient#String} + * + * @param fileSystemName The name of the file system to point to. + * @return A {@link DataLakeFileSystemClient} object pointing to the specified file system + */ + public DataLakeFileSystemClient getFileSystemClient(String fileSystemName) { + return new DataLakeFileSystemClient(dataLakeServiceAsyncClient.getFileSystemAsyncClient(fileSystemName), + blobServiceClient.getBlobContainerClient(fileSystemName)); + } + + /** + * Gets the {@link HttpPipeline} powering this client. + * + * @return The pipeline. + */ + public HttpPipeline getHttpPipeline() { + return dataLakeServiceAsyncClient.getHttpPipeline(); + } + + /** + * Gets the service version the client is using. + * + * @return the service version the client is using. + */ + public DataLakeServiceVersion getServiceVersion() { + return this.dataLakeServiceAsyncClient.getServiceVersion(); + } + + /** + * Creates a new file system within a storage account. If a file system with the same name already exists, the + * operation fails. For more information, see the + * Azure Docs. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.createFileSystem#String} + * + * @param fileSystemName Name of the file system to create + * @return The {@link DataLakeFileSystemClient} used to interact with the file system created. + */ + public DataLakeFileSystemClient createFileSystem(String fileSystemName) { + return createFileSystemWithResponse(fileSystemName, null, null, Context.NONE).getValue(); + } + + /** + * Creates a new file system within a storage account. If a file system with the same name already exists, the + * operation fails. For more information, see the + * Azure Docs. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.createFileSystemWithResponse#String-Map-PublicAccessType-Context} + * + * @param fileSystemName Name of the file system to create + * @param metadata Metadata to associate with the file system. + * @param accessType Specifies how the data in this file system is available to the public. See the + * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access. + * @param context Additional context that is passed through the Http pipeline during the service call. + * @return A {@link Response} whose {@link Response#getValue() value} contains the {@link DataLakeFileSystemClient} + * used to interact with the file system created. + */ + public ResponseCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.deleteFileSystem#String} + * + * @param fileSystemName Name of the file system to delete + */ + public void deleteFileSystem(String fileSystemName) { + deleteFileSystemWithResponse(fileSystemName, null, Context.NONE).getValue(); + } + + /** + * Deletes the specified file system in the storage account. If the file system doesn't exist the operation fails. + * For more information see the Azure + * Docs. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.deleteFileSystemWithResponse#String-DataLakeRequestConditions-Context} + * + * @param fileSystemName Name of the file system to delete + * @param accessConditions {@link DataLakeRequestConditions} + * @param context Additional context that is passed through the Http pipeline during the service call. + * @return A response containing status code and HTTP headers + */ + public ResponseCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.listFileSystems} + * + * @return The list of file systems. + */ + public PagedIterableCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.listFileSystems#ListFileSystemsOptions-Duration} + * + * @param options A {@link ListFileSystemsOptions} which specifies what data should be returned by the service. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @return The list of file systems. + */ + public PagedIterableCode Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.getUserDelegationKey#OffsetDateTime-OffsetDateTime} + * + * @param start Start time for the key's validity. Null indicates immediate start. + * @param expiry Expiration of the key's validity. + * @return The user delegation key. + */ + public UserDelegationKey getUserDelegationKey(OffsetDateTime start, OffsetDateTime expiry) { + return getUserDelegationKeyWithResponse(start, expiry, null, Context.NONE).getValue(); + } + + /** + * Gets a user delegation key for use with this account's data lake storage. Note: This method call is only valid + * when using {@link TokenCredential} in this object's {@link HttpPipeline}. + * + *Code Samples
+ * + * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.getUserDelegationKeyWithResponse#OffsetDateTime-OffsetDateTime-Duration-Context} + * + * @param start Start time for the key's validity. Null indicates immediate start. + * @param expiry Expiration of the key's validity. + * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. + * @param context Additional context that is passed through the Http pipeline during the service call. + * @return A {@link Response} whose {@link Response#getValue() value} contains the user delegation key. + */ + public Response+ * The following information must be provided on this builder: + * + *
+ * If a service version is not provided, the service version that will be used will be the latest known service
+ * version based on the version of the client library being used. If no service version is specified, updating to a
+ * newer version of the client library will have the result of potentially moving to a newer service version.
+ *
+ * @param version {@link DataLakeServiceVersion} of the service to be used when making requests.
+ * @return the updated DataLakeServiceClientBuilder object
+ */
+ public DataLakeServiceClientBuilder serviceVersion(DataLakeServiceVersion version) {
+ this.version = version;
+ return this;
+ }
+
+}
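
A minimal usage sketch for the builder and version plumbing above (not part of the diff). The endpoint, credential, and buildClient calls mirror the path builder shown earlier and are assumed to exist on the service builder; account values are placeholders:

    import com.azure.storage.common.StorageSharedKeyCredential;
    import com.azure.storage.file.datalake.DataLakeServiceClient;
    import com.azure.storage.file.datalake.DataLakeServiceClientBuilder;
    import com.azure.storage.file.datalake.DataLakeServiceVersion;

    public class ServiceClientVersionSample {
        public static void main(String[] args) {
            // Placeholder credentials; substitute a real account name and key.
            StorageSharedKeyCredential credential =
                new StorageSharedKeyCredential("<account-name>", "<account-key>");

            // Pin the service version explicitly; when serviceVersion(...) is omitted,
            // the builder falls back to DataLakeServiceVersion.getLatest().
            DataLakeServiceClient serviceClient = new DataLakeServiceClientBuilder()
                .endpoint("https://<account-name>.dfs.core.windows.net")
                .credential(credential)
                .serviceVersion(DataLakeServiceVersion.V2019_02_02)
                .buildClient();

            serviceClient.createFileSystem("my-file-system");
        }
    }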
diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeServiceVersion.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeServiceVersion.java
new file mode 100644
index 0000000000000..771f8ce5b01da
--- /dev/null
+++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeServiceVersion.java
@@ -0,0 +1,36 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package com.azure.storage.file.datalake;
+
+import com.azure.core.util.ServiceVersion;
+
+/**
+ * The versions of Azure Storage Data Lake supported by this client library.
+ */
+public enum DataLakeServiceVersion implements ServiceVersion {
+ V2019_02_02("2019-02-02");
+
+ private final String version;
+
+ DataLakeServiceVersion(String version) {
+ this.version = version;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public String getVersion() {
+ return this.version;
+ }
+
+ /**
+ * Gets the latest service version supported by this client library.
+ *
+ * @return the latest {@link DataLakeServiceVersion}
+ */
+ public static DataLakeServiceVersion getLatest() {
+ return V2019_02_02;
+ }
+}
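
This enum is what the client builders consult when no version is configured; the defaulting idiom seen in buildFileAsyncClient and buildDirectoryAsyncClient earlier reduces to the following sketch:

    // 'configured' stands in for the builder's 'version' field.
    DataLakeServiceVersion configured = null; // serviceVersion(...) was never called
    DataLakeServiceVersion effective = (configured != null) ? configured : DataLakeServiceVersion.getLatest();
    assert "2019-02-02".equals(effective.getVersion());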
diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/Transforms.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/Transforms.java
new file mode 100644
index 0000000000000..1a1f034900462
--- /dev/null
+++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/Transforms.java
@@ -0,0 +1,310 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package com.azure.storage.file.datalake;
+
+import com.azure.storage.blob.models.BlobContainerItem;
+import com.azure.storage.blob.models.BlobContainerItemProperties;
+import com.azure.storage.blob.models.BlobContainerListDetails;
+import com.azure.storage.blob.models.BlobContainerProperties;
+import com.azure.storage.blob.models.BlobDownloadAsyncResponse;
+import com.azure.storage.blob.models.BlobDownloadHeaders;
+import com.azure.storage.blob.models.BlobDownloadResponse;
+import com.azure.storage.blob.models.BlobHttpHeaders;
+import com.azure.storage.blob.models.BlobProperties;
+import com.azure.storage.blob.models.BlobRange;
+import com.azure.storage.blob.models.BlobRequestConditions;
+import com.azure.storage.blob.models.ListBlobContainersOptions;
+import com.azure.storage.file.datalake.implementation.models.Path;
+import com.azure.storage.file.datalake.models.AccessTier;
+import com.azure.storage.file.datalake.models.ArchiveStatus;
+import com.azure.storage.file.datalake.models.CopyStatusType;
+import com.azure.storage.file.datalake.models.DataLakeRequestConditions;
+import com.azure.storage.file.datalake.models.FileRange;
+import com.azure.storage.file.datalake.models.FileReadAsyncResponse;
+import com.azure.storage.file.datalake.models.FileReadHeaders;
+import com.azure.storage.file.datalake.models.FileReadResponse;
+import com.azure.storage.file.datalake.models.FileSystemItem;
+import com.azure.storage.file.datalake.models.FileSystemItemProperties;
+import com.azure.storage.file.datalake.models.FileSystemListDetails;
+import com.azure.storage.file.datalake.models.FileSystemProperties;
+import com.azure.storage.file.datalake.models.LeaseDurationType;
+import com.azure.storage.file.datalake.models.LeaseStateType;
+import com.azure.storage.file.datalake.models.LeaseStatusType;
+import com.azure.storage.file.datalake.models.ListFileSystemsOptions;
+import com.azure.storage.file.datalake.models.PathHttpHeaders;
+import com.azure.storage.file.datalake.models.PathItem;
+import com.azure.storage.file.datalake.models.PathProperties;
+import com.azure.storage.file.datalake.models.PublicAccessType;
+import com.azure.storage.file.datalake.models.DownloadRetryOptions;
+import com.azure.storage.file.datalake.models.UserDelegationKey;
+
+import java.time.OffsetDateTime;
+import java.time.format.DateTimeFormatter;
+
+class Transforms {
+
+ static com.azure.storage.blob.models.PublicAccessType toBlobPublicAccessType(PublicAccessType
+ fileSystemPublicAccessType) {
+ if (fileSystemPublicAccessType == null) {
+ return null;
+ }
+ return com.azure.storage.blob.models.PublicAccessType.fromString(fileSystemPublicAccessType.toString());
+ }
+
+ private static LeaseDurationType toDataLakeLeaseDurationType(com.azure.storage.blob.models.LeaseDurationType
+ blobLeaseDurationType) {
+ if (blobLeaseDurationType == null) {
+ return null;
+ }
+ return LeaseDurationType.fromString(blobLeaseDurationType.toString());
+ }
+
+ private static LeaseStateType toDataLakeLeaseStateType(com.azure.storage.blob.models.LeaseStateType
+ blobLeaseStateType) {
+ if (blobLeaseStateType == null) {
+ return null;
+ }
+ return LeaseStateType.fromString(blobLeaseStateType.toString());
+ }
+
+ private static LeaseStatusType toDataLakeLeaseStatusType(com.azure.storage.blob.models.LeaseStatusType
+ blobLeaseStatusType) {
+ if (blobLeaseStatusType == null) {
+ return null;
+ }
+ return LeaseStatusType.fromString(blobLeaseStatusType.toString());
+ }
+
+ private static PublicAccessType toDataLakePublicAccessType(com.azure.storage.blob.models.PublicAccessType
+ blobPublicAccessType) {
+ if (blobPublicAccessType == null) {
+ return null;
+ }
+ return PublicAccessType.fromString(blobPublicAccessType.toString());
+ }
+
+ private static CopyStatusType toDataLakeCopyStatusType(
+ com.azure.storage.blob.models.CopyStatusType blobCopyStatus) {
+ if (blobCopyStatus == null) {
+ return null;
+ }
+ return CopyStatusType.fromString(blobCopyStatus.toString());
+ }
+
+ private static ArchiveStatus toDataLakeArchiveStatus(
+ com.azure.storage.blob.models.ArchiveStatus blobArchiveStatus) {
+ if (blobArchiveStatus == null) {
+ return null;
+ }
+ return ArchiveStatus.fromString(blobArchiveStatus.toString());
+ }
+
+ private static AccessTier toDataLakeAccessTier(com.azure.storage.blob.models.AccessTier blobAccessTier) {
+ if (blobAccessTier == null) {
+ return null;
+ }
+ return AccessTier.fromString(blobAccessTier.toString());
+ }
+
+ static FileSystemProperties toFileSystemProperties(BlobContainerProperties blobContainerProperties) {
+ if (blobContainerProperties == null) {
+ return null;
+ }
+ return new FileSystemProperties(blobContainerProperties.getMetadata(), blobContainerProperties.getETag(),
+ blobContainerProperties.getLastModified(),
+ Transforms.toDataLakeLeaseDurationType(blobContainerProperties.getLeaseDuration()),
+ Transforms.toDataLakeLeaseStateType(blobContainerProperties.getLeaseState()),
+ Transforms.toDataLakeLeaseStatusType(blobContainerProperties.getLeaseStatus()),
+ Transforms.toDataLakePublicAccessType(blobContainerProperties.getBlobPublicAccess()),
+ blobContainerProperties.hasImmutabilityPolicy(), blobContainerProperties.hasLegalHold());
+ }
+
+ private static BlobContainerListDetails toBlobContainerListDetails(FileSystemListDetails fileSystemListDetails) {
+ return new BlobContainerListDetails()
+ .setRetrieveMetadata(fileSystemListDetails.getRetrieveMetadata());
+ }
+
+ static ListBlobContainersOptions toListBlobContainersOptions(ListFileSystemsOptions listFileSystemsOptions) {
+ return new ListBlobContainersOptions()
+ .setDetails(toBlobContainerListDetails(listFileSystemsOptions.getDetails()))
+ .setMaxResultsPerPage(listFileSystemsOptions.getMaxResultsPerPage())
+ .setPrefix(listFileSystemsOptions.getPrefix());
+ }
+
+ static UserDelegationKey toDataLakeUserDelegationKey(com.azure.storage.blob.models.UserDelegationKey
+ blobUserDelegationKey) {
+ if (blobUserDelegationKey == null) {
+ return null;
+ }
+ return new UserDelegationKey()
+ .setSignedExpiry(blobUserDelegationKey.getSignedExpiry())
+ .setSignedObjectId(blobUserDelegationKey.getSignedObjectId())
+ .setSignedTenantId(blobUserDelegationKey.getSignedTenantId())
+ .setSignedService(blobUserDelegationKey.getSignedService())
+ .setSignedStart(blobUserDelegationKey.getSignedStart())
+ .setSignedVersion(blobUserDelegationKey.getSignedVersion())
+ .setValue(blobUserDelegationKey.getValue());
+ }
+
+ static String endpointToDesiredEndpoint(String endpoint, String desiredEndpoint, String currentEndpoint) {
+ // Match the "."-delimited endpoint token literally; Pattern.quote and Matcher.quoteReplacement keep the
+ // dots from being interpreted as regex metacharacters by String.replaceFirst.
+ String desiredStringToMatch = "." + desiredEndpoint + ".";
+ String currentStringToMatch = "." + currentEndpoint + ".";
+ if (endpoint.contains(desiredStringToMatch)) {
+ return endpoint;
+ } else {
+ return endpoint.replaceFirst(java.util.regex.Pattern.quote(currentStringToMatch),
+ java.util.regex.Matcher.quoteReplacement(desiredStringToMatch));
+ }
+ }
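+
+ // Illustrative call, matching the usage in DataLakePathClientBuilder#endpoint (account name is a placeholder):
+ //   endpointToDesiredEndpoint("https://myaccount.dfs.core.windows.net", "blob", "dfs")
+ //   returns "https://myaccount.blob.core.windows.net".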
+
+ static BlobHttpHeaders toBlobHttpHeaders(PathHttpHeaders pathHTTPHeaders) {
+ if (pathHTTPHeaders == null) {
+ return null;
+ }
+ return new BlobHttpHeaders()
+ .setCacheControl(pathHTTPHeaders.getCacheControl())
+ .setContentDisposition(pathHTTPHeaders.getContentDisposition())
+ .setContentEncoding(pathHTTPHeaders.getContentEncoding())
+ .setContentLanguage(pathHTTPHeaders.getContentLanguage())
+ .setContentType(pathHTTPHeaders.getContentType())
+ .setContentMd5(pathHTTPHeaders.getContentMd5());
+ }
+
+ static BlobRange toBlobRange(FileRange fileRange) {
+ if (fileRange == null) {
+ return null;
+ }
+ return new BlobRange(fileRange.getOffset(), fileRange.getCount());
+ }
+
+ static com.azure.storage.blob.models.DownloadRetryOptions toBlobDownloadRetryOptions(
+ DownloadRetryOptions dataLakeOptions) {
+ if (dataLakeOptions == null) {
+ return null;
+ }
+ return new com.azure.storage.blob.models.DownloadRetryOptions()
+ .setMaxRetryRequests(dataLakeOptions.getMaxRetryRequests());
+ }
+
+ static PathProperties toPathProperties(BlobProperties properties) {
+ if (properties == null) {
+ return null;
+ } else {
+ return new PathProperties(properties.getCreationTime(), properties.getLastModified(), properties.getETag(),
+ properties.getBlobSize(), properties.getContentType(), properties.getContentMd5(),
+ properties.getContentEncoding(), properties.getContentDisposition(), properties.getContentLanguage(),
+ properties.getCacheControl(), Transforms.toDataLakeLeaseStatusType(properties.getLeaseStatus()),
+ Transforms.toDataLakeLeaseStateType(properties.getLeaseState()),
+ Transforms.toDataLakeLeaseDurationType(properties.getLeaseDuration()), properties.getCopyId(),
+ Transforms.toDataLakeCopyStatusType(properties.getCopyStatus()), properties.getCopySource(),
+ properties.getCopyProgress(), properties.getCopyCompletionTime(), properties.getCopyStatusDescription(),
+ properties.isServerEncrypted(), properties.isIncrementalCopy(),
+ Transforms.toDataLakeAccessTier(properties.getAccessTier()),
+ Transforms.toDataLakeArchiveStatus(properties.getArchiveStatus()), properties.getEncryptionKeySha256(),
+ properties.getAccessTierChangeTime(), properties.getMetadata());
+ }
+ }
+
+
+ static FileSystemItem toFileSystemItem(BlobContainerItem blobContainerItem) {
+ if (blobContainerItem == null) {
+ return null;
+ }
+ return new FileSystemItem()
+ .setName(blobContainerItem.getName())
+ .setMetadata(blobContainerItem.getMetadata())
+ .setProperties(Transforms.toFileSystemItemProperties(blobContainerItem.getProperties()));
+ }
+
+ private static FileSystemItemProperties toFileSystemItemProperties(
+ BlobContainerItemProperties blobContainerItemProperties) {
+ if (blobContainerItemProperties == null) {
+ return null;
+ }
+ return new FileSystemItemProperties()
+ .setETag(blobContainerItemProperties.getETag())
+ .setLastModified(blobContainerItemProperties.getLastModified())
+ .setLeaseStatus(toDataLakeLeaseStatusType(blobContainerItemProperties.getLeaseStatus()))
+ .setLeaseState(toDataLakeLeaseStateType(blobContainerItemProperties.getLeaseState()))
+ .setLeaseDuration(toDataLakeLeaseDurationType(blobContainerItemProperties.getLeaseDuration()))
+ .setPublicAccess(toDataLakePublicAccessType(blobContainerItemProperties.getPublicAccess()))
+ .setHasLegalHold(blobContainerItemProperties.isHasLegalHold())
+ .setHasImmutabilityPolicy(blobContainerItemProperties.isHasImmutabilityPolicy());
+ }
+
+ static PathItem toPathItem(Path path) {
+ if (path == null) {
+ return null;
+ }
+ return new PathItem(path.getETag(),
+ OffsetDateTime.parse(path.getLastModified(), DateTimeFormatter.RFC_1123_DATE_TIME),
+ path.getContentLength(), path.getGroup(), path.isDirectory() == null ? false : path.isDirectory(),
+ path.getName(), path.getOwner(), path.getPermissions());
+ }
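+
+ // The service reports Last-Modified in RFC 1123 form; an illustrative value such as
+ // "Mon, 04 Nov 2019 20:08:00 GMT" parses to the OffsetDateTime 2019-11-04T20:08Z.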
+
+ static BlobRequestConditions toBlobRequestConditions(DataLakeRequestConditions accessConditions) {
+ if (accessConditions == null) {
+ return null;
+ }
+ return new BlobRequestConditions()
+ .setLeaseId(accessConditions.getLeaseId())
+ .setIfUnmodifiedSince(accessConditions.getIfUnmodifiedSince())
+ .setIfNoneMatch(accessConditions.getIfNoneMatch())
+ .setIfMatch(accessConditions.getIfMatch())
+ .setIfModifiedSince(accessConditions.getIfModifiedSince());
+ }
+
+ static FileReadResponse toFileReadResponse(BlobDownloadResponse r) {
+ if (r == null) {
+ return null;
+ }
+ return new FileReadResponse(Transforms.toFileReadAsyncResponse(new BlobDownloadAsyncResponse(r.getRequest(),
+ r.getStatusCode(), r.getHeaders(), null, r.getDeserializedHeaders())));
+ }
+
+ static FileReadAsyncResponse toFileReadAsyncResponse(BlobDownloadAsyncResponse r) {
+ if (r == null) {
+ return null;
+ }
+ return new FileReadAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), r.getValue(),
+ Transforms.toPathReadHeaders(r.getDeserializedHeaders()));
+ }
+
+ private static FileReadHeaders toPathReadHeaders(BlobDownloadHeaders h) {
+ if (h == null) {
+ return null;
+ }
+ return new FileReadHeaders()
+ .setLastModified(h.getLastModified())
+ .setMetadata(h.getMetadata())
+ .setContentLength(h.getContentLength())
+ .setContentType(h.getContentType())
+ .setContentRange(h.getContentRange())
+ .setETag(h.getETag())
+ .setContentMd5(h.getContentMd5())
+ .setContentEncoding(h.getContentEncoding())
+ .setCacheControl(h.getCacheControl())
+ .setContentDisposition(h.getContentDisposition())
+ .setContentLanguage(h.getContentLanguage())
+ .setCopyCompletionTime(h.getCopyCompletionTime())
+ .setCopyStatusDescription(h.getCopyStatusDescription())
+ .setCopyId(h.getCopyId())
+ .setCopyProgress(h.getCopyProgress())
+ .setCopySource(h.getCopySource())
+ .setCopyStatus(Transforms.toDataLakeCopyStatusType(h.getCopyStatus()))
+ .setLeaseDuration(Transforms.toDataLakeLeaseDurationType(h.getLeaseDuration()))
+ .setLeaseState(Transforms.toDataLakeLeaseStateType(h.getLeaseState()))
+ .setLeaseStatus(Transforms.toDataLakeLeaseStatusType(h.getLeaseStatus()))
+ .setClientRequestId(h.getClientRequestId())
+ .setRequestId(h.getRequestId())
+ .setVersion(h.getVersion())
+ .setAcceptRanges(h.getAcceptRanges())
+ .setDateProperty(h.getDateProperty())
+ .setIsServerEncrypted(h.isServerEncrypted())
+ .setEncryptionKeySha256(h.getEncryptionKeySha256())
+ .setFileContentMD5(h.getBlobContentMD5())
+ .setContentCrc64(h.getContentCrc64())
+ .setErrorCode(h.getErrorCode());
+ }
+}
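
Since every converter in Transforms checks for null and maps field-by-field, a round trip is easy to verify. A minimal sketch, runnable from a test in the same package (Transforms is package-private) and assuming the usual fluent setLeaseId setter on DataLakeRequestConditions:

    BlobRequestConditions blobConditions = Transforms.toBlobRequestConditions(
        new DataLakeRequestConditions().setLeaseId("my-lease-id"));
    assert "my-lease-id".equals(blobConditions.getLeaseId());
    assert Transforms.toBlobRequestConditions(null) == null; // null input maps to null output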
diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/implementation/DataLakeStorageClientBuilder.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/implementation/DataLakeStorageClientBuilder.java
index fc32992df5e9d..c4c8eb7ca3462 100644
--- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/implementation/DataLakeStorageClientBuilder.java
+++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/implementation/DataLakeStorageClientBuilder.java
@@ -14,50 +14,82 @@
@ServiceClientBuilder(serviceClients = DataLakeStorageClientImpl.class)
public final class DataLakeStorageClientBuilder {
/*
- * Specifies the version of the REST protocol used for processing the request. This is required when using shared key authorization.
+ * The URL of the service account, container, or blob that is the target of the desired operation.
*/
- private String xMsVersion;
+ private String url;
/**
- * Sets Specifies the version of the REST protocol used for processing the request. This is required when using shared key authorization.
+ * Sets The URL of the service account, container, or blob that is the target of the desired operation.
*
- * @param xMsVersion the xMsVersion value.
+ * @param url the url value.
* @return the DataLakeStorageClientBuilder.
*/
- public DataLakeStorageClientBuilder xMsVersion(String xMsVersion) {
- this.xMsVersion = xMsVersion;
+ public DataLakeStorageClientBuilder url(String url) {
+ this.url = url;
return this;
}
/*
- * The Azure Storage account name.
+ * The value must be "filesystem" for all filesystem operations.
*/
- private String accountName;
+ private String resource;
/**
- * Sets The Azure Storage account name.
+ * Sets The value must be "filesystem" for all filesystem operations.
*
- * @param accountName the accountName value.
+ * @param resource the resource value.
* @return the DataLakeStorageClientBuilder.
*/
- public DataLakeStorageClientBuilder accountName(String accountName) {
- this.accountName = accountName;
+ public DataLakeStorageClientBuilder resource(String resource) {
+ this.resource = resource;
return this;
}
/*
- * The DNS suffix for the Azure Data Lake Storage endpoint.
+ * Specifies the version of the operation to use for this request.
*/
- private String dnsSuffix;
+ private String version;
/**
- * Sets The DNS suffix for the Azure Data Lake Storage endpoint.
+ * Sets Specifies the version of the operation to use for this request.
*
- * @param dnsSuffix the dnsSuffix value.
+ * @param version the version value.
* @return the DataLakeStorageClientBuilder.
*/
- public DataLakeStorageClientBuilder dnsSuffix(String dnsSuffix) {
- this.dnsSuffix = dnsSuffix;
+ public DataLakeStorageClientBuilder version(String version) {
+ this.version = version;
+ return this;
+ }
+
+ /*
+ * The filesystem identifier.
+ */
+ private String fileSystem;
+
+ /**
+ * Sets The filesystem identifier.
+ *
+ * @param fileSystem the fileSystem value.
+ * @return the DataLakeStorageClientBuilder.
+ */
+ public DataLakeStorageClientBuilder fileSystem(String fileSystem) {
+ this.fileSystem = fileSystem;
+ return this;
+ }
+
+ /*
+ * The file or directory path.
+ */
+ private String path1;
+
+ /**
+ * Sets The file or directory path.
+ *
+ * @param path1 the path1 value.
+ * @return the DataLakeStorageClientBuilder.
+ */
+ public DataLakeStorageClientBuilder path1(String path1) {
+ this.path1 = path1;
return this;
}
@@ -87,16 +119,24 @@ public DataLakeStorageClientImpl build() {
this.pipeline = RestProxy.createDefaultPipeline();
}
DataLakeStorageClientImpl client = new DataLakeStorageClientImpl(pipeline);
- if (this.xMsVersion != null) {
- client.setXMsVersion(this.xMsVersion);
+ if (this.url != null) {
+ client.setUrl(this.url);
}
- if (this.accountName != null) {
- client.setAccountName(this.accountName);
+ if (this.resource != null) {
+ client.setResource(this.resource);
+ } else {
+ client.setResource("filesystem");
}
- if (this.dnsSuffix != null) {
- client.setDnsSuffix(this.dnsSuffix);
+ if (this.version != null) {
+ client.setVersion(this.version);
} else {
- client.setDnsSuffix("dfs.core.windows.net");
+ client.setVersion("2019-02-02");
+ }
+ if (this.fileSystem != null) {
+ client.setFileSystem(this.fileSystem);
+ }
+ if (this.path1 != null) {
+ client.setPath1(this.path1);
}
return client;
}
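
With the defaults above, leaving resource and version unset produces a client targeting filesystem operations at REST version 2019-02-02. A hypothetical sketch using this internal builder from the same package (the getters are added in DataLakeStorageClientImpl below):

    DataLakeStorageClientImpl impl = new DataLakeStorageClientBuilder()
        .url("https://<account-name>.dfs.core.windows.net")
        .fileSystem("my-file-system")
        .path1("dir/file.txt")
        .build(); // no resource(...) or version(...) calls
    assert "filesystem".equals(impl.getResource());
    assert "2019-02-02".equals(impl.getVersion());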
diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/implementation/DataLakeStorageClientImpl.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/implementation/DataLakeStorageClientImpl.java
index 928696c461eae..9174ea203063d 100644
--- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/implementation/DataLakeStorageClientImpl.java
+++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/implementation/DataLakeStorageClientImpl.java
@@ -12,74 +12,122 @@
*/
public final class DataLakeStorageClientImpl {
/**
- * Specifies the version of the REST protocol used for processing the request. This is required when using shared key authorization.
+ * The URL of the service account, container, or blob that is the target of the desired operation.
*/
- private String xMsVersion;
+ private String url;
/**
- * Gets Specifies the version of the REST protocol used for processing the request. This is required when using shared key authorization.
+ * Gets The URL of the service account, container, or blob that is the target of the desired operation.
*
- * @return the xMsVersion value.
+ * @return the url value.
*/
- public String getXMsVersion() {
- return this.xMsVersion;
+ public String getUrl() {
+ return this.url;
}
/**
- * Sets Specifies the version of the REST protocol used for processing the request. This is required when using shared key authorization.
+ * Sets The URL of the service account, container, or blob that is the target of the desired operation.
*
- * @param xMsVersion the xMsVersion value.
+ * @param url the url value.
*/
- DataLakeStorageClientImpl setXMsVersion(String xMsVersion) {
- this.xMsVersion = xMsVersion;
+ DataLakeStorageClientImpl setUrl(String url) {
+ this.url = url;
return this;
}
/**
- * The Azure Storage account name.
+ * The value must be "filesystem" for all filesystem operations.
*/
- private String accountName;
+ private String resource;
/**
- * Gets The Azure Storage account name.
+ * Gets The value must be "filesystem" for all filesystem operations.
*
- * @return the accountName value.
+ * @return the resource value.
*/
- public String getAccountName() {
- return this.accountName;
+ public String getResource() {
+ return this.resource;
}
/**
- * Sets The Azure Storage account name.
+ * Sets The value must be "filesystem" for all filesystem operations.
*
- * @param accountName the accountName value.
+ * @param resource the resource value.
*/
- DataLakeStorageClientImpl setAccountName(String accountName) {
- this.accountName = accountName;
+ DataLakeStorageClientImpl setResource(String resource) {
+ this.resource = resource;
return this;
}
/**
- * The DNS suffix for the Azure Data Lake Storage endpoint.
+ * Specifies the version of the operation to use for this request.
*/
- private String dnsSuffix;
+ private String version;
/**
- * Gets The DNS suffix for the Azure Data Lake Storage endpoint.
+ * Gets Specifies the version of the operation to use for this request.
*
- * @return the dnsSuffix value.
+ * @return the version value.
*/
- public String getDnsSuffix() {
- return this.dnsSuffix;
+ public String getVersion() {
+ return this.version;
}
/**
- * Sets The DNS suffix for the Azure Data Lake Storage endpoint.
+ * Sets Specifies the version of the operation to use for this request.
*
- * @param dnsSuffix the dnsSuffix value.
+ * @param version the version value.
*/
- DataLakeStorageClientImpl setDnsSuffix(String dnsSuffix) {
- this.dnsSuffix = dnsSuffix;
+ DataLakeStorageClientImpl setVersion(String version) {
+ this.version = version;
+ return this;
+ }
+
+ /**
+ * The filesystem identifier.
+ */
+ private String fileSystem;
+
+ /**
+ * Gets The filesystem identifier.
+ *
+ * @return the fileSystem value.
+ */
+ public String getFileSystem() {
+ return this.fileSystem;
+ }
+
+ /**
+ * Sets The filesystem identifier.
+ *
+ * @param fileSystem the fileSystem value.
+ */
+ DataLakeStorageClientImpl setFileSystem(String fileSystem) {
+ this.fileSystem = fileSystem;
+ return this;
+ }
+
+ /**
+ * The file or directory path.
+ */
+ private String path1;
+
+ /**
+ * Gets The file or directory path.
+ *
+ * @return the path1 value.
+ */
+ public String getPath1() {
+ return this.path1;
+ }
+
+ /**
+ * Sets The file or directory path.
+ *
+ * @param path1 the path1 value.
+ */
+ DataLakeStorageClientImpl setPath1(String path1) {
+ this.path1 = path1;
return this;
}
@@ -98,17 +146,31 @@ public HttpPipeline getHttpPipeline() {
}
/**
- * The FilesystemsImpl object to access its operations.
+ * The ServicesImpl object to access its operations.
+ */
+ private ServicesImpl services;
+
+ /**
+ * Gets the ServicesImpl object to access its operations.
+ *
+ * @return the ServicesImpl object.
+ */
+ public ServicesImpl services() {
+ return this.services;
+ }
+
+ /**
+ * The FileSystemsImpl object to access its operations.
*/
- private FilesystemsImpl filesystems;
+ private FileSystemsImpl fileSystems;
/**
- * Gets the FilesystemsImpl object to access its operations.
+ * Gets the FileSystemsImpl object to access its operations.
*
- * @return the FilesystemsImpl object.
+ * @return the FileSystemsImpl object.
*/
- public FilesystemsImpl filesystems() {
- return this.filesystems;
+ public FileSystemsImpl fileSystems() {
+ return this.fileSystems;
}
/**
@@ -139,7 +201,8 @@ public DataLakeStorageClientImpl() {
*/
public DataLakeStorageClientImpl(HttpPipeline httpPipeline) {
this.httpPipeline = httpPipeline;
- this.filesystems = new FilesystemsImpl(this);
+ this.services = new ServicesImpl(this);
+ this.fileSystems = new FileSystemsImpl(this);
this.paths = new PathsImpl(this);
}
}
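This completes the client-side changes: construction now wires three operation groups instead of one. Code in the implementation layer reaches the REST operations through the group accessors, roughly as in the sketch below; only services() and fileSystems() appear in this diff, and a symmetric paths() accessor is assumed to exist for the PathsImpl field:

    // Illustrative access to the generated operation groups; assumes a
    // constructed DataLakeStorageClientImpl named "client".
    ServicesImpl services = client.services();          // account/service-level operations
    FileSystemsImpl fileSystems = client.fileSystems(); // filesystem create/delete/list operations
    // Each group holds a RestProxy-backed service interface that performs the
    // actual HTTP calls, as shown in FileSystemsImpl below.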
diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/implementation/FileSystemsImpl.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/implementation/FileSystemsImpl.java
new file mode 100644
index 0000000000000..6ddedc950fa70
--- /dev/null
+++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/implementation/FileSystemsImpl.java
@@ -0,0 +1,283 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+// Code generated by Microsoft (R) AutoRest Code Generator.
+
+package com.azure.storage.file.datalake.implementation;
+
+import com.azure.core.annotation.Delete;
+import com.azure.core.annotation.ExpectedResponses;
+import com.azure.core.annotation.Get;
+import com.azure.core.annotation.Head;
+import com.azure.core.annotation.HeaderParam;
+import com.azure.core.annotation.Host;
+import com.azure.core.annotation.HostParam;
+import com.azure.core.annotation.Patch;
+import com.azure.core.annotation.PathParam;
+import com.azure.core.annotation.Put;
+import com.azure.core.annotation.QueryParam;
+import com.azure.core.annotation.ReturnType;
+import com.azure.core.annotation.ServiceInterface;
+import com.azure.core.annotation.ServiceMethod;
+import com.azure.core.annotation.UnexpectedResponseExceptionType;
+import com.azure.core.implementation.DateTimeRfc1123;
+import com.azure.core.implementation.RestProxy;
+import com.azure.core.util.Context;
+import com.azure.storage.file.datalake.implementation.models.FileSystemsCreateResponse;
+import com.azure.storage.file.datalake.implementation.models.FileSystemsDeleteResponse;
+import com.azure.storage.file.datalake.implementation.models.FileSystemsGetPropertiesResponse;
+import com.azure.storage.file.datalake.implementation.models.FileSystemsListPathsResponse;
+import com.azure.storage.file.datalake.implementation.models.FileSystemsSetPropertiesResponse;
+import com.azure.storage.file.datalake.implementation.models.ModifiedAccessConditions;
+import com.azure.storage.file.datalake.implementation.models.StorageErrorException;
+import java.time.OffsetDateTime;
+import reactor.core.publisher.Mono;
+
+/**
+ * An instance of this class provides access to all the operations defined in
+ * FileSystems.
+ */
+public final class FileSystemsImpl {
+ /**
+ * The proxy service used to perform REST calls.
+ */
+ private FileSystemsService service;
+
+ /**
+ * The service client containing this operation class.
+ */
+ private DataLakeStorageClientImpl client;
+
+ /**
+ * Initializes an instance of FileSystemsImpl.
+ *
+ * @param client the instance of the service client containing this operation class.
+ */
+ public FileSystemsImpl(DataLakeStorageClientImpl client) {
+ this.service = RestProxy.create(FileSystemsService.class, client.getHttpPipeline());
+ this.client = client;
+ }
+
+ /**
+ * The interface defining all the services for
+ * DataLakeStorageClientFileSystems to be used by the proxy service to
+ * perform REST calls.
+ */
+ @Host("{url}")
+ @ServiceInterface(name = "DataLakeStorageClientFileSystems")
+ private interface FileSystemsService {
+ @Put("{filesystem}")
+ @ExpectedResponses({201})
+ @UnexpectedResponseExceptionType(StorageErrorException.class)
+ Mono