From 24de93bee8175d23b522a6f2ce8b4d915b30d312 Mon Sep 17 00:00:00 2001
From: begoldsm
Date: Tue, 11 Oct 2016 16:17:33 -0700
Subject: [PATCH 1/3] Remove filesystem and uploader, rev gulp

The ADLS data plane SDK is now built separately and is being removed
from this repo. To keep the change size down, I am not regenerating all
clients in this change; I will do that in a separate PR.
---
 azure-mgmt-datalake-store-uploader/pom.xml | 147 --
 .../uploader/AggregateUploadException.java | 55 -
 .../DataLakeStoreFrontEndAdapterImpl.java | 143 --
 .../store/uploader/DataLakeStoreUploader.java | 379 ---
 .../store/uploader/FrontEndAdapter.java | 81 -
 .../uploader/InvalidMetadataException.java | 31 -
 .../uploader/MultipleSegmentUploader.java | 228 --
 .../store/uploader/SegmentUploadStatus.java | 31 -
 .../store/uploader/SingleSegmentUploader.java | 312 ---
 .../store/uploader/StringExtensions.java | 156 --
 .../store/uploader/UploadFailedException.java | 19 -
 .../store/uploader/UploadMetadata.java | 444 ----
 .../uploader/UploadMetadataGenerator.java | 252 --
 .../store/uploader/UploadParameters.java | 371 ---
 .../store/uploader/UploadSegmentMetadata.java | 261 --
 .../datalake/store/uploader/package-info.java | 9 -
 .../uploader/DataLakeUploaderTestBase.java | 55 -
 .../store/uploader/DataLakeUploaderTests.java | 366 ---
 .../store/uploader/InMemoryFrontEnd.java | 276 --
 .../store/uploader/IntentionalException.java | 13 -
 .../store/uploader/MsuMockFrontEnd.java | 77 -
 .../MultipleSegmentUploaderTests.java | 305 ---
 .../uploader/PerformanceUploadTests.java | 167 --
 .../uploader/SingleSegmentUploaderTests.java | 296 ---
 .../store/uploader/SsuMockFrontEnd.java | 98 -
 .../store/uploader/StringExtensionsTests.java | 299 ---
 .../datalake/store/uploader/TestHelpers.java | 75 -
 .../UploadMetadataGeneratorTests.java | 183 --
 .../uploader/UploadSegmentMetadataTests.java | 176 --
 .../store/uploader/UploaderFrontEndMock.java | 74 -
 .../management/datalake/store/Accounts.java | 400 ++-
 ...taLakeStoreFileSystemManagementClient.java | 111 -
 .../datalake/store/FileSystems.java | 905 -------
 .../store/implementation/AccountsImpl.java | 711 ++++--
 ...keStoreFileSystemManagementClientImpl.java | 203 --
 .../store/implementation/FileSystemsImpl.java | 2238 -----------------
 .../store/implementation/package-info.java | 4 +-
 .../datalake/store/models/AclStatus.java | 117 -
 .../store/models/AclStatusResult.java | 43 -
 .../models/AdlsAccessControlException.java | 22 -
 .../store/models/AdlsBadOffsetException.java | 22 -
 .../datalake/store/models/AdlsError.java | 32 -
 .../store/models/AdlsErrorException.java | 87 -
 .../AdlsFileAlreadyExistsException.java | 21 -
 .../models/AdlsFileNotFoundException.java | 21 -
 .../store/models/AdlsIOException.java | 21 -
 .../models/AdlsIllegalArgumentException.java | 21 -
 .../store/models/AdlsRemoteException.java | 66 -
 .../store/models/AdlsRuntimeException.java | 22 -
 .../store/models/AdlsSecurityException.java | 21 -
 .../AdlsUnsupportedOperationException.java | 21 -
 .../datalake/store/models/ContentSummary.java | 77 -
 .../store/models/ContentSummaryResult.java | 32 -
 .../store/models/DataLakeStoreAccount.java | 25 +
 .../DataLakeStoreAccountProperties.java | 67 +
 .../store/models/EncryptionConfig.java | 69 +
 .../store/models/EncryptionConfigType.java | 53 +
 .../store/models/EncryptionIdentity.java | 74 +
 ...eType.java => EncryptionIdentityType.java} | 22 +-
 .../models/EncryptionProvisioningState.java | 53 +
 .../{FileType.java => EncryptionState.java} | 26 +-
 .../store/models/FileOperationResult.java | 32 -
 .../store/models/FileStatusProperties.java | 168 --
 .../store/models/FileStatusResult.java | 32 -
 .../datalake/store/models/FileStatuses.java | 33 -
 .../store/models/FileStatusesResult.java | 32 -
 .../store/models/KeyVaultMetaInfo.java | 92 +
 .../datalake/store/models/package-info.java | 4 +-
 .../datalake/store/package-info.java | 4 +-
 gulpfile.js | 8 +-
 70 files changed, 1279 insertions(+), 10112 deletions(-)

 delete mode 100644 azure-mgmt-datalake-store-uploader/pom.xml
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/AggregateUploadException.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeStoreFrontEndAdapterImpl.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeStoreUploader.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/FrontEndAdapter.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/InvalidMetadataException.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/MultipleSegmentUploader.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/SegmentUploadStatus.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/SingleSegmentUploader.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/StringExtensions.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/UploadFailedException.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/UploadMetadata.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/UploadMetadataGenerator.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/UploadParameters.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/UploadSegmentMetadata.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/package-info.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeUploaderTestBase.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeUploaderTests.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/InMemoryFrontEnd.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/IntentionalException.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/MsuMockFrontEnd.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/MultipleSegmentUploaderTests.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/PerformanceUploadTests.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/SingleSegmentUploaderTests.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/SsuMockFrontEnd.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/StringExtensionsTests.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/TestHelpers.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/UploadMetadataGeneratorTests.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/UploadSegmentMetadataTests.java
 delete mode 100644 azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/UploaderFrontEndMock.java
 delete mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/DataLakeStoreFileSystemManagementClient.java
 delete mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/FileSystems.java
 delete mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/implementation/DataLakeStoreFileSystemManagementClientImpl.java
 delete mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/implementation/FileSystemsImpl.java
 delete mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AclStatus.java
 delete mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AclStatusResult.java
 delete mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsAccessControlException.java
 delete mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsBadOffsetException.java
 delete mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsError.java
 delete mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsErrorException.java
 delete mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsFileAlreadyExistsException.java
 delete mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsFileNotFoundException.java
 delete mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsIOException.java
 delete mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsIllegalArgumentException.java
 delete mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsRemoteException.java
 delete mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsRuntimeException.java
 delete mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsSecurityException.java
 delete mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsUnsupportedOperationException.java
 delete mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/ContentSummary.java
 delete mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/ContentSummaryResult.java
 create mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/EncryptionConfig.java
 create mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/EncryptionConfigType.java
 create mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/EncryptionIdentity.java
 rename azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/{AppendModeType.java => EncryptionIdentityType.java} (55%)
 create mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/EncryptionProvisioningState.java
 rename azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/{FileType.java => EncryptionState.java} (57%)
 delete mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/FileOperationResult.java
 delete mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/FileStatusProperties.java
 delete mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/FileStatusResult.java
 delete mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/FileStatuses.java
 delete mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/FileStatusesResult.java
 create mode 100644 azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/KeyVaultMetaInfo.java

diff --git a/azure-mgmt-datalake-store-uploader/pom.xml b/azure-mgmt-datalake-store-uploader/pom.xml
deleted file mode 100644
index c8c31b04b4ba..000000000000
--- a/azure-mgmt-datalake-store-uploader/pom.xml
+++ /dev/null
@@ -1,147 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>com.microsoft.azure</groupId>
-    <artifactId>azure-parent</artifactId>
-    <version>1.0.0-beta4-SNAPSHOT</version>
-    <relativePath>../pom.xml</relativePath>
-  </parent>
-
-  <artifactId>azure-mgmt-datalake-store-uploader</artifactId>
-  <packaging>jar</packaging>
-  <version>1.0.0-SNAPSHOT</version>
-
-  <name>Microsoft Azure SDK for Data Lake Store Data Uploading</name>
-  <description>This package contains Microsoft Azure Data Lake Store Uploader SDK, which enables rapid ingress of data into Azure Data Lake Storage accounts</description>
-  <url>https://github.com/Azure/azure-sdk-for-java</url>
-
-  <licenses>
-    <license>
-      <name>The MIT License (MIT)</name>
-      <url>http://opensource.org/licenses/MIT</url>
-      <distribution>repo</distribution>
-    </license>
-  </licenses>
-
-  <scm>
-    <connection>scm:git:https://github.com/Azure/azure-sdk-for-java</connection>
-    <developerConnection>scm:git:git@github.com:Azure/azure-sdk-for-java.git</developerConnection>
-    <tag>HEAD</tag>
-  </scm>
-
-  <properties>
-    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-  </properties>
-
-  <developers>
-    <developer>
-      <id>microsoft</id>
-      <name>Microsoft</name>
-    </developer>
-  </developers>
-
-  <dependencies>
-    <dependency>
-      <groupId>com.microsoft.azure</groupId>
-      <artifactId>azure-client-runtime</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.microsoft.azure</groupId>
-      <artifactId>azure-mgmt-datalake-store</artifactId>
-      <version>1.0.0-SNAPSHOT</version>
-    </dependency>
-    <dependency>
-      <groupId>com.microsoft.azure</groupId>
-      <artifactId>azure-mgmt-resources</artifactId>
-      <version>1.0.0-beta4-SNAPSHOT</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>com.microsoft.azure</groupId>
-      <artifactId>azure-client-authentication</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>commons-io</groupId>
-      <artifactId>commons-io</artifactId>
-      <version>2.4</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>com.microsoft.azure</groupId>
-      <artifactId>api-annotations</artifactId>
-      <version>0.0.1</version>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-jar-plugin</artifactId>
-        <configuration>
-          <archive>
-            <manifest>
-              <addDefaultImplementationEntries>true</addDefaultImplementationEntries>
-              <addDefaultSpecificationEntries>true</addDefaultSpecificationEntries>
-            </manifest>
-          </archive>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-compiler-plugin</artifactId>
-        <version>3.1</version>
-        <configuration>
-          <source>1.7</source>
-          <target>1.7</target>
-          <annotationProcessors>
-            <annotationProcessor>com.microsoft.azure.management.apigeneration.LangDefinitionProcessor</annotationProcessor>
-          </annotationProcessors>
-          <showWarnings>true</showWarnings>
-          <showDeprecation>true</showDeprecation>
-          <fork>true</fork>
-          <debug>true</debug>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-antrun-plugin</artifactId>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-javadoc-plugin</artifactId>
-        <version>2.8</version>
-        <configuration>
-          <excludePackageNames>*.implementation.*;*.utils.*;com.microsoft.schemas._2003._10.serialization;*.blob.core.storage</excludePackageNames>
-          <bottom><![CDATA[/**
-* Copyright (c) Microsoft Corporation. All rights reserved.
-* Licensed under the MIT License. See License.txt in the project root for
-* license information.
-*/]]></bottom>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-</project>
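For context on what this patch retires: the uploader's public surface was the DataLakeStoreUploader, UploadParameters, FrontEndAdapter, and DataLakeStoreFrontEndAdapterImpl classes deleted below. A minimal sketch of the old wiring follows, assuming a hypothetical UploadParameters constructor (its real signature lives in the deleted UploadParameters.java, which does not appear in this section) and a file-system client built elsewhere from real credentials:

// Sketch only: the account name, paths, helper, and UploadParameters
// constructor shown here are illustrative assumptions; the classes
// themselves are the ones removed by this patch.
DataLakeStoreFileSystemManagementClientImpl fsClient = createFileSystemClient(); // hypothetical helper
FrontEndAdapter frontEnd = new DataLakeStoreFrontEndAdapterImpl("myadlsaccount", fsClient);

UploadParameters params = new UploadParameters(
    "/tmp/input.csv",   // local file to upload
    "/data/input.csv",  // target stream path in the store
    "myadlsaccount");   // account name (assumed parameter order)

DataLakeStoreUploader uploader = new DataLakeStoreUploader(params, frontEnd);
uploader.execute(); // segments the file, uploads in parallel, concatenates at the end
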
diff --git a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/AggregateUploadException.java b/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/AggregateUploadException.java
deleted file mode 100644
index c0630ec3dc1f..000000000000
--- a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/AggregateUploadException.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the MIT License. See License.txt in the project root for
- * license information.
- */
-package com.microsoft.azure.management.datalake.store.uploader;
-
-import java.util.List;
-
-/**
- * A wrapper for the exceptions that can be generated during parallel
- * execution of the uploader.
- */
-public class AggregateUploadException extends Exception {
-
-    private final List<Exception> secondaryExceptions;
-
-    /**
-     * Constructor for the custom aggregate exception thrown by the uploader in the event of failure
-     * during parallel execution.
-     * @param message The message to be displayed at the top level of the exception (should be the most relevant message).
-     * @param primary The primary exception at the top level (should be the most relevant).
-     * @param others All other exceptions that were also thrown during parallel execution.
-     */
-    public AggregateUploadException(String message, Exception primary, List<Exception> others) {
-        super(message, primary);
-        this.secondaryExceptions = others;
-    }
-
-    /**
-     * Returns all of the exceptions (except the first one, which is used to construct this exception)
-     * that are associated with this exception.
-     * @return an array of {@link Throwable} objects that are associated with this exception
-     */
-    public Throwable[] getAllExceptions() {
-        int start = 0;
-        int size = secondaryExceptions.size();
-        final Throwable primary = getCause();
-        if (primary != null) {
-            start = 1;
-            size++;
-        }
-
-        Throwable[] all = new Throwable[size];
-
-        if (primary != null) {
-            all[0] = primary;
-        }
-
-        // copy the secondary exceptions element by element; Arrays.fill would
-        // store the whole List in every slot rather than the individual exceptions.
-        for (int i = 0; i < secondaryExceptions.size(); i++) {
-            all[start + i] = secondaryExceptions.get(i);
-        }
-        return all;
-    }
-}
diff --git a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeStoreFrontEndAdapterImpl.java b/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeStoreFrontEndAdapterImpl.java
deleted file mode 100644
index 1909ca02a4fd..000000000000
--- a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeStoreFrontEndAdapterImpl.java
+++ /dev/null
@@ -1,143 +0,0 @@
-/**
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the MIT License. See License.txt in the project root for
- * license information.
- */
-package com.microsoft.azure.management.datalake.store.uploader;
-
-import com.microsoft.azure.management.datalake.store.models.AdlsErrorException;
-import com.microsoft.rest.RestException;
-import com.microsoft.azure.management.datalake.store.implementation.DataLakeStoreFileSystemManagementClientImpl;
-import com.microsoft.azure.management.datalake.store.models.FileStatusResult;
-import org.apache.commons.lang3.StringUtils;
-
-import java.io.IOException;
-import java.nio.charset.StandardCharsets;
-import java.text.MessageFormat;
-
-/**
- * A front end adapter that communicates with the DataLake Store.
- * This is a synchronous call adapter, which has certain efficiency limitations.
- * In the future, new adapters that are created should consider implementing the methods
- * asynchronously.
- */
-public class DataLakeStoreFrontEndAdapterImpl implements FrontEndAdapter {
-
-    private String accountName;
-
-    private DataLakeStoreFileSystemManagementClientImpl client;
-
-    /**
-     * Initializes a new instance of the DataLakeStoreFrontEndAdapter adapter.
-     *
-     * @param accountName The Data Lake Store account name associated with this adapter
-     * @param client the {@link DataLakeStoreFileSystemManagementClientImpl} used by this adapter
-     */
-    public DataLakeStoreFrontEndAdapterImpl(String accountName, DataLakeStoreFileSystemManagementClientImpl client) {
-        this.accountName = accountName;
-        this.client = client;
-    }
-
-    /**
-     * Creates a new, empty stream at the given path.
-     *
-     * @param streamPath The relative path to the stream.
-     * @param overwrite Whether to overwrite an existing stream.
-     * @param data Optionally pass in data to add to the stream during creation. If null is passed in an empty stream is created
-     * @param byteCount If data is passed in, indicates how many bytes of the data passed in should be pushed into the stream
-     * @throws IOException if the file does not exist or is inaccessible.
-     * @throws RestException if there is a failure communicating with the service.
-     */
-    public void createStream(String streamPath, boolean overwrite, byte[] data, int byteCount) throws RestException, IOException {
-        byte[] toCreate;
-        if (data == null) {
-            toCreate = new byte[0];
-        } else {
-            toCreate = new byte[byteCount];
-            System.arraycopy(data, 0, toCreate, 0, byteCount);
-        }
-        client.fileSystems().create(accountName, streamPath, toCreate, overwrite);
-    }
-
-    /**
-     * Deletes an existing stream at the given path.
-     *
-     * @param streamPath The relative path to the stream.
-     * @param recurse if set to true recursively delete. This is used for folder streams only.
-     * @throws IOException if the file does not exist or is inaccessible.
-     * @throws RestException if there is a failure communicating with the service.
-     */
-    public void deleteStream(String streamPath, boolean recurse) throws IOException, RestException {
-        client.fileSystems().delete(accountName, streamPath, recurse);
-    }
-
-    /**
-     * Appends to a stream.
-     *
-     * @param streamPath The relative path to the stream.
-     * @param data The data to append to the stream
-     * @param offset This parameter is unused by this implementation, and any value put here is ignored
-     * @param byteCount The number of bytes from the data stream to append (starting at offset 0 of data).
-     * @throws IOException if the file does not exist or is inaccessible.
-     * @throws RestException if there is a failure communicating with the service.
-     */
-    public void appendToStream(String streamPath, byte[] data, long offset, int byteCount) throws IOException, RestException {
-        byte[] toAppend = new byte[byteCount];
-        System.arraycopy(data, 0, toAppend, 0, byteCount);
-        client.fileSystems().append(accountName, streamPath, toAppend);
-    }
-
-    /**
-     * Determines if the stream with given path exists.
-     *
-     * @param streamPath The relative path to the stream.
-     * @return True if the stream exists, false otherwise.
-     * @throws IOException if the file does not exist or is inaccessible.
-     * @throws RestException if there is a failure communicating with the service.
-     */
-    public boolean streamExists(String streamPath) throws RestException, IOException {
-        try {
-            client.fileSystems().getFileStatus(accountName, streamPath);
-        } catch (AdlsErrorException cloudEx) {
-            if (cloudEx.getResponse().code() == 404) {
-                return false;
-            }
-
-            throw cloudEx;
-        }
-
-        return true;
-    }
-
-    /**
-     * Gets a value indicating the length of a stream, in bytes.
-     *
-     * @param streamPath The relative path to the stream.
-     * @return The length of the stream, in bytes.
-     * @throws IOException if the file does not exist or is inaccessible.
-     * @throws RestException if there is a failure communicating with the service.
-     */
-    public long getStreamLength(String streamPath) throws IOException, RestException {
-        FileStatusResult fileInfoResponse = client.fileSystems().getFileStatus(accountName, streamPath).getBody();
-        return fileInfoResponse.fileStatus().length();
-    }
-
-    /**
-     * Concatenates the given input streams (in order) into the given target stream.
-     * At the end of this operation, input streams will be deleted.
-     *
-     * @param targetStreamPath The relative path to the target stream.
-     * @param inputStreamPaths An ordered array of paths to the input streams to concatenate into the target stream.
-     * @throws IOException if the file does not exist or is inaccessible.
-     * @throws RestException if there is a failure communicating with the service.
-     */
-    public void concatenate(String targetStreamPath, String[] inputStreamPaths) throws IOException, RestException {
-        // this is required for the current version of the microsoft concatenate
-        // TODO: Improve WebHDFS concatenate to take in the list of paths to concatenate
-        // in the request body.
-        String paths = MessageFormat.format("sources={0}", StringUtils.join(inputStreamPaths, ','));
-
-        // For the current implementation, we require UTF8 encoding.
-        client.fileSystems().msConcat(accountName, targetStreamPath, paths.getBytes(StandardCharsets.UTF_8), true);
-    }
-}
diff --git a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeStoreUploader.java b/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeStoreUploader.java
deleted file mode 100644
index d3e413ba63a5..000000000000
--- a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeStoreUploader.java
+++ /dev/null
@@ -1,379 +0,0 @@
-/**
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the MIT License. See License.txt in the project root for
- * license information.
- */
-package com.microsoft.azure.management.datalake.store.uploader;
-
-import org.apache.commons.lang3.StringUtils;
-
-import javax.management.OperationsException;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.nio.file.Paths;
-import java.text.MessageFormat;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Represents a general purpose file uploader into DataLake. Supports the efficient upload of large files.
- */
-public class DataLakeStoreUploader {
-
-    /**
-     * The maximum number of parallel threads to allow.
-     */
-    public static final int MAX_ALLOWED_THREADS = 1024;
-    private FrontEndAdapter frontEnd;
-    private String metadataFilePath;
-
-    /**
-     * Creates a new instance of the DataLakeStoreUploader class, by specifying a pointer to the FrontEnd to use for the upload.
-     *
-     * @param uploadParameters The upload parameters to use.
-     * @param frontEnd A pointer to the FrontEnd interface to use for the upload.
-     * @throws FileNotFoundException if the local file cannot be found or is inaccessible
-     */
-    public DataLakeStoreUploader(UploadParameters uploadParameters, FrontEndAdapter frontEnd) throws FileNotFoundException {
-        this.parameters = uploadParameters;
-        this.frontEnd = frontEnd;
-
-        // ensure that input parameters are correct
-        validateParameters();
-        metadataFilePath = getCanonicalMetadataFilePath();
-    }
-
-    /**
-     * Gets the canonical metadata file path.
-     *
-     * @return A string representation of the canonical metadata file path.
-     */
-    private String getCanonicalMetadataFilePath() {
-        return Paths.get(this.getParameters().getLocalMetadataLocation(), MessageFormat.format("{0}.upload.xml", Paths.get(this.getParameters().getInputFilePath()).getFileName())).toString();
-    }
-
-    private UploadParameters parameters;
-
-    /**
-     * Gets the parameters to use for this upload.
-     * @return the parameters for this upload.
-     */
-    public UploadParameters getParameters() {
-        return parameters;
-    }
-
-    /**
-     * Executes the upload as defined by the input parameters.
-     *
-     * @throws Exception if there is any failure that occurs during execution.
-     */
-    public void execute() throws Exception {
-        // load up existing metadata or create a fresh one
-        UploadMetadata metadata = getMetadata();
-
-        if (metadata.getSegmentCount() < this.getParameters().getThreadCount()) {
-            // reduce the thread count to make it equal to the segment count
-            // if it is larger, since those extra threads will not be used.
-            this.getParameters().setThreadCount(metadata.getSegmentCount());
-        }
-
-        // begin (or resume) uploading the file
-        uploadFile(metadata);
-
-        // clean up metadata after a successful upload
-        metadata.deleteFile();
-    }
-
-    /**
-     * Validates the parameters.
-     *
-     * @throws FileNotFoundException if the input file could not be found
-     * @throws IllegalArgumentException if the account name is null or empty, the stream path ends with a '/', or the thread count is out of range.
-     */
-    private void validateParameters() throws FileNotFoundException, IllegalArgumentException {
-        if (!(new File(this.getParameters().getInputFilePath()).exists())) {
-            throw new FileNotFoundException("Could not find input file: " + this.getParameters().getInputFilePath());
-        }
-
-        if (this.getParameters().getTargetStreamPath() == null || StringUtils.isEmpty(this.getParameters().getTargetStreamPath())) {
-            throw new IllegalArgumentException("Null or empty Target Stream path");
-        }
-
-        if (this.getParameters().getTargetStreamPath().endsWith("/")) {
-            throw new IllegalArgumentException("Invalid TargetStreamPath, a stream path should not end with /");
-        }
-
-        if (this.getParameters().getAccountName() == null || StringUtils.isEmpty(this.getParameters().getAccountName())) {
-            throw new IllegalArgumentException("Null or empty Account Name");
-        }
-
-        if (this.getParameters().getThreadCount() < 1 || this.getParameters().getThreadCount() > MAX_ALLOWED_THREADS) {
-            throw new IllegalArgumentException(MessageFormat.format("ThreadCount must be at least 1 and at most {0}", MAX_ALLOWED_THREADS));
-        }
-    }
-
-    /**
-     * Gets the metadata.
-     *
-     * @return The {@link UploadMetadata} used by this upload.
-     * @throws IOException if the metadata file cannot be read or written
-     * @throws InvalidMetadataException if the metadata is invalid or inconsistent
-     * @throws UploadFailedException if the upload cannot proceed based on the metadata
-     */
-    private UploadMetadata getMetadata() throws IOException, InvalidMetadataException, UploadFailedException {
-        UploadMetadataGenerator metadataGenerator = new UploadMetadataGenerator(this.parameters);
-        if (this.getParameters().isResume()) {
-            return metadataGenerator.getExistingMetadata(metadataFilePath);
-        } else {
-            return metadataGenerator.createNewMetadata(metadataFilePath);
-        }
-    }
-
-    /**
-     * Deletes the metadata file from disk.
-     */
-    public void deleteMetadataFile() {
-        File toDelete = new File(metadataFilePath);
-        if (toDelete.exists()) {
-            toDelete.delete();
-        }
-    }
-
-    /**
-     * Validates that the metadata is valid for a resume operation, and also updates the internal Segment States to match what the Server looks like.
-     * If any changes are made, the metadata will be saved to its canonical location.
-     *
-     * @param metadata The {@link UploadMetadata} to resume the upload from.
-     * @throws Exception if the metadata cannot be validated against the server state
-     */
-    private void validateMetadataForResume(UploadMetadata metadata) throws Exception {
-        validateMetadataMatchesLocalFile(metadata);
-
-        // verify that the target stream does not already exist (in case we don't want to overwrite)
-        if (!this.getParameters().isOverwrite() && frontEnd.streamExists(metadata.getTargetStreamPath())) {
-            throw new OperationsException("Target Stream already exists");
-        }
-
-        // make sure we don't upload part of the file as binary, while the rest is non-binary (that's just asking for trouble)
-        if (this.getParameters().isBinary() != metadata.isBinary()) {
-            throw new OperationsException(
-                MessageFormat.format(
-                    "Existing metadata was created for a {0}binary file while the current parameters requested a {1}binary upload.",
-                    metadata.isBinary() ? "" : "non-",
-                    this.getParameters().isBinary() ? "" : "non-"));
-        }
-
-        // see which files (segments) already exist and update metadata accordingly (only for segments that are missing from the server; if it's on the server but not in metadata, reupload)
-        for (UploadSegmentMetadata segment : metadata.getSegments()) {
-            if (segment.getStatus() == SegmentUploadStatus.Complete) {
-                int retryCount = 0;
-                while (retryCount < SingleSegmentUploader.MAX_BUFFER_UPLOAD_ATTEMPT_COUNT) {
-                    retryCount++;
-                    try {
-                        // verify that the stream exists and that the length is as expected
-                        if (!frontEnd.streamExists(segment.getPath())) {
-                            // this segment was marked as completed, but no target stream exists; it needs to be reuploaded
-                            segment.setStatus(SegmentUploadStatus.Pending);
-                        } else {
-                            long remoteLength = frontEnd.getStreamLength(segment.getPath());
-                            if (remoteLength != segment.getLength()) {
-                                // the target stream has a different length than the input segment, which implies they are inconsistent; it needs to be reuploaded
-                                segment.setStatus(SegmentUploadStatus.Pending);
-                            }
-                        }
-
-                        break;
-                    } catch (Exception e) {
-                        if (retryCount >= SingleSegmentUploader.MAX_BUFFER_UPLOAD_ATTEMPT_COUNT) {
-                            throw new UploadFailedException(
-                                MessageFormat.format(
-                                    "Cannot validate metadata in order to resume due to the following exception retrieving file information: {0}",
-                                    e));
-                        }
-
-                        SingleSegmentUploader.waitForRetry(retryCount, parameters.isUseSegmentBlockBackOffRetryStrategy());
-                    }
-                }
-            } else {
-                // anything which is not in 'Completed' status needs to be reuploaded
-                segment.setStatus(SegmentUploadStatus.Pending);
-            }
-        }
-        metadata.save();
-    }
-
-    /**
-     * Verifies that the metadata is valid for a fresh upload.
-     *
-     * @param metadata {@link UploadMetadata} to validate for a fresh upload.
-     * @throws Exception if the validation fails
-     */
-    private void validateMetadataForFreshUpload(UploadMetadata metadata) throws Exception {
-        validateMetadataMatchesLocalFile(metadata);
-
-        // verify that the target stream does not already exist (in case we don't want to overwrite)
-        if (!this.getParameters().isOverwrite() && frontEnd.streamExists(metadata.getTargetStreamPath())) {
-            throw new OperationsException("Target Stream already exists");
-        }
-    }
-
-    /**
-     * Verifies that the metadata is consistent with the local file information.
-     *
-     * @param metadata The {@link UploadMetadata} to check against the local file.
-     * @throws OperationsException if the metadata does not match the local file
-     */
-    private void validateMetadataMatchesLocalFile(UploadMetadata metadata) throws OperationsException {
-        if (!metadata.getTargetStreamPath().trim().equalsIgnoreCase(this.getParameters().getTargetStreamPath().trim())) {
-            throw new OperationsException("Metadata points to a different target stream than the input parameters");
-        }
-
-        // verify that it matches against the local file (size, name)
-        File metadataInputFileInfo = new File(metadata.getInputFilePath());
-        File paramInputFileInfo = new File(this.getParameters().getInputFilePath());
-
-        if (!paramInputFileInfo.toString().toLowerCase().equals(metadataInputFileInfo.toString().toLowerCase())) {
-            throw new OperationsException("The metadata refers to a different file than the one requested");
-        }
-
-        if (!metadataInputFileInfo.exists()) {
-            throw new OperationsException("The metadata refers to a file that does not exist");
-        }
-
-        if (metadata.getFileLength() != metadataInputFileInfo.length()) {
-            throw new OperationsException("The metadata's file information differs from the actual file");
-        }
-    }
-
-    /**
-     * Uploads the file using the given metadata.
-     *
-     * @param metadata The {@link UploadMetadata} to use to upload the file.
-     * @throws Exception if any failure occurs during the upload
-     */
-    private void uploadFile(UploadMetadata metadata) throws Exception {
-        try {
-            // TODO: figure out if we need a ServicePointManager equivalent for the connection limit
-            // match up the metadata with the information on the server
-            if (this.getParameters().isResume()) {
-                validateMetadataForResume(metadata);
-            } else {
-                validateMetadataForFreshUpload(metadata);
-            }
-
-            // TODO: figure out if we need a way to track progress.
-            if (metadata.getSegmentCount() == 0) {
-                // simply create the target stream, overwriting existing streams if they exist
-                frontEnd.createStream(metadata.getTargetStreamPath(), true, null, 0);
-            } else if (metadata.getSegmentCount() > 1) {
-                // perform the multi-segment upload
-                MultipleSegmentUploader msu = new MultipleSegmentUploader(metadata, this.getParameters().getThreadCount(), frontEnd);
-                msu.setUseSegmentBlockBackOffRetryStrategy(this.getParameters().isUseSegmentBlockBackOffRetryStrategy());
-                msu.upload();
-
-                // concatenate the files at the end
-                concatenateSegments(metadata);
-            } else {
-                // optimization if we only have one segment: upload it directly to the target stream
-                UploadSegmentMetadata[] toUse = metadata.getSegments();
-                toUse[0].setPath(metadata.getTargetStreamPath());
-                metadata.setSegments(toUse);
-                SingleSegmentUploader ssu = new SingleSegmentUploader(0, metadata, frontEnd);
-                ssu.setUseBackOffRetryStrategy(this.getParameters().isUseSegmentBlockBackOffRetryStrategy());
-                ssu.upload();
-            }
-        } catch (InterruptedException ex) {
-            // do nothing since we have already marked everything as failed
-        }
-    }
-
-    /**
-     * Concatenates all the segments defined in the metadata into a single stream.
-     *
-     * @param metadata The {@link UploadMetadata} to determine the segments to concatenate
-     * @throws Exception if the concatenation or its validation fails
-     */
-    private void concatenateSegments(final UploadMetadata metadata) throws Exception {
-        final String[] inputPaths = new String[metadata.getSegmentCount()];
-
-        // verify whether the target stream exists
-        if (frontEnd.streamExists(metadata.getTargetStreamPath())) {
-            if (this.getParameters().isOverwrite()) {
-                frontEnd.deleteStream(metadata.getTargetStreamPath(), false);
-            } else {
-                throw new OperationsException("Target Stream already exists");
-            }
-        }
-
-        // ensure all input streams exist and are of the expected length
-        // ensure all segments in the metadata are marked as 'complete'
-        final List<Exception> exceptions = new ArrayList<>();
-        ExecutorService exec = Executors.newFixedThreadPool(this.getParameters().getThreadCount());
-        for (int i = 0; i < metadata.getSegmentCount(); i++) {
-            final int finalI = i;
-            exec.submit(new Runnable() {
-                @Override
-                public void run() {
-                    try {
-                        if (metadata.getSegments()[finalI].getStatus() != SegmentUploadStatus.Complete) {
-                            throw new UploadFailedException("Cannot perform 'concatenate' operation because not all streams are fully uploaded.");
-                        }
-
-                        String remoteStreamPath = metadata.getSegments()[finalI].getPath();
-                        int retryCount = 0;
-                        long remoteLength = -1;
-
-                        while (retryCount < SingleSegmentUploader.MAX_BUFFER_UPLOAD_ATTEMPT_COUNT) {
-                            retryCount++;
-                            try {
-                                remoteLength = frontEnd.getStreamLength(remoteStreamPath);
-                                break;
-                            } catch (Exception e) {
-                                if (retryCount >= SingleSegmentUploader.MAX_BUFFER_UPLOAD_ATTEMPT_COUNT) {
-                                    throw new UploadFailedException(
-                                        MessageFormat.format(
-                                            "Cannot perform 'concatenate' operation due to the following exception retrieving file information: {0}",
-                                            e));
-                                }
-
-                                SingleSegmentUploader.waitForRetry(retryCount, parameters.isUseSegmentBlockBackOffRetryStrategy());
-                            }
-                        }
-
-                        if (remoteLength != metadata.getSegments()[finalI].getLength()) {
-                            throw new UploadFailedException(MessageFormat.format("Cannot perform 'concatenate' operation because segment {0} has an incorrect length (expected {1}, actual {2}).", finalI, metadata.getSegments()[finalI].getLength(), remoteLength));
-                        }
-
-                        inputPaths[finalI] = remoteStreamPath;
-                    } catch (Exception ex) {
-                        // collect any exceptions, whether we just generated them above or whether they come from the Front End
                        synchronized (exceptions) {
-                            exceptions.add(ex);
-                        }
-                    }
-                }
-            });
-        }
-
-        exec.shutdown();
-
-        try {
-            exec.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); // waits ~292 years for completion or interruption.
-        } catch (InterruptedException e) {
-            // add the exception since it will indicate that the operation was cancelled.
-            exceptions.add(e);
-        }
-
-        if (exceptions.size() > 0) {
-            throw new AggregateUploadException("At least one concatenate operation failed", exceptions.remove(0), exceptions);
-        }
-
-        // issue the command
-        frontEnd.concatenate(metadata.getTargetStreamPath(), inputPaths);
-    }
-}
diff --git a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/FrontEndAdapter.java b/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/FrontEndAdapter.java
deleted file mode 100644
index 7bbf536895f5..000000000000
--- a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/FrontEndAdapter.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the MIT License. See License.txt in the project root for
- * license information.
- */
-package com.microsoft.azure.management.datalake.store.uploader;
-
-import com.microsoft.rest.RestException;
-
-import java.io.IOException;
-
-/**
- * The front-end adapter contract that must be implemented for the
- * multi-part uploader to execute.
- */
-public interface FrontEndAdapter {
-    /**
-     * Creates a new, empty stream at the given path.
-     *
-     * @param streamPath The relative path to the stream.
-     * @param overwrite Whether to overwrite an existing stream.
-     * @param data The data to include in the stream during creation.
-     * @param byteCount The number of bytes from data to include (starting at 0).
-     * @throws IOException if the file does not exist or is inaccessible.
-     * @throws RestException if there is a failure communicating with the service.
-     */
-    void createStream(String streamPath, boolean overwrite, byte[] data, int byteCount) throws RestException, IOException;
-
-    /**
-     * Deletes an existing stream at the given path.
-     *
-     * @param streamPath The relative path to the stream.
-     * @param recurse if set to true recursively delete. This is used for folder streams only.
-     * @throws IOException if the file does not exist or is inaccessible.
-     * @throws RestException if there is a failure communicating with the service.
-     */
-    void deleteStream(String streamPath, boolean recurse) throws IOException, RestException;
-
-    /**
-     * Appends the given byte array to the end of a given stream.
-     *
-     * @param streamPath The relative path to the stream.
-     * @param data An array of bytes to be appended to the stream.
-     * @param offset The offset at which to append to the stream.
-     * @param length The number of bytes to append (starting at 0).
-     * @throws IOException if the file does not exist or is inaccessible.
-     * @throws RestException if there is a failure communicating with the service.
-     */
-    void appendToStream(String streamPath, byte[] data, long offset, int length) throws IOException, RestException;
-
-    /**
-     * Determines if the stream with given path exists.
-     *
-     * @param streamPath The relative path to the stream.
-     * @return True if the stream exists, false otherwise.
-     * @throws IOException if the file does not exist or is inaccessible.
-     * @throws RestException if there is a failure communicating with the service.
-     */
-    boolean streamExists(String streamPath) throws RestException, IOException;
-
-    /**
-     * Gets a value indicating the length of a stream, in bytes.
-     *
-     * @param streamPath The relative path to the stream.
-     * @return The length of the stream, in bytes.
-     * @throws IOException if the file does not exist or is inaccessible.
-     * @throws RestException if there is a failure communicating with the service.
-     */
-    long getStreamLength(String streamPath) throws IOException, RestException;
-
-    /**
-     * Concatenates the given input streams (in order) into the given target stream.
-     * At the end of this operation, input streams will be deleted.
-     *
-     * @param targetStreamPath The relative path to the target stream.
-     * @param inputStreamPaths An ordered array of paths to the input streams.
-     * @throws IOException if the file does not exist or is inaccessible.
-     * @throws RestException if there is a failure communicating with the service.
-     */
-    void concatenate(String targetStreamPath, String[] inputStreamPaths) throws IOException, RestException;
-}
diff --git a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/InvalidMetadataException.java b/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/InvalidMetadataException.java
deleted file mode 100644
index 2ef5fe64ba74..000000000000
--- a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/InvalidMetadataException.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the MIT License. See License.txt in the project root for
- * license information.
- */
-package com.microsoft.azure.management.datalake.store.uploader;
-
-/**
- * Represents an exception that is thrown when the local metadata is invalid or inconsistent.
- */
-public class InvalidMetadataException extends Exception {
-
-    /**
-     * Initializes a new instance of the InvalidMetadataException exception.
-     *
-     * @param message The message that describes the error.
-     */
-    public InvalidMetadataException(String message) {
-        super(message);
-    }
-
-    /**
-     * Initializes a new instance of the InvalidMetadataException exception.
-     *
-     * @param message The error message that explains the reason for the exception.
-     * @param innerException The exception that is the cause of the current exception, or a null reference if no inner exception is specified.
-     */
-    public InvalidMetadataException(String message, Exception innerException) {
-        super(message, innerException);
-    }
-}
diff --git a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/MultipleSegmentUploader.java b/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/MultipleSegmentUploader.java
deleted file mode 100644
index 486d013dca5d..000000000000
--- a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/MultipleSegmentUploader.java
+++ /dev/null
@@ -1,228 +0,0 @@
-/**
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the MIT License. See License.txt in the project root for
- * license information.
- */
-package com.microsoft.azure.management.datalake.store.uploader;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Queue;
-
-/**
- * Uploads a local file in parallel by splitting it into several segments, according to the given metadata.
- */
-public class MultipleSegmentUploader implements Runnable {
-
-    /**
-     * The maximum number of attempts to upload a segment of the file before failing out.
-     */
-    public static final int MAX_UPLOAD_ATTEMPT_COUNT = 4;
-    private UploadMetadata metadata;
-    private FrontEndAdapter frontEnd;
-    private int maxThreadCount;
-    private Queue<SegmentQueueItem> pendingSegments;
-    private List<Exception> exceptions;
-
-    /**
-     * Creates a new MultipleSegmentUploader.
-     *
-     * @param uploadMetadata The metadata that keeps track of the file upload.
-     * @param maxThreadCount The maximum number of threads to use. Note that in some cases, this number may not be reached.
-     * @param frontEnd A pointer to the Front End interface to perform the upload to.
-     */
-    public MultipleSegmentUploader(UploadMetadata uploadMetadata, int maxThreadCount, FrontEndAdapter frontEnd) {
-        metadata = uploadMetadata;
-        this.maxThreadCount = maxThreadCount;
-        this.frontEnd = frontEnd;
-        exceptions = new ArrayList<>();
-        pendingSegments = getPendingSegmentsToUpload(metadata);
-        this.useSegmentBlockBackOffRetryStrategy = true;
-    }
-
-    /**
-     * A value indicating whether to use an exponential back-off in case of individual block failures.
-     * The MultipleSegmentUploader does not use this directly; it passes it on to SingleSegmentUploader.
-     */
-    private boolean useSegmentBlockBackOffRetryStrategy;
-
-    /**
-     * @return A value indicating whether to use an exponential back-off in case of individual block failures.
-     * The MultipleSegmentUploader does not use this directly; it passes it on to SingleSegmentUploader.
-     */
-    public boolean useSegmentBlockBackOffRetryStrategy() {
-        return useSegmentBlockBackOffRetryStrategy;
-    }
-
-    /**
-     * @param isEnabled A value indicating whether to use an exponential back-off in case of individual block failures.
-     * The MultipleSegmentUploader does not use this directly; it passes it on to SingleSegmentUploader.
-     */
-    public void setUseSegmentBlockBackOffRetryStrategy(boolean isEnabled) {
-        useSegmentBlockBackOffRetryStrategy = isEnabled;
-    }
-
-    /**
-     * Executes the upload of the segments in the file that were not already uploaded (i.e., those that are in a 'Pending' state).
-     *
-     * @throws InterruptedException if there is some interruption sent during a wait.
-     * @throws AggregateUploadException if there are any failures in any of the threads running upload.
-     */
-    public void upload() throws InterruptedException, AggregateUploadException {
-        int threadCount = Math.min(pendingSegments.size(), maxThreadCount);
-        List<Thread> threads = new ArrayList<>(threadCount);
-
-        // start new threads that pull from the pendingSegments and then wait for them to finish
-        for (int i = 0; i < threadCount; i++) {
-            Thread t = new Thread(this);
-            t.start();
-            threads.add(t);
-        }
-
-        for (Thread t : threads) {
-            t.join();
-        }
-
-        // aggregate any exceptions and throw them back at our caller
-        if (exceptions.size() > 0) {
-            // always return the first exception as the primary exception.
-            throw new AggregateUploadException("One or more segments could not be uploaded. Review the upload Metadata to determine which segments failed", exceptions.remove(0), exceptions);
-        }
-    }
-
-    /**
-     * Processes the pending segments.
-     *
-     * @param pendingSegments The pending segments.
-     * @param exceptions The exceptions.
-     */
-    private void processPendingSegments(Queue<SegmentQueueItem> pendingSegments, Collection<Exception> exceptions) {
-        while (pendingSegments.size() > 0) {
-            // get the next item to process
-            SegmentQueueItem toProcess;
-            synchronized (pendingSegments) {
-                if (pendingSegments.size() == 0) {
-                    break;
-                }
-                toProcess = pendingSegments.remove();
-            }
-
-            try {
-                // execute it
-                uploadSegment(toProcess.segmentNumber, metadata);
-            } catch (Exception ex) {
-                if (toProcess.attemptCount + 1 < MAX_UPLOAD_ATTEMPT_COUNT) {
-                    // re-enqueue at the end, but with an incremented attempt count
-                    synchronized (pendingSegments) {
-                        pendingSegments.add(new SegmentQueueItem(toProcess.segmentNumber, toProcess.attemptCount + 1));
-                    }
-                } else {
-                    // keep track of the last exception for each segment and report it back
-                    synchronized (exceptions) {
-                        exceptions.add(ex);
-                    }
-                }
-            }
-        }
-    }
-
-    /**
-     * Uploads the segment.
-     *
-     * @param segmentNumber The segment number.
-     * @param metadata The metadata.
-     * @throws Exception if there is any failure while uploading the segment
-     */
-    private void uploadSegment(int segmentNumber, UploadMetadata metadata) throws Exception {
-        // mark the segment as 'InProgress' in the metadata
-        updateSegmentMetadataStatus(metadata, segmentNumber, SegmentUploadStatus.InProgress);
-
-        SingleSegmentUploader segmentUploader = new SingleSegmentUploader(segmentNumber, metadata, frontEnd);
-        segmentUploader.setUseBackOffRetryStrategy(this.useSegmentBlockBackOffRetryStrategy);
-
-        try {
-            segmentUploader.upload();
-            // if we reach this point, the upload was successful. Mark it as such.
-            updateSegmentMetadataStatus(metadata, segmentNumber, SegmentUploadStatus.Complete);
-        } catch (Exception e) {
-            // something horrible happened; mark the segment as failed and throw the original exception (the caller will handle it)
-            updateSegmentMetadataStatus(metadata, segmentNumber, SegmentUploadStatus.Failed);
-            throw e;
-        }
-    }
-
-    /**
-     * Gets the pending segments to upload.
-     *
-     * @param metadata The metadata.
-     * @return A queue containing the remaining pending segments to upload
-     */
-    private static Queue<SegmentQueueItem> getPendingSegmentsToUpload(UploadMetadata metadata) {
-        Queue<SegmentQueueItem> result = new LinkedList<>();
-        for (UploadSegmentMetadata segment : metadata.getSegments()) {
-            if (segment.getStatus() == SegmentUploadStatus.Pending) {
-                result.add(new SegmentQueueItem(segment.getSegmentNumber(), 0));
-            }
-        }
-        return result;
-    }
-
-    /**
-     * Updates the segment metadata status.
-     *
-     * @param metadata The metadata.
-     * @param segmentNumber The segment number.
-     * @param newStatus The new status.
-     */
-    private static void updateSegmentMetadataStatus(UploadMetadata metadata, int segmentNumber, SegmentUploadStatus newStatus) {
-        UploadSegmentMetadata[] toSave = metadata.getSegments();
-        toSave[segmentNumber].setStatus(newStatus);
-        metadata.setSegments(toSave);
-        try {
-            metadata.save();
-        } catch (Exception e) {
-            // no need to crash the program if we're unable to save the metadata; it is what's in memory that's important
-        }
-    }
-
-    /**
-     * When an object implementing interface Runnable is used
-     * to create a thread, starting the thread causes the object's
-     * run method to be called in that separately executing
-     * thread.
-     * <p>
-     * The general contract of the method run is that it may
-     * take any action whatsoever.
-     *
-     * In this run, we are allowing each thread to attempt to process all
-     * of the remaining segments, which will ultimately result in each thread
-     * processing a subset of the segments that are still in the queue.
-     *
-     * @see Thread#run()
-     */
-    @Override
-    public void run() {
-        processPendingSegments(pendingSegments, exceptions);
-    }
-
-    /**
-     * Represents a tuple that pairs a segment number with the number of times it was attempted for upload.
-     */
-    private static class SegmentQueueItem {
-        SegmentQueueItem(int segmentNumber, int attemptCount) {
-            this.segmentNumber = segmentNumber;
-            this.attemptCount = attemptCount;
-        }
-
-        public int getSegmentNumber() {
-            return segmentNumber;
-        }
-        private int segmentNumber;
-
-        public int getAttemptCount() {
-            return attemptCount;
-        }
-        private int attemptCount;
-    }
-}
diff --git a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/SegmentUploadStatus.java b/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/SegmentUploadStatus.java
deleted file mode 100644
index 7d5ee00180c6..000000000000
--- a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/SegmentUploadStatus.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the MIT License. See License.txt in the project root for
- * license information.
- */
-package com.microsoft.azure.management.datalake.store.uploader;
-
-/**
- * Defines the various states that a segment upload can have.
- */
-public enum SegmentUploadStatus {
-    /**
-     * Indicates that the segment is currently scheduled for upload.
-     */
-    Pending,
-
-    /**
-     * Indicates that the segment is currently being uploaded.
-     */
-    InProgress,
-
-    /**
-     * Indicates that the segment was not uploaded successfully.
-     */
-    Failed,
-
-    /**
-     * Indicates that the segment was successfully uploaded.
-     */
-    Complete
-}
diff --git a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/SingleSegmentUploader.java b/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/SingleSegmentUploader.java
deleted file mode 100644
index d0090e5bf3cc..000000000000
--- a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/SingleSegmentUploader.java
+++ /dev/null
@@ -1,312 +0,0 @@
-/**
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the MIT License. See License.txt in the project root for
- * license information.
- */
-package com.microsoft.azure.management.datalake.store.uploader;
-
-import org.apache.commons.lang3.StringUtils;
-
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.RandomAccessFile;
-import java.nio.charset.Charset;
-import java.text.MessageFormat;
-
-/**
- * Represents an uploader for a single segment of a larger file.
- */
-public class SingleSegmentUploader {
-
-    /**
-     * The length of the buffers to upload (4MB).
-     */
-    public static final int BUFFER_LENGTH = 4 * 1024 * 1024;
-
-    /**
-     * 4MB is the maximum length of a single extent. So if one record is longer than this,
-     * then we will fast fail, since that record will cross extent boundaries.
- */ - public static final int MAX_RECORD_LENGTH = 4 * 1024 * 1024; - - /** - * During upload retries, this indicates the maximum amount of time, in seconds, that we will wait between retries. - */ - public static final int MAXIMUM_BACKOFF_WAIT_SECONDS = 32; - - /** - * The maximum number of times to attempt to upload the buffer. - */ - public static final int MAX_BUFFER_UPLOAD_ATTEMPT_COUNT = 4; - - private FrontEndAdapter frontEndAdapter; - private UploadSegmentMetadata segmentMetadata; - private UploadMetadata metadata; - - /** - * Creates a new uploader for a single segment. - * - * @param segmentNumber The sequence number of the segment. - * @param uploadMetadata The metadata for the entire upload. - * @param frontEnd A pointer to the front end. - */ - public SingleSegmentUploader(int segmentNumber, UploadMetadata uploadMetadata, FrontEndAdapter frontEnd) { - metadata = uploadMetadata; - segmentMetadata = uploadMetadata.getSegments()[segmentNumber]; - frontEndAdapter = frontEnd; - this.useBackOffRetryStrategy = true; - } - - /** - * Gets or sets a value indicating whether to use a back-off (exponenential) in case of individual block failures. - * If set to 'false' every retry is handled immediately; otherwise an amount of time is waited between retries, as a function of power of 2. - */ - private boolean useBackOffRetryStrategy; - - /** - * - * @return A value indicating whether to use a back-off (exponenential) in case of individual block failures. - * If set to 'false' every retry is handled immediately; otherwise an amount of time is waited between retries, as a function of power of 2. - */ - public boolean useBackOffRetryStrategy() { - return useBackOffRetryStrategy; - } - - /** - * - * @param isEnabled A value indicating whether to use a back-off (exponenential) in case of individual block failures. - * If set to 'false' every retry is handled immediately; otherwise an amount of time is waited between retries, as a function of power of 2. - */ - public void setUseBackOffRetryStrategy(boolean isEnabled) { - useBackOffRetryStrategy = isEnabled; - } - - /** - * Uploads the portion of the InputFilePath to the given TargetStreamPath, starting at the given StartOffset. - * The segment is further divided into equally-sized blocks which are uploaded in sequence. - * Each such block is attempted a certain number of times; if after that it still cannot be uploaded, the entire segment is aborted (in which case no cleanup is performed on the server). - * - * @throws Exception if there is any failure during the upload - */ - public void upload() throws Exception { - File fileInfo = new File(metadata.getInputFilePath()); - if (!(fileInfo.exists())) { - throw new FileNotFoundException("Unable to locate input file: " + metadata.getInputFilePath()); - } - - //open up a reader from the input file, seek to the appropriate offset - try (RandomAccessFile inputStream = openInputStream()) { - long endPosition = segmentMetadata.getOffset() + segmentMetadata.getLength(); - if (endPosition > fileInfo.length()) { - throw new IllegalArgumentException("StartOffset+UploadLength is beyond the end of the input file"); - } - - uploadSegmentContents(inputStream, endPosition); - - verifyUploadedStream(); - //any exceptions are (re)thrown to be handled by the caller; we do not handle retries or other recovery techniques here - } - } - - /** - * Verifies the uploaded stream. - * - * @throws Exception if there is any failure validating the stream being uploaded. 
- */ - private void verifyUploadedStream() throws Exception { - //verify that the remote stream has the length we expected. - int retryCount = 0; - long remoteLength = -1; - while (retryCount < MAX_BUFFER_UPLOAD_ATTEMPT_COUNT) { - retryCount++; - try { - remoteLength = frontEndAdapter.getStreamLength(segmentMetadata.getPath()); - break; - } catch (Exception ex) { - if (retryCount >= MAX_BUFFER_UPLOAD_ATTEMPT_COUNT) { - throw ex; - } - - waitForRetry(retryCount, this.useBackOffRetryStrategy); - } - } - - if (segmentMetadata.getLength() != remoteLength) { - throw new UploadFailedException(MessageFormat.format("Post-upload stream verification failed: target stream has a length of {0}, expected {1}", remoteLength, segmentMetadata.getLength())); - } - } - - /** - * Uploads the segment contents. - * - * @param inputStream The input stream. - * @param endPosition The end position. - * @throws Exception if there is any failure attempting to upload the contents of a single segment. - */ - private void uploadSegmentContents(RandomAccessFile inputStream, long endPosition) throws Exception { - long bytesCopiedSoFar = 0; // we start off with a fresh stream - - byte[] buffer = new byte[BUFFER_LENGTH]; - int residualBufferLength = 0; //the number of bytes that remained in the buffer from the last upload (bytes which were not uploaded) - - while (inputStream.getFilePointer() < endPosition) { - //read a block of data, and keep track of how many bytes are actually read - int bytesRead = readIntoBuffer(inputStream, buffer, residualBufferLength, endPosition); - int bufferDataLength = residualBufferLength + bytesRead; - - //determine the cutoff offset for upload - everything before will be uploaded, everything after is residual; (the position of the last record in this buffer) - int uploadCutoff = bufferDataLength; - if (!metadata.isBinary()) { - uploadCutoff = determineUploadCutoffForTextFile(buffer, bufferDataLength, inputStream); - } - - bytesCopiedSoFar = uploadBuffer(buffer, uploadCutoff, bytesCopiedSoFar); - - residualBufferLength = bufferDataLength - uploadCutoff; - if (residualBufferLength > 0) { - //move the remainder of the buffer to the front - System.arraycopy(buffer, uploadCutoff, buffer, 0, residualBufferLength); - } - } - - //make sure we don't leave anything behind - if (residualBufferLength > 0) { - uploadBuffer(buffer, residualBufferLength, bytesCopiedSoFar); - } - - buffer = null; - } - - /** - * Determines the upload cutoff for text file. - * - * @param buffer The buffer. - * @param bufferDataLength length of the buffer data. - * @param inputStream The input stream. - * @return The index within the buffer which indicates a record boundary cutoff for a single append request for a text file. - * @throws UploadFailedException indicates that the upload failed for the specified reason. - * @throws IOException indicates the path is inaccessible or does not exist. - */ - private int determineUploadCutoffForTextFile(byte[] buffer, int bufferDataLength, RandomAccessFile inputStream) throws UploadFailedException, IOException { - Charset encoding = Charset.forName(metadata.getEncodingName()); - //NOTE: we return an offset, but everywhere else below we treat it as a byte count; in order for that to work, we need to add 1 to the result of FindNewLine. 
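- //for example (hypothetical contents): for a buffer holding the bytes of "abc\ndef" with the default
- //delimiter, findNewline returns 3 (the index of '\n'), so the cutoff becomes 4, which is the number
- //of bytes to upload ("abc\n").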
- int uploadCutoff = StringExtensions.findNewline(buffer, bufferDataLength - 1, bufferDataLength, true, encoding, metadata.getDelimiter()) + 1; - if (uploadCutoff <= 0 && (metadata.getSegmentCount() > 1 || bufferDataLength >= MAX_RECORD_LENGTH)) { - throw new UploadFailedException(MessageFormat.format("Found a record that exceeds the maximum allowed record length around offset {0}", inputStream.getFilePointer())); - } - - //a corner case here is when the newline is 2 chars long, and the first of those lands on the last byte of the buffer. If so, let's try to find another - //newline inside the buffer, because we might be splitting this wrongly. - if ((metadata.getDelimiter() == null || StringUtils.isEmpty(metadata.getDelimiter())) && uploadCutoff == buffer.length && buffer[buffer.length - 1] == (byte) '\r') { - int newCutoff = StringExtensions.findNewline(buffer, bufferDataLength - 2, bufferDataLength - 1, true, encoding, metadata.getDelimiter()) + 1; - if (newCutoff > 0) { - uploadCutoff = newCutoff; - } - } - - return uploadCutoff; - } - - /** - * Uploads the buffer. - * - * @param buffer The buffer. - * @param bytesToCopy The bytes to copy. - * @param targetStreamOffset The target stream offset. - * @return The current index within the target stream after uploading the buffer. - * @throws Exception Thrown if there is a failure uploading the current buffer. - */ - private long uploadBuffer(byte[] buffer, int bytesToCopy, long targetStreamOffset) throws Exception { - //append it to the remote stream - int attemptCount = 0; - boolean uploadCompleted = false; - while (!uploadCompleted && attemptCount < MAX_BUFFER_UPLOAD_ATTEMPT_COUNT) { - attemptCount++; - try { - if (targetStreamOffset == 0) { - frontEndAdapter.createStream(segmentMetadata.getPath(), true, buffer, bytesToCopy); - } else { - frontEndAdapter.appendToStream(segmentMetadata.getPath(), buffer, targetStreamOffset, bytesToCopy); - - } - - uploadCompleted = true; - targetStreamOffset += bytesToCopy; - } catch (Exception ex) { - //if we tried more than the number of times we were allowed to, give up and throw the exception - if (attemptCount >= MAX_BUFFER_UPLOAD_ATTEMPT_COUNT) { - throw ex; - } else { - waitForRetry(attemptCount, this.useBackOffRetryStrategy); - } - } - } - - return targetStreamOffset; - } - - /** - * Reads the data into the buffer. - * - * @param inputStream The stream to read data from. - * @param buffer The buffer to read data into - * @param bufferOffset The offset in the buffer to begin pushing data - * @param streamEndPosition The last point in the stream to read. - * @return The number of bytes read into the buffer. - * @throws IOException Thrown if there is an issue accessing the stream or the pointer to the file. 
- */
- private int readIntoBuffer(RandomAccessFile inputStream, byte[] buffer, int bufferOffset, long streamEndPosition) throws IOException {
- //read a block of data
- int bytesToRead = buffer.length - bufferOffset;
- if (bytesToRead > streamEndPosition - inputStream.getFilePointer()) {
- //last read may be smaller than previous reads; readjust # of bytes to read accordingly
- bytesToRead = (int) (streamEndPosition - inputStream.getFilePointer());
- }
-
- int remainingBytes = bytesToRead;
-
- while (remainingBytes > 0) {
- //read() may not return all the bytes we requested, so we need to retry until the requested range is filled
- int bytesRead = inputStream.read(buffer, bufferOffset, remainingBytes);
- bufferOffset += bytesRead;
- remainingBytes -= bytesRead;
- }
-
- return bytesToRead;
- }
-
- /**
- * Enables use of a back off retry strategy, allowing a caller to wait before attempting an action again.
- *
- * @param attemptCount The number of attempts that have already been made
- * @param useBackOffRetryStrategy whether to use the back off strategy or not.
- * @throws InterruptedException Thrown if there is an interrupt during the sleep.
- */
- public static void waitForRetry(int attemptCount, boolean useBackOffRetryStrategy) throws InterruptedException {
- if (!useBackOffRetryStrategy) {
- //no need to wait
- return;
- }
-
- //wait 2^attemptCount seconds, capped at MAXIMUM_BACKOFF_WAIT_SECONDS
- int intervalSeconds = Math.min(MAXIMUM_BACKOFF_WAIT_SECONDS, (int) Math.pow(2, attemptCount));
- Thread.sleep(intervalSeconds * 1000);
- }
-
- /**
- * Opens the input stream.
- * @return A {@link RandomAccessFile} stream of the file being uploaded.
- * @throws IOException Thrown if the input stream cannot be opened due to file accessibility or existence.
- */
- private RandomAccessFile openInputStream() throws IOException {
- RandomAccessFile stream = new RandomAccessFile(metadata.getInputFilePath(), "r");
-
- if (segmentMetadata.getOffset() >= stream.length()) {
- throw new IllegalArgumentException("StartOffset is beyond the end of the input file");
- }
-
- // seek is absolute, so position the stream directly at the segment's offset
- stream.seek(segmentMetadata.getOffset());
- return stream;
- }
-}
diff --git a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/StringExtensions.java b/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/StringExtensions.java
deleted file mode 100644
index eb6d2abd959c..000000000000
--- a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/StringExtensions.java
+++ /dev/null
@@ -1,156 +0,0 @@
-/**
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the MIT License. See License.txt in the project root for
- * license information.
- */
-package com.microsoft.azure.management.datalake.store.uploader;
-
-import org.apache.commons.lang3.StringUtils;
-
-import java.nio.charset.Charset;
-import java.nio.charset.StandardCharsets;
-
-/**
- * A class of helpers to determine the location of record boundaries within byte arrays.
- */
-public final class StringExtensions {
-
- private StringExtensions() {
- // private constructor to prevent instantiation.
- }
-
- /**
- * Finds the index in the given buffer of a newline character, either the first or the last (based on the parameters).
- * If a combined newline (\r\n), the index returned is that of the last character in the sequence.
- *
- * @param buffer The buffer to search in.
- * @param startOffset The index of the first byte to start searching at.
- * @param length The number of bytes to search, starting from the given startOffset.
- * @param reverse If true, searches from the startOffset down to the beginning of the buffer. If false, searches upwards.
- * @param encoding Indicates the type of encoding to use for the buffered bytes.
- * @param delimiter Optionally indicates the delimiter to consider as the "new line", which MUST BE a single character. If null, the default is '\\r', '\\n' and '\\r\\n'.
- * @return The index of the closest newline character in the sequence (based on direction) that was found. Returns -1 if not found.
- */
- public static int findNewline(byte[] buffer, int startOffset, int length, boolean reverse, Charset encoding, String delimiter) {
- if (buffer.length == 0 || length == 0) {
- return -1;
- }
-
- // define the bytes per character to use
- int bytesPerChar;
- if (encoding.equals(StandardCharsets.UTF_16) || encoding.equals(StandardCharsets.UTF_16BE) || encoding.equals(StandardCharsets.UTF_16LE)) {
- bytesPerChar = 2;
- } else if (encoding.equals(StandardCharsets.US_ASCII) || encoding.equals(StandardCharsets.UTF_8)) {
- bytesPerChar = 1;
- } else {
- throw new IllegalArgumentException("Only the following encodings are allowed: UTF-8, UTF-16, UTF-16BE, UTF-16LE and US-ASCII");
- }
-
- if (delimiter != null && !StringUtils.isEmpty(delimiter) && delimiter.length() > 1) {
- throw new IllegalArgumentException("The delimiter must only be a single character or unspecified to represent the CRLF delimiter");
- }
-
- if (delimiter != null && !StringUtils.isEmpty(delimiter)) {
- // convert the byte array back to a String
- int startOfSegment = reverse ? startOffset - length + 1 : startOffset;
- String bytesToString = new String(buffer, startOfSegment, length, encoding);
- if (!bytesToString.contains(delimiter)) {
- // didn't find the delimiter.
- return -1;
- }
-
- // the index is returned, which is 0 based, so our loop must include the zero case.
- int numCharsToDelim = reverse ? bytesToString.lastIndexOf(delimiter) : bytesToString.indexOf(delimiter);
- int toReturn = 0;
- for (int i = 0; i <= numCharsToDelim; i++) {
- // bytesToString already starts at startOfSegment, so it is indexed from zero
- toReturn += Character.toString(bytesToString.charAt(i)).getBytes(encoding).length;
- }
-
- // we get the total number of bytes, but we want to return the index (which starts at 0)
- // so we subtract 1 from the total number of bytes to get the final byte index.
- return toReturn - 1;
- }
-
- //endOffset is a 'sentinel' value; we use that to figure out when to stop searching
- int endOffset = reverse ? startOffset - length : startOffset + length;
-
- // if we are starting at the end, we need to move toward the front enough to grab the right number of bytes
- startOffset = reverse ? startOffset - (bytesPerChar - 1) : startOffset;
-
- if (startOffset < 0 || startOffset >= buffer.length) {
- throw new IndexOutOfBoundsException("Given start offset is outside the bounds of the given buffer. In reverse cases, the start offset is modified to ensure we check the full size of the last character");
- }
-
- // make sure that the length we are traversing is at least as long as a single character
- if (length < bytesPerChar) {
- throw new IllegalArgumentException("The length to search must be at least as long as the length, in bytes, of a single character");
- }
-
- if (endOffset < -1 || endOffset > buffer.length) {
- throw new IndexOutOfBoundsException("Given combination of startOffset and length would execute the search outside the bounds of the given buffer.");
- }
-
- int bufferEndOffset = reverse ? startOffset : startOffset + length;
- int result = -1;
- for (int charPos = startOffset; reverse ? charPos != endOffset : charPos + bytesPerChar - 1 < endOffset; charPos = reverse ? charPos - 1 : charPos + 1) {
- char c;
- if (bytesPerChar == 1) {
- c = (char) buffer[charPos];
- } else {
- String temp = new String(buffer, charPos, bytesPerChar, encoding);
- if (StringUtils.isEmpty(temp)) {
- continue;
- } else {
- c = temp.toCharArray()[0];
- }
- }
-
- if (isNewline(c, delimiter)) {
- result = charPos + bytesPerChar - 1;
- break;
- }
- }
-
- if ((delimiter == null || StringUtils.isEmpty(delimiter)) && !reverse && result >= 0 && result < bufferEndOffset - bytesPerChar) {
- char c;
- if (bytesPerChar == 1) {
- c = (char) buffer[result + bytesPerChar];
- } else {
- String temp = new String(buffer, result + 1, bytesPerChar, encoding);
- if (StringUtils.isEmpty(temp)) {
- // this can occur if the number of bytes for characters in the string result in an empty string (an invalid code for the given encoding)
- // in this case, that means that we are done for the default delimiter.
- return result;
- } else {
- c = temp.toCharArray()[0];
- }
- }
-
- if (isNewline(c, delimiter)) {
- //we originally landed on a \r character; if we have a \r\n character, advance one position to include that
- result += bytesPerChar;
- }
- }
-
- return result;
- }
-
- /**
- * Determines whether the specified character is a newline.
- *
- * @param c The character.
- * @param delimiter The delimiter to use. If null or empty, the CR and LF characters are used.
- * @return true if the character is a newline (or matches the delimiter); otherwise, false.
- */
- private static boolean isNewline(char c, String delimiter) {
- if ((delimiter == null || StringUtils.isEmpty(delimiter))) {
- return c == '\r' || c == '\n';
- }
-
- return c == delimiter.toCharArray()[0];
- }
-}
diff --git a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/UploadFailedException.java b/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/UploadFailedException.java
deleted file mode 100644
index 07e778afe896..000000000000
--- a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/UploadFailedException.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/**
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the MIT License. See License.txt in the project root for
- * license information.
- */
-package com.microsoft.azure.management.datalake.store.uploader;
-
-/**
- * Represents an exception that is thrown when an upload fails.
- */
-public class UploadFailedException extends Exception {
- /**
- * Initializes a new instance of the UploadFailedException exception.
- * @param message The message that describes the error.
- */
- public UploadFailedException(String message) {
- super(message);
- }
-}
diff --git a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/UploadMetadata.java b/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/UploadMetadata.java
deleted file mode 100644
index 81b838262ef3..000000000000
--- a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/UploadMetadata.java
+++ /dev/null
@@ -1,444 +0,0 @@
-/**
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the MIT License. See License.txt in the project root for
- * license information.
- */
-package com.microsoft.azure.management.datalake.store.uploader;
-
-import org.apache.commons.lang3.StringUtils;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InvalidObjectException;
-import java.io.ObjectInputStream;
-import java.io.ObjectOutputStream;
-import java.io.Serializable;
-import java.nio.charset.StandardCharsets;
-import java.text.MessageFormat;
-import java.util.BitSet;
-import java.util.UUID;
-
-/**
- * Represents general metadata pertaining to an upload.
- */
-public class UploadMetadata implements Serializable {
- private static final Object saveSync = new Object();
-
- /**
- * Constructs a new UploadMetadata from the given parameters.
- *
- * @param metadataFilePath The file path to assign to this metadata file (for saving purposes).
- * @param uploadParameters The parameters to use for constructing this metadata.
- */
- public UploadMetadata(String metadataFilePath, UploadParameters uploadParameters) {
- this.metadataFilePath = metadataFilePath;
-
- this.uploadId = UUID.randomUUID().toString();
- this.inputFilePath = uploadParameters.getInputFilePath();
- this.targetStreamPath = uploadParameters.getTargetStreamPath();
-
- String[] streamData = splitTargetStreamPathByName();
- String streamName = streamData[0];
- String streamDirectory = streamData[1];
-
- if (streamDirectory == null || StringUtils.isEmpty(streamDirectory)) {
- // the scenario where the file is being uploaded at the root
- this.segmentStreamDirectory = MessageFormat.format("/{0}.segments.{1}", streamName, UUID.randomUUID());
- } else {
- // the scenario where the file is being uploaded in a sub folder
- this.segmentStreamDirectory = MessageFormat.format("{0}/{1}.segments.{2}",
- streamDirectory,
- streamName, UUID.randomUUID());
- }
-
- this.isBinary = uploadParameters.isBinary();
-
- File fileInfo = new File(uploadParameters.getInputFilePath());
- this.fileLength = fileInfo.length();
-
- this.encodingName = uploadParameters.getFileEncoding().name();
-
- // we take the smaller of two segment counts: the count derived from the maximum segment length
- // (256mb by default) and the count produced by the segment growth logic. this protects us against
- // an aggressive increase of thread count resulting in far more segments than is reasonable for a
- // given file size, and it ensures that each segment is at least 256mb in size, which is the size
- // that ensures we have the optimal storage creation in the store.
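- // for example (hypothetical sizes): a 1gb file with the default 256mb maximum segment length gets a
- // preliminary count of 4, while calculateSegmentCount would allow 50; the smaller value (4) wins,
- // producing four 256mb segments.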
- int preliminarySegmentCount = (int) Math.ceil((double) fileInfo.length() / uploadParameters.getMaxSegementLength()); - this.segmentCount = Math.min(preliminarySegmentCount, UploadSegmentMetadata.calculateSegmentCount(fileInfo.length())); - this.segmentLength = UploadSegmentMetadata.calculateSegmentLength(fileInfo.length(), this.segmentCount); - - this.segments = new UploadSegmentMetadata[this.segmentCount]; - for (int i = 0; i < this.segmentCount; i++) { - this.segments[i] = new UploadSegmentMetadata(i, this); - } - } - - /** - * - * @return A value indicating the unique identifier associated with this upload. - */ - public String getUploadId() { - return uploadId; - } - - /** - * - * @return A value indicating the full path to the file to be uploaded. - */ - public String getInputFilePath() { - return inputFilePath; - } - - /** - * - * @return A value indicating the length (in bytes) of the file to be uploaded. - */ - public long getFileLength() { - return fileLength; - } - - /** - * - * @return A value indicating the full stream path where the file will be uploaded to. - */ - public String getTargetStreamPath() { - return targetStreamPath; - } - - /** - * - * @return A value indicating the directory path where intermediate segment streams will be stored. - */ - public String getSegmentStreamDirectory() { - return segmentStreamDirectory; - } - - /** - * - * @return A value indicating the number of segments this file is split into for purposes of uploading it. - */ - public int getSegmentCount() { - return segmentCount; - } - - /** - * - * @param segCount Sets the segment count to the specified count. - */ - public void setSegmentCount(int segCount) { - segmentCount = segCount; - } - - /** - * - * @return A value indicating the length (in bytes) of each segment of the file (except the last one, which may be less). - */ - public long getSegmentLength() { - return segmentLength; - } - - /** - * - * @param segLength The length to set the segment length to. - */ - public void setSegmentLength(long segLength) { - segmentLength = segLength; - } - /** - * - * @return A pointer to an array of segment metadata. The segments are ordered by their segment number (sequence). - */ - public UploadSegmentMetadata[] getSegments() { - return segments; - } - - /** - * - * @param segs The value to set the segment array to. - */ - public void setSegments(UploadSegmentMetadata[] segs) { - segments = segs; - } - - /** - * - * @return A value indicating whether the upload file should be treated as a binary file or not. - */ - public boolean isBinary() { - return isBinary; - } - - /** - * - * @return The name of the current encoding being used. - */ - public String getEncodingName() { - return encodingName; - } - - /** - * - * @return A value indicating the record boundary delimiter for the file, if any. - */ - public String getDelimiter() { - return delimiter; - } - - /** - * - * @return A value indicating the path where this metadata file is located. - */ - public String getMetadataFilePath() { - return metadataFilePath; - } - - /** - * - * @param metadataFilePath A value indicating the path where this metadata file is located. - */ - public void setMetadataFilePath(String metadataFilePath) { - this.metadataFilePath = metadataFilePath; - } - - private transient String metadataFilePath; - - /** - * - * @param uploadId A value indicating the unique identifier associated with this upload. 
- */ - public void setUploadId(String uploadId) { - this.uploadId = uploadId; - } - - /** - * - * @param inputFilePath A value indicating the full path to the file to be uploaded. - */ - public void setInputFilePath(String inputFilePath) { - this.inputFilePath = inputFilePath; - } - - /** - * - * @param fileLength A value indicating the length (in bytes) of the file to be uploaded. - */ - public void setFileLength(long fileLength) { - this.fileLength = fileLength; - } - - /** - * - * @param targetStreamPath A value indicating the full stream path where the file will be uploaded to. - */ - public void setTargetStreamPath(String targetStreamPath) { - this.targetStreamPath = targetStreamPath; - } - - /** - * - * @param segmentStreamDirectory A value indicating the directory path where intermediate segment streams will be stored. - */ - public void setSegmentStreamDirectory(String segmentStreamDirectory) { - this.segmentStreamDirectory = segmentStreamDirectory; - } - - /** - * - * @param binary A value indicating whether the upload file should be treated as a binary file or not. - */ - public void setBinary(boolean binary) { - isBinary = binary; - } - - /** - * - * @param encodingName The name of the current encoding being used. - */ - public void setEncodingName(String encodingName) { - this.encodingName = encodingName; - } - - /** - * - * @param delimiter A value indicating the record boundary delimiter for the file, if any. - */ - public void setDelimiter(String delimiter) { - this.delimiter = delimiter; - } - - private String uploadId; - - private String inputFilePath; - - private long fileLength; - - private String targetStreamPath; - - private String segmentStreamDirectory; - - private int segmentCount; - - private long segmentLength; - - private UploadSegmentMetadata[] segments; - - private boolean isBinary; - - private String encodingName; - - private String delimiter; - - /** - * Initializes a new instance of the UploadMetadata class for use with unit testing. - */ - protected UploadMetadata() { - this.encodingName = StandardCharsets.UTF_8.name(); - } - - /** - * Attempts to load an UploadMetadata object from the given file. - * - * @param filePath The full path to the file where to load the metadata from - * @return A deserialized {@link UploadMetadata} object from the file specified. - * @throws FileNotFoundException Thrown if the filePath is inaccessible or does not exist - * @throws InvalidMetadataException Thrown if the metadata is not in the expected format. - */ - public static UploadMetadata loadFrom(String filePath) throws FileNotFoundException, InvalidMetadataException { - if (!new File(filePath).exists()) { - throw new FileNotFoundException("Could not find metadata file: " + filePath); - } - - UploadMetadata result = null; - try { - FileInputStream fileIn = new FileInputStream(filePath); - ObjectInputStream in = new ObjectInputStream(fileIn); - result = (UploadMetadata) in.readObject(); - in.close(); - fileIn.close(); - result.metadataFilePath = filePath; - return result; - } catch (Exception ex) { - throw new InvalidMetadataException("Unable to parse metadata file", ex); - } - } - - /** - * Saves the given metadata to its canonical location. This method is thread-safe. - * - * @throws IOException Thrown if the file cannot be saved due to accessibility or there is an error saving the stream to disk. - * @throws InvalidMetadataException Thrown if the metadata is invalid. 
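- * <p>A hypothetical usage sketch (the path is illustrative, and {@code parameters} is assumed to be an
- * existing {@link UploadParameters} instance):</p>
- * <pre>{@code
- * UploadMetadata metadata = new UploadMetadata("/tmp/upload.metadata", parameters);
- * metadata.save(); // serializes this object to /tmp/upload.metadata
- * }</pre>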
- */
- public void save() throws IOException, InvalidMetadataException {
- if (this.metadataFilePath == null || StringUtils.isEmpty(this.metadataFilePath)) {
- throw new InvalidObjectException("Null or empty metadataFilePath. Cannot save metadata until this property is set.");
- }
-
- //quick check to ensure that the metadata we constructed is sane
- this.validateConsistency();
-
- synchronized (saveSync) {
- File curMetadata = new File(this.metadataFilePath);
- if (curMetadata.exists()) {
- curMetadata.delete();
- }
-
- // always create the full path to the file, since this will not throw if it already exists.
- curMetadata.getParentFile().mkdirs();
- curMetadata.createNewFile();
- try (FileOutputStream fileOut = new FileOutputStream(this.metadataFilePath);
- ObjectOutputStream out = new ObjectOutputStream(fileOut)) {
- out.writeObject(this);
- } catch (Exception ex) {
- throw new InvalidMetadataException("Unable to serialize metadata object and write it to a file", ex);
- }
- }
- }
-
- /**
- * Deletes the metadata file from disk.
- *
- * @throws InvalidObjectException Thrown if the metadata file path has not yet been set.
- */
- public void deleteFile() throws InvalidObjectException {
- if (this.metadataFilePath == null || StringUtils.isEmpty(this.metadataFilePath)) {
- throw new InvalidObjectException("Null or empty metadataFilePath. Cannot delete metadata until this property is set.");
- }
-
- File curMetadata = new File(this.metadataFilePath);
- if (curMetadata.exists()) {
- curMetadata.delete();
- }
- }
-
- /**
- * Verifies the given metadata for consistency. Checks include:
- * Completeness
- * Existence and consistency with local file
- * Segment data consistency
- *
- * @throws InvalidMetadataException Thrown if the metadata is invalid.
- */
- public void validateConsistency() throws InvalidMetadataException {
- if (this.segments == null || this.segments.length != this.segmentCount) {
- throw new InvalidMetadataException("Inconsistent number of segments");
- }
-
- long sum = 0;
- int lastSegmentNumber = -1;
- BitSet seenSegments = new BitSet(this.segmentCount);
-
- for (UploadSegmentMetadata segment : this.segments) {
- if (segment.getSegmentNumber() < 0 || segment.getSegmentNumber() >= this.segmentCount) {
- throw new InvalidMetadataException(MessageFormat.format("Segment numbers must be at least 0 and less than {0}. Found segment number {1}.", this.segmentCount, segment.getSegmentNumber()));
- }
-
- if (segment.getSegmentNumber() <= lastSegmentNumber) {
- throw new InvalidMetadataException(MessageFormat.format("Segment number {0} appears out of order.", segment.getSegmentNumber()));
- }
-
- if (seenSegments.get(segment.getSegmentNumber())) {
- throw new InvalidMetadataException(MessageFormat.format("Segment number {0} appears twice", segment.getSegmentNumber()));
- }
-
- if (segment.getOffset() != sum) {
- throw new InvalidMetadataException(MessageFormat.format("Segment number {0} has an invalid starting offset ({1}). Expected {2}.", segment.getSegmentNumber(), segment.getOffset(), sum));
- }
-
- seenSegments.set(segment.getSegmentNumber());
- sum += segment.getLength();
- lastSegmentNumber = segment.getSegmentNumber();
- }
-
- if (sum != this.fileLength) {
- throw new InvalidMetadataException("The individual segment lengths do not add up to the input File length");
- }
- }
-
- /**
- * Splits the target stream path, returning the name of the stream and the full directory path (if any).
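- * <p>For example, "/folder/out.txt" yields "out.txt" at index 0 and "/folder" at index 1, while a
- * root-level path such as "/out.txt" yields "out.txt" at index 0 and null at index 1.</p>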
- * - * @return A string array with the stream name is at index 0 and the stream path (if any) at index 1. - */ - public String[] splitTargetStreamPathByName() { - String[] toReturn = new String[2]; - int numFoldersInPath = this.targetStreamPath.split("/").length; - if (numFoldersInPath - 1 == 0 || (numFoldersInPath - 1 == 1 && this.targetStreamPath.startsWith("/"))) { - // the scenario where the file is being uploaded at the root - toReturn[0] = this.targetStreamPath.replaceAll("^[/]", ""); - toReturn[1] = null; - } else { - // the scenario where the file is being uploaded in a sub folder - toReturn[0] = this.targetStreamPath.substring(this.targetStreamPath.lastIndexOf('/') + 1); - toReturn[1] = this.targetStreamPath.substring(0, this.targetStreamPath.lastIndexOf('/')); - } - - return toReturn; - } -} diff --git a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/UploadMetadataGenerator.java b/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/UploadMetadataGenerator.java deleted file mode 100644 index e2e650f27dcc..000000000000 --- a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/UploadMetadataGenerator.java +++ /dev/null @@ -1,252 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - */ -package com.microsoft.azure.management.datalake.store.uploader; - -import org.apache.commons.lang3.ArrayUtils; -import org.apache.commons.lang3.StringUtils; - -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.RandomAccessFile; -import java.nio.charset.Charset; -import java.text.MessageFormat; - -/** - * An internally used class for generating the metadata used for upload. - */ -public class UploadMetadataGenerator { - - private UploadParameters parameters; - private int maxAppendLength; - - /** - * Creates a new instance of the UploadMetadataGenerator with the given parameters and the default maximum append length. - * - * @param parameters The parameters. - */ - public UploadMetadataGenerator(UploadParameters parameters) { - this(parameters, SingleSegmentUploader.BUFFER_LENGTH); - } - - /** - * Creates a new instance of the UploadMetadataGenerator with the given parameters and the given maximum append length. - * - * @param parameters The parameters - * @param maxAppendLength The maximum allowed append length when uploading a file. - */ - public UploadMetadataGenerator(UploadParameters parameters, int maxAppendLength) { - this.parameters = parameters; - this.maxAppendLength = maxAppendLength; - } - - /** - * Attempts to load the metadata from an existing file in its canonical location. - * - * @param metadataFilePath The metadata file path. - * @return The deserialized {@link UploadMetadata} from the specified file path. - * @throws FileNotFoundException Thrown if the specified metadataFilePath is invalid. - * @throws InvalidMetadataException Thrown if the metadata itself is invalid. 
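- * <p>A hypothetical usage sketch for resuming an upload (assuming an existing
- * {@code UploadMetadataGenerator} named {@code generator}; the path is illustrative):</p>
- * <pre>{@code
- * UploadMetadata resumed = generator.getExistingMetadata("/tmp/upload.metadata");
- * }</pre>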
- */
- public UploadMetadata getExistingMetadata(String metadataFilePath) throws FileNotFoundException, InvalidMetadataException {
- //load from file (based on input parameters)
- UploadMetadata metadata = UploadMetadata.loadFrom(metadataFilePath);
- metadata.validateConsistency();
- return metadata;
- }
-
- /**
- * Creates a new metadata based on the given input parameters, and saves it to its canonical location.
- *
- * @param metadataFilePath Where the serialized metadata will be saved
- * @return A new {@link UploadMetadata} object.
- * @throws IOException Thrown if there is an issue saving the metadata to disk.
- * @throws UploadFailedException Thrown if there is an issue aligning the segment record boundaries
- * @throws InvalidMetadataException Thrown if the metadata is invalid.
- */
- public UploadMetadata createNewMetadata(String metadataFilePath) throws IOException, UploadFailedException, InvalidMetadataException {
- //determine segment count, segment length and upload Id
- //create metadata
- UploadMetadata metadata = new UploadMetadata(metadataFilePath, parameters);
-
- if (!parameters.isBinary() && metadata.getSegmentCount() > 1) {
- this.alignSegmentsToRecordBoundaries(metadata);
- }
-
- //save the initial version
- metadata.save();
-
- return metadata;
- }
-
- /**
- * Aligns segments to match record boundaries (where a record boundary = a new line).
- * If not possible (max record size = 4MB), throws an exception.
- *
- * @param metadata The metadata to realign
- * @throws IOException Thrown if the input file path in the metadata is invalid or inaccessible.
- * @throws UploadFailedException Thrown if the length adjustment cannot be determined.
- */
- private void alignSegmentsToRecordBoundaries(UploadMetadata metadata) throws IOException, UploadFailedException {
- int remainingSegments = 0;
-
- try (RandomAccessFile stream = new RandomAccessFile(metadata.getInputFilePath(), "r")) {
- long offset = 0;
- for (int i = 0; i < metadata.getSegments().length; i++) {
- UploadSegmentMetadata segment = metadata.getSegments()[i];
-
- //updating segment lengths means that both the offset and the length of the next segment need to be recalculated, to keep the segment lengths somewhat balanced
- long diff = segment.getOffset() - offset;
- segment.setOffset(offset);
- segment.setLength(segment.getLength() + diff);
- if (segment.getOffset() >= metadata.getFileLength()) {
- continue;
- }
-
- if (segment.getSegmentNumber() == metadata.getSegments().length - 1) {
- //last segment picks up the slack
- segment.setLength(metadata.getFileLength() - segment.getOffset());
- } else {
- //figure out how much we need to adjust the length of the segment so it ends on a record boundary (this can be negative or positive)
- int lengthAdjustment = determineLengthAdjustment(segment, stream, Charset.forName(metadata.getEncodingName()), metadata.getDelimiter()) + 1;
-
- //adjust segment length and offset
- segment.setLength(segment.getLength() + lengthAdjustment);
- }
- offset += segment.getLength();
- remainingSegments++;
- }
- }
-
- //since we adjusted the segment lengths, it's possible that the last segment(s) became zero-length; if so, remove them
- UploadSegmentMetadata[] segments = metadata.getSegments();
- if (remainingSegments < segments.length) {
- segments = ArrayUtils.subarray(segments, 0, remainingSegments);
- metadata.setSegments(segments);
- metadata.setSegmentCount(segments.length);
- }
-
- //NOTE: we are not validating consistency here; this method is called by createNewMetadata which calls save() after this, which validates consistency anyway.
- }
-
- /**
- * Calculates the value by which we'd need to adjust the length of the given segment, by searching for the nearest newline around it (before and after),
- * and returning the distance to it (which can be positive, if after, or negative, if before).
- *
- * @param segment The segment to do the calculation on.
- * @param stream The full stream used to figure out the adjustment
- * @param encoding The encoding to use to determine where the cutoffs are
- * @param delimiter The delimiter that determines how we adjust. If null then '\\r', '\\n' and '\\r\\n' are used.
- * @return How much to adjust the segment length by.
- * @throws UploadFailedException Thrown if proper upload boundaries cannot be determined.
- * @throws IOException Thrown if the stream being used is invalid or inaccessible.
- */
- private int determineLengthAdjustment(UploadSegmentMetadata segment, RandomAccessFile stream, Charset encoding, String delimiter) throws UploadFailedException, IOException {
- long referenceFileOffset = segment.getOffset() + segment.getLength();
- byte[] buffer = new byte[maxAppendLength];
-
- //read 2MB before the segment boundary and 2MB after (for a total of 4MB = max append length)
- int bytesRead = readIntoBufferAroundReference(stream, buffer, referenceFileOffset);
- if (bytesRead > 0) {
- int middlePoint = bytesRead / 2;
- //search for newline in it
- int newLinePosBefore = StringExtensions.findNewline(buffer, middlePoint + 1, middlePoint + 1, true, encoding, delimiter);
-
- //in some cases, we may have a newline that is 2 characters long, and it occurs exactly on the midpoint, which means we won't be able to find its end.
- //see if that's the case, and then search for a new candidate before it.
- if ((delimiter == null || StringUtils.isEmpty(delimiter)) && newLinePosBefore == middlePoint + 1 && buffer[newLinePosBefore] == (byte) '\r') {
- int newNewLinePosBefore = StringExtensions.findNewline(buffer, middlePoint, middlePoint, true, encoding, delimiter);
- if (newNewLinePosBefore >= 0) {
- newLinePosBefore = newNewLinePosBefore;
- }
- }
-
- int newLinePosAfter = StringExtensions.findNewline(buffer, middlePoint, middlePoint, false, encoding, delimiter);
- if ((delimiter == null || StringUtils.isEmpty(delimiter)) && newLinePosAfter == buffer.length - 1 && buffer[newLinePosAfter] == (byte) '\r' && newLinePosBefore >= 0) {
- newLinePosAfter = -1;
- }
-
- int closestNewLinePos = findClosestToCenter(newLinePosBefore, newLinePosAfter, middlePoint);
-
- //middle point of the buffer corresponds to the reference file offset, so all we need to do is return the difference between the closest newline and the center of the buffer
- if (closestNewLinePos >= 0) {
- return closestNewLinePos - middlePoint;
- }
- }
-
- //if we get this far, we were unable to find a record boundary within our limits => fail the upload
- throw new UploadFailedException(
- MessageFormat.format(
- "Unable to locate a record boundary within {0}MB on either side of segment {1} (offset {2}). This means the record at that offset is larger than {3}MB.",
- maxAppendLength / 1024 / 1024 / 2,
- segment.getSegmentNumber(),
- segment.getOffset(),
- maxAppendLength / 1024 / 1024));
- }
-
- /**
- * Returns the value (of the given two) that is closest in absolute terms to the center value.
- * Values that are negative are ignored (since these are assumed to represent array indices).
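- * <p>For example, {@code findClosestToCenter(3, 9, 5)} returns 3 (distance 2 versus 4), and
- * {@code findClosestToCenter(-1, 9, 5)} returns 9 because the negative value is ignored.</p>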
- * - * @param value1 First value to compare - * @param value2 Second value to compare - * @param centerValue The center value they are compared against. - * @return Either value1 or value2 depending on which is closest to the centerValue - */ - private static int findClosestToCenter(int value1, int value2, int centerValue) { - if (value1 >= 0) { - if (value2 >= 0) { - return Math.abs(value2 - centerValue) > Math.abs(value1 - centerValue) ? value1 : value2; - } else { - return value1; - } - } else { - return value2; - } - } - - /** - * Reads data from the given file into the given buffer, centered around the given file offset. The first half of the buffer will be - * filled with data right before the given offset, while the remainder of the buffer will contain data right after it (of course, containing the byte at the given offset). - * @param stream The stream to read from - * @param buffer The buffer to read data into - * @param fileReferenceOffset The offset to start reading from in the stream. - * @return The number of bytes reads, which could be less than the length of the input buffer if we can't read due to the beginning or the end of the file. - * @throws IOException Thrown if the stream being used is invalid or inaccessible. - */ - private static int readIntoBufferAroundReference(RandomAccessFile stream, byte[] buffer, long fileReferenceOffset) throws IOException { - int length = buffer.length; - //calculate start offset - long fileStartOffset = fileReferenceOffset - length / 2; - - if (fileStartOffset < 0) { - //offset is less than zero, adjust it, as well as the length we want to read - length += (int) fileStartOffset; - fileStartOffset = 0; - if (length <= 0) { - return 0; - } - } - - if (fileStartOffset + length > stream.length()) { - //startOffset + length is beyond the end of the stream, adjust the length accordingly - length = (int) (stream.length() - fileStartOffset); - if (length <= 0) { - return 0; - } - } - - //read the appropriate block of the file into the buffer, using symmetry with respect to its midpoint - // we always initiate a seek from the origin of the file. - stream.seek(0); - stream.seek(fileStartOffset); - int bufferOffset = 0; - while (bufferOffset < length) { - int bytesRead = stream.read(buffer, bufferOffset, length - bufferOffset); - bufferOffset += bytesRead; - } - return length; - } -} diff --git a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/UploadParameters.java b/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/UploadParameters.java deleted file mode 100644 index fc8636e1213d..000000000000 --- a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/UploadParameters.java +++ /dev/null @@ -1,371 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - */ -package com.microsoft.azure.management.datalake.store.uploader; - -import org.apache.commons.lang3.StringUtils; - -import java.nio.charset.Charset; -import java.nio.charset.StandardCharsets; - -/** - * Represents parameters for the DataLake Uploader. - */ -public class UploadParameters { - - /** - * The default segment length that is used to ensure maximum life cycle performance for uploaded data. - * 256MB is the default and should not be changed without a good reason. 
- */ - private static final long SEGMENT_LENGTH = 256 * 1024 * 1024; - /** - * Creates a new set of parameters for the DataLake Uploader with optional values set with their defaults. - * Defaults are as follows: - * threadCount = 1 - * isOverwrite = false - * isResume = false - * isBinary = true - * maxSegmentLength = 256mb - * localMetadataLocation = File.createTempFile() - * - * @param inputFilePath The full path to the file to be uploaded. - * @param targetStreamPath The full stream path where the file will be uploaded to. - * @param accountName Name of the account to upload to. - */ - public UploadParameters(String inputFilePath, String targetStreamPath, String accountName) { - this(inputFilePath, targetStreamPath, accountName, 1, false, false, true, SEGMENT_LENGTH, null); - } - - /** - * Creates a new set of parameters for the DataLake Uploader with the following optional values set with their defaults. - * Defaults are as follows: - * isBinary = true - * maxSegmentLength = 256mb - * localMetadataLocation = File.createTempFile(). - * - * @param inputFilePath The full path to the file to be uploaded. - * @param targetStreamPath The full stream path where the file will be uploaded to. - * @param accountName Name of the account to upload to. - * @param threadCount The maximum number of parallel threads to use for the upload. - * @param isOverwrite Whether to overwrite the target stream or not. - * @param isResume Indicates whether to resume a previously interrupted upload. - */ - public UploadParameters(String inputFilePath, String targetStreamPath, String accountName, int threadCount, boolean isOverwrite, boolean isResume) { - this(inputFilePath, targetStreamPath, accountName, threadCount, isOverwrite, isResume, true, SEGMENT_LENGTH, null); - } - - /** - * Creates a new set of parameters for the DataLake Uploader with the following optional values set with their defaults. - * Defaults are as follows: - * isBinary = true - * maxSegmentLength = 256mb - * - * @param inputFilePath The full path to the file to be uploaded. - * @param targetStreamPath The full stream path where the file will be uploaded to. - * @param accountName Name of the account to upload to. - * @param threadCount The maximum number of parallel threads to use for the upload. - * @param isOverwrite Whether to overwrite the target stream or not. - * @param isResume Indicates whether to resume a previously interrupted upload. - * @param localMetadataLocation Indicates the directory path where to store the local upload metadata file while the upload is in progress. This location must be writeable from this application. Default location if null: File.createTempFile() - */ - public UploadParameters(String inputFilePath, String targetStreamPath, String accountName, int threadCount, boolean isOverwrite, boolean isResume, String localMetadataLocation) { - this(inputFilePath, targetStreamPath, accountName, threadCount, isOverwrite, isResume, true, SEGMENT_LENGTH, localMetadataLocation); - } - - /** - * Creates a new set of parameters for the DataLake Uploader. - * - * @param inputFilePath The full path to the file to be uploaded. - * @param targetStreamPath The full stream path where the file will be uploaded to. - * @param accountName Name of the account to upload to. - * @param threadCount The maximum number of parallel threads to use for the upload. - * @param isOverwrite Whether to overwrite the target stream or not. - * @param isResume Indicates whether to resume a previously interrupted upload. 
- * @param isBinary Indicates whether to treat the input file as a binary file (true), or whether to align upload blocks to record boundaries (false). - * @param maxSegmentLength The recommended value is 256mb, which gives optimal performance. Modify at your own risk. - * @param localMetadataLocation Indicates the directory path where to store the local upload metadata file while the upload is in progress. This location must be writeable from this application. Default location if null: File.createTempFile() - */ - public UploadParameters(String inputFilePath, String targetStreamPath, String accountName, int threadCount, boolean isOverwrite, boolean isResume, boolean isBinary, long maxSegmentLength, String localMetadataLocation) { - this.setInputFilePath(inputFilePath); - this.setTargetStreamPath(targetStreamPath); - this.setThreadCount(threadCount); - this.setAccountName(accountName); - this.setOverwrite(isOverwrite); - this.setResume(isResume); - this.setBinary(isBinary); - this.setMaxSegementLength(maxSegmentLength); - - if (localMetadataLocation == null || StringUtils.isEmpty(localMetadataLocation)) { - localMetadataLocation = System.getProperty("java.io.tmpdir"); - } - - this.setLocalMetadataLocation(localMetadataLocation); - - this.setUseSegmentBlockBackOffRetryStrategy(true); - - // TODO: in the future we will expose these as optional parameters, allowing customers to specify encoding and delimiters. - this.setFileEncoding(StandardCharsets.UTF_8); - this.setDelimiter(null); - } - - /** - * Creates a new set of parameters for the DataLake Uploader used for unit testing. - * - * @param inputFilePath The full path to the file to be uploaded. - * @param targetStreamPath The full stream path where the file will be uploaded to. - * @param accountName Name of the account to upload to. - * @param useSegmentBlockBackOffRetryStrategy if set to true [use segment block back off retry strategy]. - * @param threadCount The maximum number of parallel threads to use for the upload. - * @param isOverwrite Whether to overwrite the target stream or not. - * @param isResume Indicates whether to resume a previously interrupted upload. - * @param isBinary Indicates whether to treat the input file as a binary file (true), or whether to align upload blocks to record boundaries (false). - * @param maxSegmentLength The recommended value is 256mb, which gives optimal performance. Modify at your own risk. - * @param localMetadataLocation Indicates the directory path where to store the local upload metadata file while the upload is in progress. This location must be writeable from this application. Default location if null: File.createTempFile() - */ - protected UploadParameters(String inputFilePath, String targetStreamPath, String accountName, boolean useSegmentBlockBackOffRetryStrategy, int threadCount, boolean isOverwrite, boolean isResume, boolean isBinary, long maxSegmentLength, String localMetadataLocation) { - this(inputFilePath, targetStreamPath, accountName, threadCount, isOverwrite, isResume, isBinary, maxSegmentLength, localMetadataLocation); - this.setUseSegmentBlockBackOffRetryStrategy(useSegmentBlockBackOffRetryStrategy); - } - - /** - * Gets a value indicating whether [to use segment block back off retry strategy]. - * - * @return true if [to use segment block back off retry strategy]; otherwise, false. 
- */
- public boolean isUseSegmentBlockBackOffRetryStrategy() {
- return useSegmentBlockBackOffRetryStrategy;
- }
-
- /**
- * Internally sets the value of whether to use the segment block back-off retry strategy.
- *
- * @param useSegmentBlockBackOffRetryStrategy true to use the segment block back-off retry strategy; otherwise, false.
- */
- private void setUseSegmentBlockBackOffRetryStrategy(boolean useSegmentBlockBackOffRetryStrategy) {
- this.useSegmentBlockBackOffRetryStrategy = useSegmentBlockBackOffRetryStrategy;
- }
-
- /**
- * Gets a value indicating the full path to the file to be uploaded.
- *
- * @return The input file path.
- */
- public String getInputFilePath() {
- return inputFilePath;
- }
-
- /**
- * Internally sets the input file path.
- *
- * @param inputFilePath The full path to the file to be uploaded.
- */
- private void setInputFilePath(String inputFilePath) {
- this.inputFilePath = inputFilePath;
- }
-
- /**
- * Gets a value indicating the full stream path where the file will be uploaded to.
- *
- * @return The target stream path.
- */
- public String getTargetStreamPath() {
- return targetStreamPath;
- }
-
- /**
- * Internally sets the target stream path.
- *
- * @param targetStreamPath The full stream path where the file will be uploaded to.
- */
- private void setTargetStreamPath(String targetStreamPath) {
- this.targetStreamPath = targetStreamPath;
- }
-
- /**
- * Gets a value indicating the name of the account to upload to.
- *
- * @return The name of the account.
- */
- public String getAccountName() {
- return accountName;
- }
-
- /**
- * Internally sets the account name to upload to.
- *
- * @param accountName The name of the account to upload to.
- */
- private void setAccountName(String accountName) {
- this.accountName = accountName;
- }
-
- /**
- * Gets a value indicating the maximum number of parallel threads to use for the upload.
- *
- * @return The thread count.
- */
- public int getThreadCount() {
- return threadCount;
- }
-
- /**
- * Internally sets the number of threads that are allowed for the upload.
- *
- * @param threadCount The number of threads to use for the upload.
- */
- protected void setThreadCount(int threadCount) {
- this.threadCount = threadCount;
- }
-
- /**
- * Gets a value indicating whether to overwrite the target stream if it already exists.
- *
- * @return true if the target stream will be overwritten; otherwise, false.
- */
- public boolean isOverwrite() {
- return overwrite;
- }
-
- /**
- * Internally sets whether the target stream can be overwritten.
- *
- * @param overwrite true if the target stream can be overwritten; otherwise, false.
- */
- private void setOverwrite(boolean overwrite) {
- this.overwrite = overwrite;
- }
-
- /**
- * Gets a value indicating whether to resume a previously interrupted upload.
- *
- * @return true if a previously interrupted upload is being resumed; otherwise, false.
- */
- public boolean isResume() {
- return resume;
- }
-
- /**
- * Internally sets whether this is a previous upload being resumed.
- *
- * @param resume true if a previously interrupted upload is being resumed; otherwise, false.
- */
- private void setResume(boolean resume) {
- this.resume = resume;
- }
-
- /**
- * Gets a value indicating whether the input file should be treated as a binary (true) or a delimited input (false).
- *
- * @return true if this instance treats the input as binary; otherwise, false.
- */
- public boolean isBinary() {
- return binary;
- }
-
- /**
- * Internally sets whether the file being uploaded should be treated as binary or delimited input.
- *
- * @param binary true to treat the file as binary; false for delimited input.
- */
- private void setBinary(boolean binary) {
- this.binary = binary;
- }
-
- /**
- * Gets the maximum length of each segment in bytes.
- *
- * @return The maximum length of each segment in bytes.
- */
- public long getMaxSegementLength() {
- return maxSegementLength;
- }
-
- /**
- * Internally sets the maximum length of each segment in bytes.
- * - * @param maxSegementLength - */ - private void setMaxSegementLength(long maxSegementLength) { - this.maxSegementLength = maxSegementLength; - } - - /** - * Gets a value indicating the directory path where to store the metadata for the upload. - * - * @return The local metadata location. - */ - public String getLocalMetadataLocation() { - return localMetadataLocation; - } - - /** - * Internally set the local metadata location. - * - * @param localMetadataLocation - */ - private void setLocalMetadataLocation(String localMetadataLocation) { - this.localMetadataLocation = localMetadataLocation; - } - - /** - * Gets a value indicating the encoding of the file being uploaded. - * - * @return The file encoding. - */ - public Charset getFileEncoding() { - return fileEncoding; - } - - /** - * Internally sets the value of the file encoding. - * - * @param fileEncoding - */ - private void setFileEncoding(Charset fileEncoding) { - this.fileEncoding = fileEncoding; - } - - /** - * Gets a value indicating the record boundary delimiter for the file, if any. - * - * @return The record boundary delimiter - */ - public String getDelimiter() { - return delimiter; - } - - /** - * Internally set the value of the record boundary delimiter. - * - * @param delimiter - */ - private void setDelimiter(String delimiter) { - this.delimiter = delimiter; - } - - private boolean useSegmentBlockBackOffRetryStrategy; - - private String inputFilePath; - - private String targetStreamPath; - - private String accountName; - - private int threadCount; - - private boolean overwrite; - - private boolean resume; - - private boolean binary; - - private long maxSegementLength; - - private String localMetadataLocation; - - private Charset fileEncoding; - - private String delimiter; -} diff --git a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/UploadSegmentMetadata.java b/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/UploadSegmentMetadata.java deleted file mode 100644 index 07aadb6af73a..000000000000 --- a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/UploadSegmentMetadata.java +++ /dev/null @@ -1,261 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - */ -package com.microsoft.azure.management.datalake.store.uploader; - -import java.io.Serializable; -import java.text.MessageFormat; - -/** - * Represents metadata for a particular file segment. - */ -public class UploadSegmentMetadata implements Serializable { - - /** - * Initializes a new instance of the UploadSegmentMetadata for use with unit tests. - */ - protected UploadSegmentMetadata() { - // does nothing, used for unit tests - } - - /** - * Creates a new UploadSegmentMetadata with the given segment number. - * - * @param segmentNumber The segment number for this instance. - * @param metadata The full metadata associated with this segment. 
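- * <p>For example (hypothetical values): segment 2 of an upload whose segment length is 256mb is assigned
- * the offset 512mb and a stream path of the form "{segmentStreamDirectory}/{streamName}.{uploadId}.segment2".</p>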
- */ - public UploadSegmentMetadata(int segmentNumber, UploadMetadata metadata) { - this.segmentNumber = segmentNumber; - this.status = SegmentUploadStatus.Pending; - - String targetStreamName = metadata.splitTargetStreamPathByName()[0]; - this.path = MessageFormat.format("{0}/{1}.{2}.segment{3}", metadata.getSegmentStreamDirectory(), targetStreamName, metadata.getUploadId(), this.segmentNumber); - this.offset = this.segmentNumber * metadata.getSegmentLength(); // segment number is zero-based - this.length = calculateSegmentLength(this.segmentNumber, metadata); - } - - /** - * Calculates the length of a typical (non-terminal) segment for a file of the given length that is split into the given number of segments. - * - * @param fileLength The length of the file, in bytes. - * @param segmentCount The number of segments to split the file into. - * @return The length of this segment, in bytes. - */ - public static long calculateSegmentLength(long fileLength, int segmentCount) { - if (segmentCount < 0) { - throw new IllegalArgumentException("Number of segments must be a positive integer"); - } - - if (segmentCount == 0) { - // In this case, we are attempting to upload an empty file, - // in which case the uploader should just return - return 0; - } - - long segmentLength = fileLength / segmentCount; - - //if the file cannot be split into even segments, we need to increment the typical segment length by 1 - //in order to have the last segment in the file be smaller than the other ones. - if (fileLength % segmentCount != 0) { - //BUT we can only do this IF this wouldn't cause the last segment to have a negative length - if (fileLength - (segmentCount - 1) * (segmentLength + 1) > 0) { - segmentLength++; - } - } - - return segmentLength; - } - - /** - * Calculates the length of the segment with given number for a file with given length that is split into the given number of segments. - * @param segmentNumber The segment number. - * @param metadata The metadata for the current upload. - * @return The length of this segment, in bytes. 
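- * <p>For example (hypothetical values): a 10-byte file split into 3 segments has a typical segment
- * length of 4 bytes, so segments 0 and 1 return 4 and the final segment returns 2.</p>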
- */ - public static long calculateSegmentLength(int segmentNumber, UploadMetadata metadata) { - if (segmentNumber < 0 || segmentNumber >= metadata.getSegmentCount()) { - throw new IndexOutOfBoundsException("Segment Number must be at least zero and less than the total number of segments"); - } - - if (metadata.getFileLength() < 0) { - throw new IllegalArgumentException("Cannot have a negative file length"); - } - - //verify if the last segment would have a positive value - long lastSegmentLength = metadata.getFileLength() - (metadata.getSegmentCount() - 1) * metadata.getSegmentLength(); - if (lastSegmentLength < 0) { - throw new IllegalArgumentException("The given values for segmentCount and segmentLength cannot possibly be used to split a file with the given fileLength (the last segment would have a negative length)"); - } else if (lastSegmentLength > metadata.getSegmentLength()) { - //verify if the given segmentCount and segmentLength combination would produce an even split - if (metadata.getFileLength() - (metadata.getSegmentCount() - 1) * (metadata.getSegmentLength() + 1) > 0) { - throw new IllegalArgumentException("The given values for segmentCount and segmentLength would not produce an even split of a file with given fileLength"); - } - } - - if (metadata.getFileLength() == 0) { - return 0; - } - - //all segments except the last one have the same length; - //the last one only has the 'full' length if by some miracle the file length is a perfect multiple of the Segment length - if (segmentNumber < metadata.getSegmentCount() - 1) { - return metadata.getSegmentLength(); - } else { - return lastSegmentLength; - } - } - - /** - * Used to calculate the total number of segments that we should create. - */ - private static final int BASE_MULTIPLIER = 50; - - /** - * The Multiplier is the number of times the segment count is inflated when the length of the file increases by a factor of 'Reducer'. - */ - private static final int SEGMENT_COUNT_MULTIPLIER = 2; - - /** - * The minimum number of bytes in a segment. For best performance, should be sync-ed with the upload buffer length. - */ - public static final int MINIMUM_SEGMENT_SIZE = SingleSegmentUploader.BUFFER_LENGTH; - - /** - * Calculates the number of segments a file of the given length should be split into. - * The method to calculate this is based on some empirical measurements that allows both the number of segments and the length of each segment to grow as the input file size grows. - * They both grow on a logarithmic pattern as the file length increases. - * The formula is roughly this: - * Multiplier = Min(100, 50 * 2 ^ Log10(FileLengthInGB)) - * SegmentCount = Max(1, Multiplier * 2 ^ Log10(FileLengthInGB) - * Essentially we quadruple the number of segments for each tenfold increase in the file length, with certain caps. The formula is designed to support both small files and - * extremely large files (and not cause very small segment lengths or very large number of segments). - * - * @param fileLength The length of the file, in bytes. - * @return The number of segments to split the file into. Returns 0 if fileLength is 0. 
- */ - public static int calculateSegmentCount(long fileLength) { - if (fileLength < 0) { - throw new IllegalArgumentException("File length cannot be negative"); - } - - if (fileLength == 0) { - //empty file => no segments - return 0; - } - - int minNumberOfSegments = (int) Math.max(1, fileLength / MINIMUM_SEGMENT_SIZE); - - //convert the file length into GB - double lengthInGb = fileLength / 1024.0 / 1024 / 1024; - - //apply the formula described in the class description and return the result - double baseMultiplier = calculateBaseMultiplier(lengthInGb); - int segmentCount = (int) (baseMultiplier * Math.pow(SEGMENT_COUNT_MULTIPLIER, Math.log10(lengthInGb))); - if (segmentCount > minNumberOfSegments) { - segmentCount = minNumberOfSegments; - } - - if (segmentCount < 1) { - segmentCount = 1; - } - - return segmentCount; - } - - private static double calculateBaseMultiplier(double lengthInGb) { - double value = BASE_MULTIPLIER * Math.pow(2, Math.log10(lengthInGb)); - return Math.min(100, value); - } - - private int segmentNumber; - - private long offset; - - private long length; - - private SegmentUploadStatus status; - - private String path; - - /** - * - * @return A value indicating the stream path assigned to this segment. - */ - public String getPath() { - return path; - } - - /** - * - * @param path A value indicating the stream path assigned to this segment. - */ - public void setPath(String path) { - this.path = path; - } - - /** - * - * @return A value indicating the number (sequence) of the segment in the file. - */ - public int getSegmentNumber() { - return segmentNumber; - } - - /** - * - * @param segmentNumber A value indicating the number (sequence) of the segment in the file. - */ - public void setSegmentNumber(int segmentNumber) { - this.segmentNumber = segmentNumber; - } - - /** - * - * @return A value indicating the starting offset of the segment in the file. - */ - public long getOffset() { - return offset; - } - - /** - * - * @param offset A value indicating the starting offset of the segment in the file. - */ - public void setOffset(long offset) { - this.offset = offset; - } - - /** - * - * @return A value indicating the size of the segment (in bytes). - */ - public long getLength() { - return length; - } - - /** - * - * @param length A value indicating the size of the segment (in bytes). - */ - public void setLength(long length) { - this.length = length; - } - - /** - * - * @return A value indicating the current upload status for this segment. - */ - public SegmentUploadStatus getStatus() { - return status; - } - - /** - * - * @param status A value indicating the current upload status for this segment. - */ - public void setStatus(SegmentUploadStatus status) { - this.status = status; - } -} diff --git a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/package-info.java b/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/package-info.java deleted file mode 100644 index d0384e3d1d10..000000000000 --- a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/package-info.java +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for -// license information. - -/** - * This package contains the classes for DataLakeStoreUploader. 
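To see the formula from the comment above in action, here is a self-contained sketch. MINIMUM_SEGMENT_SIZE is tied to SingleSegmentUploader.BUFFER_LENGTH, whose value is not reproduced in this diff, so the 4 MB constant below is an assumption made purely for illustration:

    public final class SegmentCountSketch {
        // Assumption: 4 MB stands in for SingleSegmentUploader.BUFFER_LENGTH.
        private static final int ASSUMED_MIN_SEGMENT_SIZE = 4 * 1024 * 1024;

        static int segmentCount(long fileLength) {
            if (fileLength == 0) {
                return 0; // empty file => no segments
            }
            int minNumberOfSegments = (int) Math.max(1, fileLength / ASSUMED_MIN_SEGMENT_SIZE);
            double lengthInGb = fileLength / 1024.0 / 1024 / 1024;
            double multiplier = Math.min(100, 50 * Math.pow(2, Math.log10(lengthInGb)));
            int count = (int) (multiplier * Math.pow(2, Math.log10(lengthInGb)));
            // Cap at the count implied by the minimum segment size; floor at 1.
            return Math.max(1, Math.min(count, minNumberOfSegments));
        }

        public static void main(String[] args) {
            System.out.println(segmentCount(1L << 30));          // 1 GB   -> 50
            System.out.println(segmentCount(100L * (1L << 30))); // 100 GB -> 400
        }
    }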
- * The client used to efficiently and rapidly upload files into an Azure Data Lake Store account. - */ -package com.microsoft.azure.management.datalake.store.uploader; \ No newline at end of file diff --git a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeUploaderTestBase.java b/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeUploaderTestBase.java deleted file mode 100644 index 24954721e866..000000000000 --- a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeUploaderTestBase.java +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - */ -package com.microsoft.azure.management.datalake.store.uploader; - -import com.microsoft.azure.AzureEnvironment; -import com.microsoft.azure.RestClient; -import com.microsoft.azure.credentials.UserTokenCredentials; -import com.microsoft.azure.management.datalake.store.implementation.DataLakeStoreAccountManagementClientImpl; -import com.microsoft.azure.management.datalake.store.implementation.DataLakeStoreFileSystemManagementClientImpl; -import com.microsoft.azure.management.resources.implementation.ResourceManagementClientImpl; -import okhttp3.OkHttpClient; -import okhttp3.logging.HttpLoggingInterceptor; -import retrofit2.Retrofit; - -import java.util.concurrent.TimeUnit; - -public abstract class DataLakeUploaderTestBase { - protected static ResourceManagementClientImpl resourceManagementClient; - protected static DataLakeStoreAccountManagementClientImpl dataLakeStoreAccountManagementClient; - protected static DataLakeStoreFileSystemManagementClientImpl dataLakeStoreFileSystemManagementClient; - - public static void createClients() { - UserTokenCredentials credentials = new UserTokenCredentials( - System.getenv("arm.clientid"), - System.getenv("arm.domain"), - System.getenv("arm.username"), - System.getenv("arm.password"), - AzureEnvironment.AZURE); - - RestClient restClient = new RestClient.Builder() - .withDefaultBaseUrl(AzureEnvironment.AZURE) - .withCredentials(credentials) - .withLogLevel(HttpLoggingInterceptor.Level.BODY) - .build(); - - resourceManagementClient = new ResourceManagementClientImpl(restClient).withSubscriptionId(System.getenv("arm.subscriptionid")); - dataLakeStoreAccountManagementClient = new DataLakeStoreAccountManagementClientImpl(restClient).withSubscriptionId(System.getenv("arm.subscriptionid")); - - RestClient dataPlaneClient = new RestClient.Builder(new OkHttpClient.Builder().connectTimeout(100, TimeUnit.SECONDS), new Retrofit.Builder()) - .withBaseUrl("https://{accountName}.{adlsFileSystemDnsSuffix}") - .withCredentials(credentials) - .withLogLevel(HttpLoggingInterceptor.Level.NONE) // No logging for this client because we are executing a lot of requests. 
- .build(); - - dataLakeStoreFileSystemManagementClient = new DataLakeStoreFileSystemManagementClientImpl(dataPlaneClient); - } - - public static String generateName(String prefix) { - int randomSuffix = (int) (Math.random() * 1000); - return prefix + randomSuffix; - } -} diff --git a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeUploaderTests.java b/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeUploaderTests.java deleted file mode 100644 index ab255c5824af..000000000000 --- a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeUploaderTests.java +++ /dev/null @@ -1,366 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - */ -package com.microsoft.azure.management.datalake.store.uploader; - -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import javax.management.OperationsException; -import java.io.File; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.IOException; - -/** - * Unit tests for the uploader. - */ -public class DataLakeUploaderTests { - private static final int LargeFileLength = 50 * 1024 * 1024; // 50mb - private static byte[] _largeFileData = new byte[LargeFileLength]; - private static String _largeFilePath; - private static final int SmallFileLength = 128; - private static byte[] _smallFileData = new byte[SmallFileLength]; - private static String _smallFilePath; - private static final int ThreadCount = 1; - private static final String TargetStreamPath = "1"; - - private static String curMetadataPath; - - @BeforeClass - public static void Setup() throws IOException { - _largeFilePath = TestHelpers.GenerateFileData(_largeFileData); - _smallFilePath = TestHelpers.GenerateFileData(_smallFileData); - } - - @AfterClass - public static void Teardown() - { - File large = new File(_largeFilePath); - File small = new File(_smallFilePath); - if (large.exists()) - { - large.delete(); - } - - if (small.exists()) - { - small.delete(); - } - } - - /** - * Tests the case when invalid parameters are being passed to the uploader. 
- * - * @throws Exception - */ - @Test - public void DataLakeUploader_InvalidParameters() throws Exception - { - //invalid file path - File invalidFilePath = File.createTempFile("adlsUploader", "noneexistent"); - invalidFilePath.delete(); - Assert.assertFalse("Unit test error: generated temp file actually exists", invalidFilePath.exists()); - - try { - new DataLakeStoreUploader(new UploadParameters(invalidFilePath.toString(), "1", "foo", 1, false, false, true, 4 * 1024 * 1024, null),new InMemoryFrontEnd()); - Assert.assertTrue("Expected a file not found exception but no exception was thrown!", false); - } - catch (FileNotFoundException e) { - // do nothing this is expected - } - - //no target stream - try { - new DataLakeStoreUploader(new UploadParameters(_largeFilePath, null, "foo", 1, false, false, true, 4 * 1024 * 1024, null),new InMemoryFrontEnd()); - Assert.assertTrue("Expected a file not found exception but no exception was thrown!", false); - } - catch (IllegalArgumentException e) { - // do nothing this is expected - } - - //target stream ends in '/' - try { - new DataLakeStoreUploader(new UploadParameters(_largeFilePath, "1/", "foo", 1, false, false, true, 4 * 1024 * 1024, null),new InMemoryFrontEnd()); - Assert.assertTrue("Expected exception for invalid target stream but no exception was thrown!", false); - } - catch (IllegalArgumentException e) { - // do nothing this is expected - } - - //no account name - try { - new DataLakeStoreUploader(new UploadParameters(_largeFilePath, "1", null, 1, false, false, true, 4 * 1024 * 1024, null),new InMemoryFrontEnd()); - Assert.assertTrue("Expected exception for null account name but no exception was thrown!", false); - } - catch (IllegalArgumentException e) { - // do nothing this is expected - } - - //bad thread count - try { - new DataLakeStoreUploader(new UploadParameters(_largeFilePath, "1", "foo", 0, false, false, true, 4 * 1024 * 1024, null),new InMemoryFrontEnd()); - Assert.assertTrue("Expected an exception for invalid thread count but no exception was thrown!", false); - } - catch (IllegalArgumentException e) { - // do nothing this is expected - } - - try { - new DataLakeStoreUploader(new UploadParameters(_largeFilePath, "1", "foo", DataLakeStoreUploader.MAX_ALLOWED_THREADS + 1, false, false, true, 4 * 1024 * 1024, null),new InMemoryFrontEnd()); - Assert.assertTrue("Expected an exception for invalid thread count but no exception was thrown!", false); - } - catch (IllegalArgumentException e) { - // do nothing this is expected - } - } - - /** - * Tests the case when the target stream exists and we haven't set the overwrite flag. 
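The InvalidParameters test above pins down the constructor's validation rules one case at a time. Collected in one place, they amount to something like the following sketch, which is inferred from the assertions rather than taken from the uploader's code; MAX_ALLOWED_THREADS is referenced by the test but its value is not shown in this diff:

    import java.io.File;
    import java.io.FileNotFoundException;

    final class UploadParameterChecksSketch {
        // One check per test case above; names are illustrative.
        static void validate(File inputFile, String targetStreamPath, String accountName,
                             int threadCount, int maxAllowedThreads) throws FileNotFoundException {
            if (!inputFile.exists()) {
                throw new FileNotFoundException(inputFile.getPath());
            }
            if (targetStreamPath == null || targetStreamPath.isEmpty()) {
                throw new IllegalArgumentException("a target stream path is required");
            }
            if (targetStreamPath.endsWith("/")) {
                throw new IllegalArgumentException("target stream path must not end in '/'");
            }
            if (accountName == null || accountName.isEmpty()) {
                throw new IllegalArgumentException("an account name is required");
            }
            if (threadCount < 1 || threadCount > maxAllowedThreads) {
                throw new IllegalArgumentException("thread count out of range");
            }
        }
    }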
- * - * @throws Exception - */ - @Test - public void DataLakeUploader_TargetExistsNoOverwrite() throws Exception { - InMemoryFrontEnd frontEnd = new InMemoryFrontEnd(); - frontEnd.createStream(TargetStreamPath, true, null, 0); - - //no resume, no overwrite - UploadParameters up = CreateParameters(false, false, _smallFilePath, true); - DataLakeStoreUploader uploader = new DataLakeStoreUploader(up, frontEnd); - try { - uploader.execute(); - Assert.assertTrue("Expected an exception for no overwrite when file exists but no exception was thrown!", false); - } - catch (OperationsException e) { - // expected - } - - //resume, no overwrite - up = CreateParameters(true, false, _smallFilePath, false); - uploader = new DataLakeStoreUploader(up, frontEnd); - try { - uploader.execute(); - Assert.assertTrue("Expected an exception for no overwrite when file exists but no exception was thrown!", false); - } - catch (OperationsException e) { - // expected - } - - //resume, overwrite - up = CreateParameters(true, true, _smallFilePath, false); - uploader = new DataLakeStoreUploader(up, frontEnd); - uploader.execute(); - - - //no resume, overwrite - up = CreateParameters(false, true, _smallFilePath, true); - uploader = new DataLakeStoreUploader(up, frontEnd); - uploader.execute(); - } - - /** - * Tests the case of a fresh upload with multiple segments.\ - * - * @throws Exception - */ - @Test - public void DataLakeUploader_FreshUpload() throws Exception { - InMemoryFrontEnd frontEnd = new InMemoryFrontEnd(); - UploadParameters up = CreateParameters(false, false, null, true); - DataLakeStoreUploader uploader = new DataLakeStoreUploader(up, frontEnd); - - uploader.execute(); - - VerifyFileUploadedSuccessfully(up, frontEnd); - } - - /** - * Tests the resume upload when the metadata indicates all files are uploaded but no files exist on the server. 
- * - * @throws Exception - */ - @Test - public void DataLakeUploader_ResumeUploadWithAllMissingFiles() throws Exception { - //this scenario is achieved by refusing to execute the concat command on the front end for the initial upload (which will interrupt it) - //and then resuming the upload against a fresh front-end (which obviously has no files there) - - InMemoryFrontEnd backingFrontEnd1 = new InMemoryFrontEnd(); - UploaderFrontEndMock frontEnd1 = new UploaderFrontEndMock(backingFrontEnd1, true, false); - - //attempt full upload - UploadParameters up = CreateParameters(false, false, null, true); - DataLakeStoreUploader uploader = new DataLakeStoreUploader(up, frontEnd1); - uploader.deleteMetadataFile(); - - try { - uploader.execute(); - Assert.assertTrue("Expected an intentional exception during concat but none was thrown!", false); - } - catch (IntentionalException e) { - // expected - } - - Assert.assertFalse("Target stream should not have been created", frontEnd1.streamExists(up.getTargetStreamPath())); - Assert.assertTrue("No temporary streams seem to have been created", 0 < backingFrontEnd1.getStreamCount()); - - //attempt to resume the upload - InMemoryFrontEnd frontEnd2 = new InMemoryFrontEnd(); - up = CreateParameters(true, false, null, false); - uploader = new DataLakeStoreUploader(up, frontEnd2); - - //at this point the metadata exists locally but there are no target files in frontEnd2 - try - { - uploader.execute(); - } - finally - { - uploader.deleteMetadataFile(); - } - - VerifyFileUploadedSuccessfully(up, frontEnd2); - } - - /** - * Tests the resume upload when only some segments were uploaded previously - * - * @throws Exception - */ - @Test - public void DataLakeUploader_ResumePartialUpload() throws Exception { - //attempt to load the file fully, but only allow creating 1 target stream - InMemoryFrontEnd backingFrontEnd = new InMemoryFrontEnd(); - UploaderFrontEndMock frontEnd = new UploaderFrontEndMock(backingFrontEnd, false, true); - - UploadParameters up = CreateParameters(false, false, null, true); - DataLakeStoreUploader uploader = new DataLakeStoreUploader(up, frontEnd); - uploader.deleteMetadataFile(); - - try { - uploader.execute(); - Assert.assertTrue("Expected an aggregate exception during upload due to failing out creating more than one stream but no exception was thrown!", false); - } - catch (AggregateUploadException e) { - // expected - } - - Assert.assertFalse("Target stream should not have been created", frontEnd.streamExists(up.getTargetStreamPath())); - Assert.assertEquals(1, backingFrontEnd.getStreamCount()); - - //resume the upload but point it to the real back-end, which doesn't throw exceptions - up = CreateParameters(true, false, null, false); - uploader = new DataLakeStoreUploader(up, backingFrontEnd); - - try - { - uploader.execute(); - } - finally - { - uploader.deleteMetadataFile(); - } - - VerifyFileUploadedSuccessfully(up, backingFrontEnd); - } - - /** - * Tests the upload case with only 1 segment (since that is an optimization of the broader case). 
- * - * @throws Exception - */ - @Test - public void DataLakeUploader_UploadSingleSegment() throws Exception { - InMemoryFrontEnd frontEnd = new InMemoryFrontEnd(); - File fileToFolder = File.createTempFile("adlsUploader", "segmentTest"); - fileToFolder.delete(); - fileToFolder.mkdirs(); - UploadParameters up = new UploadParameters( - _smallFilePath, - "1", - "foo", - ThreadCount, - false, - false, - true, - 4 * 1024 * 1024, - fileToFolder.getAbsolutePath()); - - FileOutputStream writer = new FileOutputStream(_smallFilePath); - writer.write(_smallFileData); - writer.flush(); - writer.close(); - - DataLakeStoreUploader uploader = new DataLakeStoreUploader(up, frontEnd); - uploader.execute(); - - VerifyFileUploadedSuccessfully(up, frontEnd, _smallFileData); - } - - /** - * Creates a parameter object. - * - * @param isResume Whether to resume. - * @param isOverwrite Whether to enable overwrite. - * @param filePath The file path. - * @param createNewFolder indicates that we should create a new folder location where the data should be placed. - * @return A {@link UploadParameters} object. - * @throws IOException - */ - private UploadParameters CreateParameters(boolean isResume, boolean isOverwrite, String filePath, boolean createNewFolder) throws IOException { - if (filePath == null) - { - filePath = _largeFilePath; - } - - File fileToFolder = File.createTempFile("adlsUploader", "metadata"); - if(createNewFolder) { - fileToFolder.delete(); - fileToFolder.mkdirs(); - curMetadataPath = fileToFolder.getAbsolutePath(); - } - - return new UploadParameters( - filePath, - "1", - "foo", - false, - ThreadCount, - isOverwrite, - isResume, - true, - 4 * 1024 * 1024, - curMetadataPath); - } - - /** - * Verifies the file was successfully uploaded. - * - * @param up The upload parameters. - * @param frontEnd The front end to use. - * @throws Exception - */ - private void VerifyFileUploadedSuccessfully(UploadParameters up, InMemoryFrontEnd frontEnd) throws Exception { - VerifyFileUploadedSuccessfully(up, frontEnd, _largeFileData); - } - - /** - * Verifies the file was successfully uploaded. - * @param up The upload parameters. - * @param frontEnd The front end to use. - * @param fileContents The file contents. - * @throws Exception - */ - private void VerifyFileUploadedSuccessfully(UploadParameters up, InMemoryFrontEnd frontEnd, byte[] fileContents) throws Exception { - Assert.assertTrue("Uploaded stream does not exist", frontEnd.streamExists(up.getTargetStreamPath())); - Assert.assertEquals(1, frontEnd.getStreamCount()); - Assert.assertEquals(fileContents.length, frontEnd.getStreamLength(up.getTargetStreamPath())); - - byte[] uploadedData = frontEnd.GetStreamContents(up.getTargetStreamPath()); - Assert.assertArrayEquals("Uploaded stream is not binary identical to input file", fileContents, uploadedData); - } -} diff --git a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/InMemoryFrontEnd.java b/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/InMemoryFrontEnd.java deleted file mode 100644 index d41715203254..000000000000 --- a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/InMemoryFrontEnd.java +++ /dev/null @@ -1,276 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. 
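The InMemoryFrontEnd whose deletion starts here, and the two mock front ends later in this change, all implement the FrontEndAdapter contract. For orientation while reading the next hunks, this is the adapter surface as exercised by these tests, reconstructed from the mock implementations; the real interface is deleted elsewhere in this change and may be declared slightly differently:

    import com.microsoft.rest.RestException;
    import java.io.IOException;

    interface FrontEndAdapterSurface {
        void createStream(String streamPath, boolean overwrite, byte[] data, int byteCount)
                throws RestException, IOException;
        void deleteStream(String streamPath, boolean recurse) throws RestException, IOException;
        void appendToStream(String streamPath, byte[] data, long offset, int byteCount)
                throws RestException, IOException;
        boolean streamExists(String streamPath) throws RestException, IOException;
        long getStreamLength(String streamPath) throws RestException, IOException;
        void concatenate(String targetStreamPath, String[] inputStreamPaths)
                throws RestException, IOException;
    }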
- */ -package com.microsoft.azure.management.datalake.store.uploader; - -import com.microsoft.azure.CloudException; - -import java.util.Hashtable; -import java.util.LinkedList; - -/** - * Test front-end, fully in-memory. - */ -public class InMemoryFrontEnd implements FrontEndAdapter { - private Hashtable _streams = new Hashtable<>(); - - /** - * - * @param streamPath The relative path to the stream. - * @param overwrite Whether to overwrite an existing stream. - * @param data - * @param byteCount - * @Throws CloudException - */ - public void createStream(String streamPath, boolean overwrite, byte[] data, int byteCount) throws CloudException { - if (overwrite) - { - _streams.put(streamPath, new StreamData(streamPath)); - } - else - { - if (streamExists(streamPath)) - { - throw new CloudException("stream exists"); - } - - _streams.put(streamPath, new StreamData(streamPath)); - } - - // if there is data passed in, we should do the same operation as in append - if (data != null) - { - if (byteCount > data.length) - { - throw new CloudException("invalid byteCount"); - } - - StreamData stream = _streams.get(streamPath); - - //always make a copy of the original buffer since it is reused - byte[] toAppend = new byte[byteCount]; - System.arraycopy(data, 0, toAppend, 0, byteCount); - - stream.Append(toAppend); - } - } - - /** - * - * @param streamPath The relative path to the stream. - * @param recurse if set to true recursively delete. This is used for folder streams only. - * @Throws CloudException - */ - public void deleteStream(String streamPath, boolean recurse) throws CloudException { - if (!streamExists(streamPath)) - { - throw new CloudException("stream does not exist"); - } - _streams.remove(streamPath); - } - - /** - * - * @param streamPath The relative path to the stream. - * @param data An array of bytes to be appended to the stream. - * @param offset The offset at which to append to the stream. - * @param byteCount - * @Throws CloudException - */ - public void appendToStream(String streamPath, byte[] data, long offset, int byteCount) throws CloudException { - if (!streamExists(streamPath)) - { - throw new CloudException("stream does not exist"); - } - - if (byteCount > data.length) - { - throw new CloudException("invalid byteCount"); - } - - StreamData stream = _streams.get(streamPath); - if (stream.Length != offset) - { - throw new CloudException("offset != stream.length"); - } - - //always make a copy of the original buffer since it is reused - byte[] toAppend = new byte[byteCount]; - System.arraycopy(data, 0, toAppend, 0, byteCount); - - stream.Append(toAppend); - } - - /** - * - * @param streamPath The relative path to the stream. - * @return True or false if the stream exists - */ - public boolean streamExists(String streamPath) - { - return _streams.containsKey(streamPath); - } - - /** - * - * @param streamPath The relative path to the stream. - * @return - * @Throws CloudException - */ - public long getStreamLength(String streamPath) throws CloudException { - if (!streamExists(streamPath)) - { - throw new CloudException("stream does not exist"); - } - - return _streams.get(streamPath).Length; - } - - /** - * - * @param targetStreamPath The relative path to the target stream. - * @param inputStreamPaths An ordered array of paths to the input streams. 
- * @Throws CloudException - */ - public void concatenate(String targetStreamPath, String[] inputStreamPaths) throws CloudException { - if (streamExists(targetStreamPath)) - { - throw new CloudException("target stream exists"); - } - - final int bufferSize = 4 * 1024 * 1024; - byte[] buffer = new byte[bufferSize]; - - try - { - createStream(targetStreamPath, true, null, 0); - StreamData targetStream = _streams.get(targetStreamPath); - - for (String inputStreamPath: inputStreamPaths) - { - if (!streamExists(inputStreamPath)) - { - throw new CloudException("input stream does not exist"); - } - - StreamData stream = _streams.get(inputStreamPath); - for (byte[] chunk: stream.GetDataChunks()) - { - targetStream.Append(chunk); - } - } - } - catch (CloudException e) - { - if (streamExists(targetStreamPath)) - { - deleteStream(targetStreamPath, false); - } - throw e; - } - - for (String inputStreamPath: inputStreamPaths) - { - deleteStream(inputStreamPath, false); - } - } - - /** - * - * @param streamPath - * @return - * @Throws CloudException - */ - public Iterable GetAppendBlocks(String streamPath) throws CloudException { - if (!streamExists(streamPath)) - { - throw new CloudException("stream does not exist"); - } - - StreamData sd = _streams.get(streamPath); - return sd.GetDataChunks(); - } - - /** - * - * @param streamPath - * @return - * @Throws CloudException - */ - public byte[] GetStreamContents(String streamPath) throws CloudException { - if (!streamExists(streamPath)) - { - throw new CloudException("stream does not exist"); - } - - StreamData sd = _streams.get(streamPath); - - if (sd.Length > Integer.MAX_VALUE) - { - throw new OutOfMemoryError("Stream has too much data and cannot be fit into a single array"); - } - - byte[] result = new byte[(int)sd.Length]; - int position = 0; - for (byte[] chunk: sd.GetDataChunks()) - { - System.arraycopy(chunk, 0, result, position, chunk.length); - position += chunk.length; - } - - return result; - } - - /** - * Returns the number of "streams" that have been created by this adapter. - * - * @return the number of streams. - */ - public int getStreamCount() - { - return _streams.size(); - } - - /** - * Represents stream data for unit testing purposes. - */ - private class StreamData - { - private LinkedList _data; - - /** - * Initializes new stream data with the given name. - * @param name - */ - public StreamData(String name) - { - _data = new LinkedList(); - this.Name = name; - this.Length = 0; - } - - public String getName() { - return Name; - } - - public void setName(String name) { - Name = name; - } - - public String Name; - public long Length; - - public void Append(byte[] data) - { - _data.addLast(data); - this.Length += data.length; - } - - public Iterable GetDataChunks() - { - return _data; - } - } -} diff --git a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/IntentionalException.java b/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/IntentionalException.java deleted file mode 100644 index f7b52cd4fd50..000000000000 --- a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/IntentionalException.java +++ /dev/null @@ -1,13 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. 
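A quick exercise of the in-memory front end, showing concatenate's contract: it creates the target, appends each input's chunks in order, then deletes the inputs (and deletes the target again if anything fails partway). This snippet assumes it is compiled alongside the test class above, which this change deletes:

    public final class InMemoryFrontEndDemo {
        public static void main(String[] args) throws Exception {
            InMemoryFrontEnd fe = new InMemoryFrontEnd();
            fe.createStream("a", true, "foo".getBytes(), 3);
            fe.createStream("b", true, "bar".getBytes(), 3);
            fe.concatenate("target", new String[] { "a", "b" });
            // The inputs are gone and the target holds their bytes in order.
            System.out.println(new String(fe.GetStreamContents("target"))); // foobar
            System.out.println(fe.getStreamCount());                        // 1
        }
    }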
- */ -package com.microsoft.azure.management.datalake.store.uploader; - -import com.microsoft.azure.CloudException; - -/** - * An exception that we want our mocks to throw sometimes to test out various code paths. - */ -public class IntentionalException extends CloudException { } \ No newline at end of file diff --git a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/MsuMockFrontEnd.java b/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/MsuMockFrontEnd.java deleted file mode 100644 index 2286692d55f3..000000000000 --- a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/MsuMockFrontEnd.java +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - */ -package com.microsoft.azure.management.datalake.store.uploader; - -import com.microsoft.rest.RestException; -import org.junit.Assert; - -import java.io.IOException; - -/** - * A mocked front end for testing out the code paths of the {@link MultipleSegmentUploader} - */ -public class MsuMockFrontEnd implements FrontEndAdapter { - - private FrontEndAdapter BaseAdapter; - private boolean TestRetry; - private int FailCount; - private int CallCount; - - /** - * Constructor with base front end. - * @param baseAdapter The "real" front end to use for non-mocked calls - * @param testRetry Indicates if it should mock out retry logic. - * @param failCount Required if mocking retry logic, indicates the number of failures to allow. - */ - public MsuMockFrontEnd(FrontEndAdapter baseAdapter, boolean testRetry, int failCount) - { - TestRetry = testRetry; - BaseAdapter = baseAdapter; - FailCount = failCount; - CallCount = 0; - } - - public void createStream(String streamPath, boolean overwrite, byte[] data, int byteCount) throws RestException, IOException { - if (TestRetry) { - CallCount++; - if (CallCount <= FailCount) - { - throw new IntentionalException(); - } - } - - BaseAdapter.createStream(streamPath, overwrite, data, byteCount); - } - - public void deleteStream(String streamPath, boolean recurse) throws IOException, RestException { - BaseAdapter.deleteStream(streamPath, recurse); - } - - public void appendToStream(String streamPath, byte[] data, long offset, int byteCount) throws IOException, RestException { - if (TestRetry) { - CallCount++; - if (CallCount <= FailCount) - { - throw new IntentionalException(); - } - } - - BaseAdapter.appendToStream(streamPath, data, offset, byteCount); - } - - public boolean streamExists(String streamPath) throws IOException, RestException { - return BaseAdapter.streamExists(streamPath); - } - - public long getStreamLength(String streamPath) throws IOException, RestException { - return BaseAdapter.getStreamLength(streamPath); - } - - public void concatenate(String targetStreamPath, String[] inputStreamPaths) throws IOException, RestException { - Assert.assertTrue("concatenate should not be called when using 1 segment", false); - BaseAdapter.concatenate(targetStreamPath, inputStreamPaths); - } -} diff --git a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/MultipleSegmentUploaderTests.java b/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/MultipleSegmentUploaderTests.java deleted file mode 100644 index 
ebf652f976ce..000000000000 --- a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/MultipleSegmentUploaderTests.java +++ /dev/null @@ -1,305 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - */ -package com.microsoft.azure.management.datalake.store.uploader; - -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.text.MessageFormat; -import java.util.Random; - -/** - * Represents a class of unit tests targeting the {@link MultipleSegmentUploader} - */ -public class MultipleSegmentUploaderTests { - private static byte[] _smallFileContents = new byte[10 * 1024]; //10KB file - private static String _smallFilePath; - - @BeforeClass - public static void Setup() throws IOException { - _smallFilePath = GenerateFileData(_smallFileContents); - } - - private static String GenerateFileData(byte[] contents) throws IOException { - File tempFile = File.createTempFile("adlmsu", ".data"); - - Random rnd = new Random(0); - rnd.nextBytes(contents); - Assert.assertTrue("The temp file at the following path was not created: " + tempFile.getAbsolutePath(), tempFile.exists()); - - try (FileOutputStream stream = new FileOutputStream(tempFile)) { - stream.write(contents); - } - - return tempFile.getAbsolutePath(); - } - - @AfterClass - public static void Teardown() - { - File smallFile = new File(_smallFilePath); - if (smallFile.exists()) - { - smallFile.delete(); - } - } - - /** - * Tests an uneventful upload from scratch made of 1 segment. 
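A side note on the seeded Random in GenerateFileData above: seeding with a constant makes the fixture bytes identical on every run, which keeps these tests reproducible. A two-line demonstration:

    import java.util.Arrays;
    import java.util.Random;

    public final class SeededRandomDemo {
        public static void main(String[] args) {
            byte[] a = new byte[8];
            byte[] b = new byte[8];
            new Random(0).nextBytes(a); // same seed ...
            new Random(0).nextBytes(b); // ... same bytes
            System.out.println(Arrays.equals(a, b)); // true
        }
    }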
- * - * @throws Exception - */ - @Test - public void MultipleSegmentUploader_OneSegment() throws Exception { - InMemoryFrontEnd fe = new InMemoryFrontEnd(); - UploadMetadata metadata = CreateMetadata(1); - try - { - MultipleSegmentUploader msu = new MultipleSegmentUploader(metadata, 1, fe); - msu.setUseSegmentBlockBackOffRetryStrategy(false); - msu.upload(); - VerifyTargetStreamsAreComplete(metadata, fe); - } - finally - { - metadata.deleteFile(); - } - } - - /** - * Tests an uneventful upload from scratch made of several segments - * - * @throws Exception - */ - @Test - public void MultipleSegmentUploader_MultipleSegments() throws Exception - { - InMemoryFrontEnd fe = new InMemoryFrontEnd(); - UploadMetadata metadata = CreateMetadata(10); - try - { - MultipleSegmentUploader msu = new MultipleSegmentUploader(metadata, 1, fe); - msu.setUseSegmentBlockBackOffRetryStrategy(false); - msu.upload(); - VerifyTargetStreamsAreComplete(metadata, fe); - } - finally - { - metadata.deleteFile(); - } - } - - /** - * Tests an uneventful upload from scratch made of several segments - * - * @throws Exception - */ - @Test - public void MultipleSegmentUploader_MultipleSegmentsAndMultipleThreads() throws Exception - { - InMemoryFrontEnd fe = new InMemoryFrontEnd(); - UploadMetadata metadata = CreateMetadata(10); - int threadCount = metadata.getSegmentCount() * 10; //intentionally setting this higher than the # of segments - try - { - MultipleSegmentUploader msu = new MultipleSegmentUploader(metadata, threadCount, fe); - msu.setUseSegmentBlockBackOffRetryStrategy(false); - msu.upload(); - VerifyTargetStreamsAreComplete(metadata, fe); - } - finally - { - metadata.deleteFile(); - } - } - - /** - * Tests an uneventful upload from resume made of several segments - * - * @throws Exception - */ - @Test - public void MultipleSegmentUploader_ResumedUploadWithMultipleSegments() throws Exception - { - //the strategy here is to upload everything, then delete a set of the segments, and verify that a resume will pick up the slack - - InMemoryFrontEnd fe = new InMemoryFrontEnd(); - UploadMetadata metadata = CreateMetadata(10); - - try - { - MultipleSegmentUploader msu = new MultipleSegmentUploader(metadata, 1, fe); - msu.setUseSegmentBlockBackOffRetryStrategy(false); - msu.upload(); - VerifyTargetStreamsAreComplete(metadata, fe); - - //delete about 50% of segments - for (int i = 0; i < metadata.getSegmentCount(); i++) - { - UploadSegmentMetadata currentSegment = metadata.getSegments()[i]; - if (i % 2 == 0) - { - currentSegment.setStatus(SegmentUploadStatus.Pending); - fe.deleteStream(currentSegment.getPath(), false); - } - } - - //re-upload everything - msu = new MultipleSegmentUploader(metadata, 1, fe); - msu.upload(); - VerifyTargetStreamsAreComplete(metadata, fe); - } - finally - { - metadata.deleteFile(); - } - } - - /** - * Tests an upload made of several segments, where - * some fail a couple of times => upload can finish. 
- * some fail too many times => upload will not finish - * - * @throws Exception - */ - @Test - public void MultipleSegmentUploader_SegmentInstability() throws Exception - { - TestRetry(0); - TestRetry(1); - TestRetry(2); - TestRetry(3); - TestRetry(4); - TestRetry(5); - } - - private void TestRetry(int segmentFailCount) throws Exception - { - //we only have access to the underlying FrontEnd, so we need to simulate many exceptions in order to force a segment to fail the upload (multiply by SingleSegmentUploader.MaxBufferUploadAttemptAccount) - //this only works because we have a small file, which we know will fit in only one buffer (for a larger file, more complex operations are necessary) - int actualfailCount = segmentFailCount * SingleSegmentUploader.MAX_BUFFER_UPLOAD_ATTEMPT_COUNT; - boolean expectSuccess = segmentFailCount < MultipleSegmentUploader.MAX_UPLOAD_ATTEMPT_COUNT; - - int callCount = 0; - - //create a mock front end sitting on top of a working front end that simulates some erros for some time - InMemoryFrontEnd workingFrontEnd = new InMemoryFrontEnd(); - MsuMockFrontEnd fe = new MsuMockFrontEnd(workingFrontEnd, true, actualfailCount); - - UploadMetadata metadata = CreateMetadata(1); - try - { - MultipleSegmentUploader msu = new MultipleSegmentUploader(metadata, 1, fe); - msu.setUseSegmentBlockBackOffRetryStrategy(false); - - if (expectSuccess) - { - //the upload method should not throw any exceptions in this case - msu.upload(); - - //if we are expecting success, verify that both the metadata and the target streams are complete - VerifyTargetStreamsAreComplete(metadata, workingFrontEnd); - } - else - { - //the upload method should throw an aggregate exception in this case - try { - msu.upload(); - Assert.assertTrue("An aggregate upload exception was expected but no exception was thrown.", false); - } - catch (AggregateUploadException ex) { - // do nothing, expected - } - - //if we do not expect success, verify that at least 1 segment was marked as Failed - boolean foundFailedSegment = false; - for (UploadSegmentMetadata s: metadata.getSegments()) { - if(s.getStatus() == SegmentUploadStatus.Failed) { - foundFailedSegment = true; - break; - } - } - Assert.assertTrue("Could not find any failed segments", foundFailedSegment); - - //for every other segment, verify it was completed OK - for (UploadSegmentMetadata segment: metadata.getSegments()) - { - if( segment.getStatus() != SegmentUploadStatus.Failed) { - VerifyTargetStreamIsComplete(segment, metadata, workingFrontEnd); - } - } - } - } - finally - { - metadata.deleteFile(); - } - } - - private void VerifyTargetStreamsAreComplete(UploadMetadata metadata, InMemoryFrontEnd fe) throws Exception { - for (UploadSegmentMetadata segment: metadata.getSegments()) - { - VerifyTargetStreamIsComplete(segment, metadata, fe); - } - } - - private void VerifyTargetStreamIsComplete(UploadSegmentMetadata segmentMetadata, UploadMetadata metadata, InMemoryFrontEnd frontEnd) throws Exception { - Assert.assertEquals(SegmentUploadStatus.Complete, segmentMetadata.getStatus()); - Assert.assertTrue(MessageFormat.format("Segment {0} was not uploaded", segmentMetadata.getSegmentNumber()), frontEnd.streamExists(segmentMetadata.getPath())); - Assert.assertEquals(segmentMetadata.getLength(), frontEnd.getStreamLength(segmentMetadata.getPath())); - - byte[] actualContents = frontEnd.GetStreamContents(segmentMetadata.getPath()); - byte[] expectedContents = GetExpectedContents(segmentMetadata, metadata); - Assert.assertArrayEquals(MessageFormat.format("Segment 
{0} has unexpected contents", segmentMetadata.getSegmentNumber()), expectedContents, actualContents); - } - - - private byte[] GetExpectedContents(UploadSegmentMetadata segment, UploadMetadata metadata) - { - byte[] result = new byte[(int)segment.getLength()]; - System.arraycopy(_smallFileContents, (int) (segment.getSegmentNumber() * metadata.getSegmentLength()), result, 0, (int)segment.getLength()); - return result; - } - - private UploadMetadata CreateMetadata(int segmentCount) throws IOException { - File path = File.createTempFile("adlsmsumetadata", ".xml"); - UploadMetadata metadata = new UploadMetadata(); - - metadata.setMetadataFilePath(path.getAbsolutePath()); - metadata.setInputFilePath(_smallFilePath); - metadata.setFileLength(_smallFileContents.length); - metadata.setSegmentCount(segmentCount); - metadata.setSegmentLength(UploadSegmentMetadata.calculateSegmentLength(_smallFileContents.length, segmentCount)); - - metadata.setTargetStreamPath("abc"); - metadata.setUploadId("123"); - metadata.setBinary(true); - - UploadSegmentMetadata[] toSet = new UploadSegmentMetadata[segmentCount]; - long offset = 0; - for (int i = 0; i < segmentCount; i++) - { - long length = UploadSegmentMetadata.calculateSegmentLength(i, metadata); - toSet[i] = new UploadSegmentMetadata(); - - toSet[i].setSegmentNumber(i); - toSet[i].setOffset(offset); - toSet[i].setStatus(SegmentUploadStatus.Pending); - toSet[i].setLength(length); - toSet[i].setPath(MessageFormat.format("{0}.{1}.segment{2}", metadata.getTargetStreamPath(), metadata.getUploadId(), i)); - - offset += length; - } - - metadata.setSegments(toSet); - return metadata; - } -} diff --git a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/PerformanceUploadTests.java b/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/PerformanceUploadTests.java deleted file mode 100644 index eb1bece357c3..000000000000 --- a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/PerformanceUploadTests.java +++ /dev/null @@ -1,167 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - */ -package com.microsoft.azure.management.datalake.store.uploader; - -import com.google.common.base.Stopwatch; -import com.microsoft.azure.management.datalake.store.models.DataLakeStoreAccount; -import com.microsoft.azure.management.datalake.store.implementation.DataLakeStoreFileSystemManagementClientImpl; -import com.microsoft.azure.management.resources.implementation.ResourceGroupInner; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.File; -import java.io.FileOutputStream; -import java.io.RandomAccessFile; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.concurrent.TimeUnit; - -public class PerformanceUploadTests extends DataLakeUploaderTestBase { - - private static String rgName = generateName("javaadlsrg"); - private static String adlsAcct = generateName("javaadlsacct"); - - private static final String location = "East US 2"; - private static String destFolder = generateName("performanceTest"); - - private static final String Local10GbFileName = "C:\\data\\FixedBlockPerfData.txt"; // 10GB perf test binary file. 
- private static final String localLargeFileName = "C:\\data\\MicrosoftTelemetry.tsv"; // 2.5GB perf test binary file - private static final String localFileName = "C:\\data\\2mbfile.txt"; // 2mb perf test binary file - - @BeforeClass - public static void Setup() throws Exception { - createClients(); - - ResourceGroupInner group = new ResourceGroupInner(); - String location = "eastus2"; - group.withLocation(location); - resourceManagementClient.resourceGroups().createOrUpdate(rgName, group); - - // create storage and ADLS accounts, setting the accessKey - DataLakeStoreAccount adlsAccount = new DataLakeStoreAccount(); - adlsAccount.withLocation(location); - adlsAccount.withName(adlsAcct); - dataLakeStoreAccountManagementClient.accounts().create(rgName, adlsAcct, adlsAccount); - - File smallFile = new File(localFileName); - if (!smallFile.exists()) { - smallFile.createNewFile(); - try (FileOutputStream stream = new FileOutputStream(smallFile)) { - byte[] contents = new byte[4 * 1024 * 1024]; - Arrays.fill(contents, (byte) 'a'); - stream.write(contents); - } - } - - File largeFile = new File(localLargeFileName); - if (!largeFile.exists()) { - try (RandomAccessFile stream = new RandomAccessFile(largeFile, "rw")) { - stream.setLength((long)(2.5* 1024 * 1024 * 1024) - 10); // 2.5GB minus 10 bytes - byte[] content = new byte[10]; - Arrays.fill(content, (byte)'a'); - stream.write(content); - } - } - - File tenGBFile = new File(Local10GbFileName); - if (!tenGBFile.exists()) { - try (RandomAccessFile stream = new RandomAccessFile(tenGBFile, "rw")) { - stream.setLength((long)(10* 1024 * 1024 * 1024) - 10); // 10GB minus 10 bytes - byte[] content = new byte[10]; - Arrays.fill(content, (byte)'a'); - stream.write(content); - } - } - } - - @AfterClass - public static void cleanup() throws Exception { - try { - resourceManagementClient.resourceGroups().delete(rgName); - } - catch (Exception e) { - // ignore failures during cleanup, as it is best effort - } - } - - @Test - public void Test2mbFileUpload() throws Exception { - String folder = "begoldsm"; - ArrayList perfMetrics = new ArrayList(); - - // upload Rentrak data. - boolean force = true; //Set this to true if you want to overwrite existing data - System.out.println("Uploading 2mb data..."); - for (int i = 0; i < 10; ++i) { - String destLocation = destFolder + "/" + folder + "2mbFile.txt"; - Stopwatch watch = Stopwatch.createStarted(); - UploadFile(dataLakeStoreFileSystemManagementClient, adlsAcct, localFileName, destLocation, force); - watch.stop(); - long elapsedMs = watch.elapsed(TimeUnit.MILLISECONDS); - System.out.println("File Uploaded : " + i); - perfMetrics.add(elapsedMs); - } - - for( long perf: perfMetrics){ - System.out.println(perf); - } - } - - @Test - public void Test2_5gbFileUpload() throws Exception { - String folder = "begoldsm"; - ArrayList perfMetrics = new ArrayList(); - - // upload Rentrak data. 
- boolean force = true; //Set this to true if you want to overwrite existing data - System.out.println("Uploading 2.5GB data..."); - for (int i = 0; i < 5; ++i) { - String destLocation = destFolder + "/" + folder + "2_5gbFile.txt"; - Stopwatch watch = Stopwatch.createStarted(); - UploadFile(dataLakeStoreFileSystemManagementClient, adlsAcct, localLargeFileName, destLocation, force); - watch.stop(); - long elapsedMs = watch.elapsed(TimeUnit.MILLISECONDS); - System.out.println("File Uploaded : " + i); - perfMetrics.add(elapsedMs); - } - - for( long perf: perfMetrics){ - System.out.println(perf); - } - } - - @Test - public void Test10gbFileUpload() throws Exception { - String folder = "begoldsm"; - ArrayList perfMetrics = new ArrayList(); - - // upload Rentrak data. - boolean force = true; //Set this to true if you want to overwrite existing data - System.out.println("Uploading 10GB data..."); - for (int i = 0; i < 3; ++i) { - String destLocation = destFolder + "/" + folder + "10gbFile.txt"; - Stopwatch watch = Stopwatch.createStarted(); - UploadFile(dataLakeStoreFileSystemManagementClient, adlsAcct, Local10GbFileName, destLocation, force); - watch.stop(); - long elapsedMs = watch.elapsed(TimeUnit.MILLISECONDS); - System.out.println("File Uploaded : " + i); - perfMetrics.add(elapsedMs); - } - - for( long perf: perfMetrics){ - System.out.println(perf); - } - } - - public static boolean UploadFile(DataLakeStoreFileSystemManagementClientImpl dataLakeStoreFileSystemClient, String dlAccountName, String srcPath, String destPath, boolean force) throws Exception { - UploadParameters parameters = new UploadParameters(srcPath, destPath, dlAccountName, 40, force, false); - FrontEndAdapter frontend = new DataLakeStoreFrontEndAdapterImpl(dlAccountName, dataLakeStoreFileSystemClient); - DataLakeStoreUploader uploader = new DataLakeStoreUploader(parameters, frontend); - uploader.execute(); - return true; - } -} diff --git a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/SingleSegmentUploaderTests.java b/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/SingleSegmentUploaderTests.java deleted file mode 100644 index 0a78383d0d34..000000000000 --- a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/SingleSegmentUploaderTests.java +++ /dev/null @@ -1,296 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. 
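The Setup method above manufactures multi-gigabyte fixtures without writing gigabytes of data: RandomAccessFile.setLength fixes the logical file size (sparsely, on filesystems that support it), and a single small write puts some real bytes into the file. A runnable miniature of the same trick, with the size shrunk for illustration:

    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.util.Arrays;

    public final class SparseFixtureSketch {
        public static void main(String[] args) throws IOException {
            File f = File.createTempFile("sparse", ".bin");
            try (RandomAccessFile raf = new RandomAccessFile(f, "rw")) {
                raf.setLength(100L * 1024 * 1024); // ~100 MB logical size, set instantly
                byte[] content = new byte[10];
                Arrays.fill(content, (byte) 'a');
                raf.write(content); // ten real bytes at offset 0, as in Setup above
            }
            System.out.println(f.length()); // 104857600
            f.delete();
        }
    }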
- */ -package com.microsoft.azure.management.datalake.store.uploader; - -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.File; -import java.io.IOException; - -public class SingleSegmentUploaderTests { - private static byte[] _smallFileContents = new byte[10 * 1024]; //10KB file - private static String _smallFilePath; - - private static byte[] _largeFileContents = new byte[10 * 1024 * 1024]; //10MB file - private static String _largeFilePath; - - private static byte[] _textFileContents = new byte[20 * 1024 * 1024]; //20MB file - private static String _textFilePath; - - private static byte[] _badTextFileContents = new byte[10 * 1024 * 1024]; //10MB file - private static String _badTextFilePath; - - private static final String StreamPath = "abc"; - - @BeforeClass - public static void Setup() throws IOException { - _smallFilePath = TestHelpers.GenerateFileData(_smallFileContents); - _largeFilePath = TestHelpers.GenerateFileData(_largeFileContents); - _textFilePath = TestHelpers.GenerateTextFileData(_textFileContents, 1, SingleSegmentUploader.BUFFER_LENGTH); - _badTextFilePath = TestHelpers.GenerateTextFileData(_badTextFileContents, SingleSegmentUploader.BUFFER_LENGTH + 1, SingleSegmentUploader.BUFFER_LENGTH + 2); - } - - @AfterClass - public static void Teardown() - { - File largeFile = new File(_largeFilePath); - File smallFile = new File(_smallFilePath); - File textFile = new File(_textFilePath); - File badFile = new File(_badTextFilePath); - if (largeFile.exists()) - { - largeFile.delete(); - } - - if (smallFile.exists()) - { - smallFile.delete(); - } - - if (textFile.exists()) - { - textFile.delete(); - } - - if (badFile.exists()) - { - badFile.delete(); - } - } - - /** - * Tests a simple upload consisting of a single block (the file is small enough to be uploaded without splitting into smaller buffers) - * - * @throws Exception - */ - @Test - public void SingleSegmentUploader_UploadSingleBlockStream() throws Exception { - InMemoryFrontEnd fe = new InMemoryFrontEnd(); - - UploadMetadata metadata = CreateMetadata(_smallFilePath, _smallFileContents.length); - SingleSegmentUploader ssu = new SingleSegmentUploader(0, metadata, fe); - ssu.setUseBackOffRetryStrategy(false); - ssu.upload(); - - byte[] actualContents = fe.GetStreamContents(StreamPath); - Assert.assertArrayEquals("Unexpected uploaded stream contents.", _smallFileContents, actualContents); - } - - /** - * Tests an uploading consisting of a larger file, which will need to be uploaded in sequential buffers. 
- * - * @throws Exception - */ - @Test - public void SingleSegmentUploader_UploadMultiBlockStream() throws Exception { - InMemoryFrontEnd fe = new InMemoryFrontEnd(); - - UploadMetadata metadata = CreateMetadata(_largeFilePath, _largeFileContents.length); - - SingleSegmentUploader ssu = new SingleSegmentUploader(0, metadata, fe); - ssu.setUseBackOffRetryStrategy(false); - ssu.upload(); - - byte[] actualContents = fe.GetStreamContents(StreamPath); - Assert.assertArrayEquals("Unexpected uploaded stream contents.", _largeFileContents, actualContents); - } - - /** - * Tests the case when only a part of the file is to be uploaded (i.e., all other cases feed in the entire file) - * - * @throws Exception - */ - @Test - public void SingleSegmentUploader_UploadFileRange() throws Exception { - int length = _smallFileContents.length / 3; - - InMemoryFrontEnd fe = new InMemoryFrontEnd(); - - UploadMetadata metadata = CreateMetadata(_smallFilePath, length); - - SingleSegmentUploader ssu = new SingleSegmentUploader(0, metadata, fe); - ssu.setUseBackOffRetryStrategy(false); - ssu.upload(); - - byte[] actualContents = fe.GetStreamContents(StreamPath); - byte[] expectedContents = new byte[length]; - System.arraycopy(_smallFileContents, 0, expectedContents, 0, length); - Assert.assertArrayEquals("Unexpected uploaded stream contents.", expectedContents, actualContents); - - } - - /** - * Tests the case when an existing stream with the same name already exists on the server. That stream needs to be fully replaced with the new data. - * - * @throws Exception - */ - @Test - public void SingleSegmentUploader_TargetStreamExists() throws Exception { - InMemoryFrontEnd fe = new InMemoryFrontEnd(); - - //load up an existing stream - fe.createStream(StreamPath, true, null, 0); - byte[] data = "random".getBytes(); - fe.appendToStream(StreamPath, data, 0, data.length); - - //force a re-upload of the stream - UploadMetadata metadata = CreateMetadata(_smallFilePath, _smallFileContents.length); - SingleSegmentUploader ssu = new SingleSegmentUploader(0, metadata, fe); - ssu.setUseBackOffRetryStrategy(false); - ssu.upload(); - - byte[] actualContents = fe.GetStreamContents(StreamPath); - Assert.assertArrayEquals("Unexpected uploaded stream contents.", _smallFileContents, actualContents); - } - - /** - * Tests the case when the upload did "succeed", but the server reports back a different stream length than expected. - * - * @throws Exception - */ - @Test - public void SingleSegmentUploader_VerifyUploadStreamFails() throws Exception { - //create a mock front end which doesn't do anything - SsuMockFrontEnd fe = new SsuMockFrontEnd(new InMemoryFrontEnd(), true, false , -1); - - //upload some data - UploadMetadata metadata = CreateMetadata(_smallFilePath, _smallFileContents.length); - SingleSegmentUploader ssu = new SingleSegmentUploader(0, metadata, fe); - ssu.setUseBackOffRetryStrategy(false); - - //the upload method should fail if it cannot verify that the stream was uploaded after the upload (i.e., it will get a length of 0 at the end) - try { - ssu.upload(); - Assert.assertTrue("the upload method should fail if it cannot verify that the stream was uploaded, but it succeeded!", false); - } - catch (UploadFailedException ex) { - // do nothing, expected - } - } - - /** - * Tests the case when the SingleSegmentUploader should upload a non-binary file (i.e., split on record boundaries). 
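For intuition about "split on record boundaries" in the two tests that follow: when a file is not binary, every uploaded block must end on a record delimiter ('\n' in these tests), and a single record longer than one block is an error. A minimal illustration of that splitting policy; this sketches the idea only, not the uploader's actual buffering code:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public final class RecordBoundarySketch {
        // Splits data into blocks of at most maxBlock bytes, ending every
        // non-final block at a '\n'. Throws when a record cannot fit in one
        // block, mirroring the "record too large" failure tested below.
        static List<byte[]> split(byte[] data, int maxBlock) {
            List<byte[]> blocks = new ArrayList<>();
            int start = 0;
            while (data.length - start > maxBlock) {
                int cut = -1;
                for (int i = start + maxBlock - 1; i >= start; i--) {
                    if (data[i] == '\n') {
                        cut = i + 1;
                        break;
                    }
                }
                if (cut < 0) {
                    throw new IllegalStateException("record larger than one block");
                }
                blocks.add(Arrays.copyOfRange(data, start, cut));
                start = cut;
            }
            blocks.add(Arrays.copyOfRange(data, start, data.length));
            return blocks;
        }

        public static void main(String[] args) {
            for (byte[] block : split("ab\ncd\nef\n".getBytes(), 4)) {
                System.out.print(new String(block).replace("\n", "\\n") + " "); // ab\n cd\n ef\n
            }
        }
    }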
- * - * @throws Exception - */ - @Test - public void SingleSegmentUploader_UploadNonBinaryFile() throws Exception { - InMemoryFrontEnd fe = new InMemoryFrontEnd(); - - UploadMetadata metadata = CreateMetadata(_textFilePath, _textFileContents.length); - metadata.setBinary(false); - - SingleSegmentUploader ssu = new SingleSegmentUploader(0, metadata, fe); - ssu.setUseBackOffRetryStrategy(false); - ssu.upload(); - - //verify the entire file is identical to the source file - byte[] actualContents = fe.GetStreamContents(StreamPath); - Assert.assertArrayEquals("Unexpected uploaded stream contents.", _textFileContents, actualContents); - - //verify the append blocks start/end on record boundaries - Iterable appendBlocks = fe.GetAppendBlocks(StreamPath); - int lengthSoFar = 0; - for (byte[] append: appendBlocks) - { - lengthSoFar += append.length; - if (lengthSoFar < actualContents.length) - { - Assert.assertEquals('\n', (char)append[append.length - 1]); - } - } - } - - /** - * Tests the case when the SingleSegmentUploader tries upload a non-binary file (i.e., split on record boundaries), but at least one record is larger than the max allowed size. - * - * @throws Exception - */ - @Test - public void SingleSegmentUploader_UploadNonBinaryFileTooLargeRecord() throws Exception { - InMemoryFrontEnd fe = new InMemoryFrontEnd(); - - UploadMetadata metadata = CreateMetadata(_badTextFilePath, _badTextFileContents.length); - metadata.setBinary(false); - - SingleSegmentUploader ssu = new SingleSegmentUploader(0, metadata, fe); - ssu.setUseBackOffRetryStrategy(false); - - try { - ssu.upload(); - Assert.assertTrue("Should fail when a record is too large to fit within a single record boundary when splitting on boundaries, but didn't!", false); - } - catch (UploadFailedException ex) { - // do nothing, expected - } - } - - /** - * Tests various scenarios where the upload will fail repeatedly; verifies that the uploader will retry a certain number of times before finally giving up - * - * @throws Exception - */ - @Test - public void SingleSegmentUploader_RetryBlock() throws Exception { - TestRetryBlock(0); - TestRetryBlock(1); - TestRetryBlock(2); - TestRetryBlock(3); - TestRetryBlock(4); - TestRetryBlock(5); - } - - public void TestRetryBlock(int failCount) throws Exception { - boolean expectSuccess = failCount < SingleSegmentUploader.MAX_BUFFER_UPLOAD_ATTEMPT_COUNT; - - int callCount = 0; - - InMemoryFrontEnd workingFrontEnd = new InMemoryFrontEnd(); - SsuMockFrontEnd fe = new SsuMockFrontEnd(workingFrontEnd, false, true, failCount); - - UploadMetadata metadata = CreateMetadata(_smallFilePath, _smallFileContents.length); - - SingleSegmentUploader ssu = new SingleSegmentUploader(0, metadata, fe); - ssu.setUseBackOffRetryStrategy(false); - - if (expectSuccess) - { - ssu.upload(); - byte[] actualContents = workingFrontEnd.GetStreamContents(StreamPath); - Assert.assertArrayEquals("Unexpected uploaded stream contents.", _smallFileContents, actualContents); - } - else - { - try { - ssu.upload(); - Assert.assertTrue("upload should have failed due to too many retries but didn't!", false); - } - catch (Exception ex) { - Assert.assertTrue("Expected an intentional exception and got: " + ex, IntentionalException.class.isInstance(ex)); - } - } - } - - private UploadMetadata CreateMetadata(String filePath, long filelength) - { - UploadMetadata metadata = new UploadMetadata(); - metadata.setInputFilePath(filePath); - metadata.setFileLength(filelength); - metadata.setTargetStreamPath(StreamPath); - metadata.setSegmentCount(1); 
- metadata.setSegmentLength(UploadSegmentMetadata.calculateSegmentLength(filelength, 1)); - metadata.setBinary(true); - - UploadSegmentMetadata[] toSet = new UploadSegmentMetadata[1]; - toSet[0] = new UploadSegmentMetadata(0, metadata); - toSet[0].setPath(metadata.getTargetStreamPath()); - metadata.setSegments(toSet); - return metadata; - } -} diff --git a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/SsuMockFrontEnd.java b/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/SsuMockFrontEnd.java deleted file mode 100644 index 0eea59395e07..000000000000 --- a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/SsuMockFrontEnd.java +++ /dev/null @@ -1,98 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - */ -package com.microsoft.azure.management.datalake.store.uploader; - -import com.microsoft.rest.RestException; - -import java.io.IOException; - -/** - * Represents a mocked front end for testing the {@link SingleSegmentUploader} - */ -public class SsuMockFrontEnd implements FrontEndAdapter { - - private FrontEndAdapter BaseAdapter; - - private boolean DoNothing; - - private boolean TestRetry; - - private int CallCount; - - private int FailCount; - - /** - * Constructor with base front end. - * @param baseAdapter The base adapter to use for non-mocked methods - * @param doNothing If true, indicates that all methods should perform no actions and return default values. - * @param testRetry If true, indicates that method implementations should test for the retry code paths. Cannot be true if doNothing is true. - * @param failCount This is required when testRetry is true. It indicates the number of failures to allow for retries. 
- */ - public SsuMockFrontEnd(FrontEndAdapter baseAdapter, boolean doNothing, boolean testRetry, int failCount) - { - BaseAdapter = baseAdapter; - DoNothing = doNothing; - TestRetry = testRetry; - CallCount = 0; - FailCount = failCount; - } - - public void createStream(String streamPath, boolean overwrite, byte[] data, int byteCount) throws RestException, IOException { - if (!DoNothing && !TestRetry) { - BaseAdapter.createStream(streamPath, overwrite, data, byteCount); - } - else if(TestRetry) { - CallCount++; - if (CallCount <= FailCount) - { - throw new IntentionalException(); - } - BaseAdapter.createStream(streamPath, overwrite, data, byteCount); - } - } - - public void deleteStream(String streamPath, boolean recurse) throws RestException, IOException { - if (!DoNothing) { - BaseAdapter.deleteStream(streamPath, recurse); - } - } - - public void appendToStream(String streamPath, byte[] data, long offset, int byteCount) throws RestException, IOException { - if (!DoNothing && !TestRetry) { - BaseAdapter.appendToStream(streamPath, data, offset, byteCount); - } - else if(TestRetry) { - CallCount++; - if (CallCount <= FailCount) - { - throw new IntentionalException(); - } - BaseAdapter.appendToStream(streamPath, data, offset, byteCount); - } - } - - public boolean streamExists(String streamPath) throws RestException, IOException { - if (!DoNothing) { - return BaseAdapter.streamExists(streamPath); - } - - return true; - } - - public long getStreamLength(String streamPath) throws RestException, IOException { - if (!DoNothing) { - return BaseAdapter.getStreamLength(streamPath); - } - - return 0; - } - - public void concatenate(String targetStreamPath, String[] inputStreamPaths) throws RestException, IOException { - if (!DoNothing) { - BaseAdapter.concatenate(targetStreamPath, inputStreamPaths); - } - } -} diff --git a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/StringExtensionsTests.java b/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/StringExtensionsTests.java deleted file mode 100644 index 5eb8bfe10281..000000000000 --- a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/StringExtensionsTests.java +++ /dev/null @@ -1,299 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. 
- */ -package com.microsoft.azure.management.datalake.store.uploader; - -import org.apache.commons.lang3.tuple.ImmutableTriple; -import org.apache.commons.lang3.tuple.Triple; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; - -public class StringExtensionsTests { - private static final String customDelim = ";"; - - private static ArrayList<Triple<String, Integer, Integer>> TestDataUTF8 = new ArrayList<>(); - - private static ArrayList<Triple<String, Integer, Integer>> TestDataUTF8CustomDelim = new ArrayList<>(); - - private static ArrayList<Triple<String, Integer, Integer>> TestDataUTF16 = new ArrayList<>(); - - private static ArrayList<Triple<String, Integer, Integer>> TestDataUTF16CustomDelim = new ArrayList<>(); - - private static ArrayList<Triple<String, Integer, Integer>> TestDataUTF32 = new ArrayList<>(); - - private static ArrayList<Triple<String, Integer, Integer>> TestDataUTF32CustomDelim = new ArrayList<>(); - - @BeforeClass - public static void setup() throws Exception { - - TestDataUTF8.add(new ImmutableTriple<>("", -1, -1)); - TestDataUTF8.add(new ImmutableTriple<>("a", -1, -1)); - TestDataUTF8.add(new ImmutableTriple<>("a b", -1, -1)); - TestDataUTF8.add(new ImmutableTriple<>("\r", 0, 0)); - TestDataUTF8.add(new ImmutableTriple<>("\n", 0, 0)); - TestDataUTF8.add(new ImmutableTriple<>("\r\n", 1, 1)); - TestDataUTF8.add(new ImmutableTriple<>("\n\r", 1, 1)); - TestDataUTF8.add(new ImmutableTriple<>("\r\nabcde", 1, 1)); - TestDataUTF8.add(new ImmutableTriple<>("abcde\r", 5, 5)); - TestDataUTF8.add(new ImmutableTriple<>("abcde\n", 5, 5)); - TestDataUTF8.add(new ImmutableTriple<>("abcde\r\n", 6, 6)); - TestDataUTF8.add(new ImmutableTriple<>("abcde\rabcde", 5, 5)); - TestDataUTF8.add(new ImmutableTriple<>("abcde\nabcde", 5, 5)); - TestDataUTF8.add(new ImmutableTriple<>("abcde\r\nabcde", 6, 6)); - TestDataUTF8.add(new ImmutableTriple<>("a\rb\na\r\n", 1, 6)); - TestDataUTF8.add(new ImmutableTriple<>("\rb\na\r\n", 0, 5)); - - TestDataUTF8CustomDelim.add(new ImmutableTriple<>("", -1, -1)); - TestDataUTF8CustomDelim.add(new ImmutableTriple<>("a", -1, -1)); - TestDataUTF8CustomDelim.add(new ImmutableTriple<>("a b", -1, -1)); - TestDataUTF8CustomDelim.add(new ImmutableTriple<>(";", 0, 0)); - TestDataUTF8CustomDelim.add(new ImmutableTriple<>("a;", 1, 1)); - TestDataUTF8CustomDelim.add(new ImmutableTriple<>("b;", 1, 1)); - TestDataUTF8CustomDelim.add(new ImmutableTriple<>("a;abcde", 1, 1)); - TestDataUTF8CustomDelim.add(new ImmutableTriple<>("abcde;", 5, 5)); - TestDataUTF8CustomDelim.add(new ImmutableTriple<>("abcde\r;", 6, 6)); - TestDataUTF8CustomDelim.add(new ImmutableTriple<>("abcde;abcde", 5, 5)); - TestDataUTF8CustomDelim.add(new ImmutableTriple<>("abcde;abcde", 5, 5)); - TestDataUTF8CustomDelim.add(new ImmutableTriple<>("abcde\r;abcde", 6, 6)); - TestDataUTF8CustomDelim.add(new ImmutableTriple<>("a;b\na\r;", 1, 6)); - TestDataUTF8CustomDelim.add(new ImmutableTriple<>(";b\na\r;", 0, 5)); - - TestDataUTF16.add(new ImmutableTriple("", -1, -1)); - TestDataUTF16.add(new ImmutableTriple("a", -1, -1)); - TestDataUTF16.add(new ImmutableTriple("a b", -1, -1)); - TestDataUTF16.add(new ImmutableTriple("\r", 1, 1)); - TestDataUTF16.add(new ImmutableTriple("\n", 1, 1)); - TestDataUTF16.add(new ImmutableTriple("\r\n", 3, 3)); - TestDataUTF16.add(new ImmutableTriple("\n\r", 3, 3)); - TestDataUTF16.add(new ImmutableTriple("\r\nabcde", 3, 3)); - TestDataUTF16.add(new ImmutableTriple("abcde\r", 11, 11)); - TestDataUTF16.add(new ImmutableTriple("abcde\n", 11, 11)); - TestDataUTF16.add(new ImmutableTriple("abcde\r\n", 13, 13)); - TestDataUTF16.add(new ImmutableTriple("abcde\rabcde", 11, 11));
- TestDataUTF16.add(new ImmutableTriple("abcde\nabcde", 11, 11)); - TestDataUTF16.add(new ImmutableTriple("abcde\r\nabcde", 13, 13)); - TestDataUTF16.add(new ImmutableTriple("a\rb\na\r\n", 3, 13)); - TestDataUTF16.add(new ImmutableTriple("\rb\na\r\n", 1, 11)); - - TestDataUTF16CustomDelim.add(new ImmutableTriple("", -1, -1)); - TestDataUTF16CustomDelim.add(new ImmutableTriple("a", -1, -1)); - TestDataUTF16CustomDelim.add(new ImmutableTriple("a b", -1, -1)); - TestDataUTF16CustomDelim.add(new ImmutableTriple(";", 1, 1)); - TestDataUTF16CustomDelim.add(new ImmutableTriple("a;", 3, 3)); - TestDataUTF16CustomDelim.add(new ImmutableTriple("b;", 3, 3)); - TestDataUTF16CustomDelim.add(new ImmutableTriple("a;abcde", 3, 3)); - TestDataUTF16CustomDelim.add(new ImmutableTriple("abcde;", 11, 11)); - TestDataUTF16CustomDelim.add(new ImmutableTriple("abcde\r;", 13, 13)); - TestDataUTF16CustomDelim.add(new ImmutableTriple("abcde;abcde", 11, 11)); - TestDataUTF16CustomDelim.add(new ImmutableTriple("abcde;abcde", 11, 11)); - TestDataUTF16CustomDelim.add(new ImmutableTriple("abcde\r;abcde", 13, 13)); - TestDataUTF16CustomDelim.add(new ImmutableTriple("a;b\na\r;", 3, 13)); - TestDataUTF16CustomDelim.add(new ImmutableTriple(";b\na\r;", 1, 11)); - - TestDataUTF32.add(new ImmutableTriple("", -1, -1)); - TestDataUTF32.add(new ImmutableTriple("a", -1, -1)); - TestDataUTF32.add(new ImmutableTriple("a b", -1, -1)); - TestDataUTF32.add(new ImmutableTriple("\r", 3, 3)); - TestDataUTF32.add(new ImmutableTriple("\n", 3, 3)); - TestDataUTF32.add(new ImmutableTriple("\r\n", 7, 7)); - TestDataUTF32.add(new ImmutableTriple("\n\r", 7, 7)); - TestDataUTF32.add(new ImmutableTriple("\r\nabcde", 7, 7)); - TestDataUTF32.add(new ImmutableTriple("abcde\r", 23, 23)); - TestDataUTF32.add(new ImmutableTriple("abcde\n", 23, 23)); - TestDataUTF32.add(new ImmutableTriple("abcde\r\n", 27, 27)); - TestDataUTF32.add(new ImmutableTriple("abcde\rabcde", 23, 23)); - TestDataUTF32.add(new ImmutableTriple("abcde\nabcde", 23, 23)); - TestDataUTF32.add(new ImmutableTriple("abcde\r\nabcde", 27, 27)); - TestDataUTF32.add(new ImmutableTriple("a\rb\na\r\n", 7, 27)); - TestDataUTF32.add(new ImmutableTriple("\rb\na\r\n", 3, 23)); - - TestDataUTF32CustomDelim.add(new ImmutableTriple("", -1, -1)); - TestDataUTF32CustomDelim.add(new ImmutableTriple("a", -1, -1)); - TestDataUTF32CustomDelim.add(new ImmutableTriple("a b", -1, -1)); - TestDataUTF32CustomDelim.add(new ImmutableTriple(";", 3, 3)); - TestDataUTF32CustomDelim.add(new ImmutableTriple("a;", 7, 7)); - TestDataUTF32CustomDelim.add(new ImmutableTriple("b;", 7, 7)); - TestDataUTF32CustomDelim.add(new ImmutableTriple("a;abcde", 7, 7)); - TestDataUTF32CustomDelim.add(new ImmutableTriple("abcde;", 23, 23)); - TestDataUTF32CustomDelim.add(new ImmutableTriple("abcde\r;", 27, 27)); - TestDataUTF32CustomDelim.add(new ImmutableTriple("abcde;abcde", 23, 23)); - TestDataUTF32CustomDelim.add(new ImmutableTriple("abcde;abcde", 23, 23)); - TestDataUTF32CustomDelim.add(new ImmutableTriple("abcde\r;abcde", 27, 27)); - TestDataUTF32CustomDelim.add(new ImmutableTriple("a;b\na\r;", 7, 27)); - TestDataUTF32CustomDelim.add(new ImmutableTriple(";b\na\r;", 3, 23)); - } - - @Test - public void StringExtensions_FindNewLine_UTF8() - { - for (Triple<String, Integer, Integer> t: TestDataUTF8) - { - byte[] exactBuffer = t.getLeft().getBytes(StandardCharsets.UTF_8); - byte[] largerBuffer = new byte[exactBuffer.length + 100]; - System.arraycopy(exactBuffer, 0, largerBuffer, 0, exactBuffer.length); - - int forwardInExactBuffer =
StringExtensions.findNewline(exactBuffer, 0, exactBuffer.length, false, StandardCharsets.UTF_8, null); - Assert.assertEquals(t.getMiddle().intValue(), forwardInExactBuffer); - - int forwardInLargeBuffer = StringExtensions.findNewline(largerBuffer, 0, exactBuffer.length, false, StandardCharsets.UTF_8, null); - Assert.assertEquals(t.getMiddle().intValue(), forwardInLargeBuffer); - - int reverseInExactBuffer = StringExtensions.findNewline(exactBuffer, Math.max(0, exactBuffer.length - 1), exactBuffer.length, true, StandardCharsets.UTF_8, null); - Assert.assertEquals(t.getRight().intValue(), reverseInExactBuffer); - - int reverseInLargeBuffer = StringExtensions.findNewline(largerBuffer, Math.max(0, exactBuffer.length - 1), exactBuffer.length, true, StandardCharsets.UTF_8, null); - Assert.assertEquals(t.getRight().intValue(), reverseInLargeBuffer); - } - - for (Triple<String, Integer, Integer> t: TestDataUTF8CustomDelim) - { - byte[] exactBuffer = t.getLeft().getBytes(StandardCharsets.UTF_8); - byte[] largerBuffer = new byte[exactBuffer.length + 100]; - System.arraycopy(exactBuffer, 0, largerBuffer, 0, exactBuffer.length); - - int forwardInExactBuffer = StringExtensions.findNewline(exactBuffer, 0, exactBuffer.length, false, StandardCharsets.UTF_8, customDelim); - Assert.assertEquals(t.getMiddle().intValue(), forwardInExactBuffer); - - int forwardInLargeBuffer = StringExtensions.findNewline(largerBuffer, 0, exactBuffer.length, false, StandardCharsets.UTF_8, customDelim); - Assert.assertEquals(t.getMiddle().intValue(), forwardInLargeBuffer); - - int reverseInExactBuffer = StringExtensions.findNewline(exactBuffer, Math.max(0, exactBuffer.length - 1), exactBuffer.length, true, StandardCharsets.UTF_8, customDelim); - Assert.assertEquals(t.getRight().intValue(), reverseInExactBuffer); - - int reverseInLargeBuffer = StringExtensions.findNewline(largerBuffer, Math.max(0, exactBuffer.length - 1), exactBuffer.length, true, StandardCharsets.UTF_8, customDelim); - Assert.assertEquals(t.getRight().intValue(), reverseInLargeBuffer); - } - } - - @Test - public void StringExtensions_FindNewLine_UTF16() - { - for (Triple<String, Integer, Integer> t: TestDataUTF16) - { - byte[] exactBuffer = t.getLeft().getBytes(StandardCharsets.UTF_16LE); - byte[] largerBuffer = new byte[exactBuffer.length + 100]; - System.arraycopy(exactBuffer, 0, largerBuffer, 0, exactBuffer.length); - - int forwardInExactBuffer = StringExtensions.findNewline(exactBuffer, 0, exactBuffer.length, false, StandardCharsets.UTF_16LE, null); - Assert.assertEquals(t.getMiddle().intValue(), forwardInExactBuffer); - - int forwardInLargeBuffer = StringExtensions.findNewline(largerBuffer, 0, exactBuffer.length, false, StandardCharsets.UTF_16LE, null); - Assert.assertEquals(t.getMiddle().intValue(), forwardInLargeBuffer); - - int reverseInExactBuffer = StringExtensions.findNewline(exactBuffer, Math.max(0, exactBuffer.length - 1), exactBuffer.length, true, StandardCharsets.UTF_16LE, null); - Assert.assertEquals(t.getRight().intValue(), reverseInExactBuffer); - - int reverseInLargeBuffer = StringExtensions.findNewline(largerBuffer, Math.max(0, exactBuffer.length - 1), exactBuffer.length, true, StandardCharsets.UTF_16LE, null); - Assert.assertEquals(t.getRight().intValue(), reverseInLargeBuffer); - } - - for (Triple<String, Integer, Integer> t: TestDataUTF16CustomDelim) - { - byte[] exactBuffer = t.getLeft().getBytes(StandardCharsets.UTF_16LE); - byte[] largerBuffer = new byte[exactBuffer.length + 100]; - System.arraycopy(exactBuffer, 0, largerBuffer, 0, exactBuffer.length); - - int forwardInExactBuffer =
StringExtensions.findNewline(exactBuffer, 0, exactBuffer.length, false, StandardCharsets.UTF_16LE, customDelim); - Assert.assertEquals(t.getMiddle().intValue(), forwardInExactBuffer); - - int forwardInLargeBuffer = StringExtensions.findNewline(largerBuffer, 0, exactBuffer.length, false, StandardCharsets.UTF_16LE, customDelim); - Assert.assertEquals(t.getMiddle().intValue(), forwardInLargeBuffer); - - int reverseInExactBuffer = StringExtensions.findNewline(exactBuffer, Math.max(0, exactBuffer.length - 1), exactBuffer.length, true, StandardCharsets.UTF_16LE, customDelim); - Assert.assertEquals(t.getRight().intValue(), reverseInExactBuffer); - - int reverseInLargeBuffer = StringExtensions.findNewline(largerBuffer, Math.max(0, exactBuffer.length - 1), exactBuffer.length, true, StandardCharsets.UTF_16LE, customDelim); - Assert.assertEquals(t.getRight().intValue(), reverseInLargeBuffer); - } - } - - @Test - public void StringExtensions_FindNewLine_UTF16BigEndian() - { - for (Triple<String, Integer, Integer> t: TestDataUTF16) - { - byte[] exactBuffer = t.getLeft().getBytes(StandardCharsets.UTF_16BE); - byte[] largerBuffer = new byte[exactBuffer.length + 100]; - System.arraycopy(exactBuffer, 0, largerBuffer, 0, exactBuffer.length); - - int forwardInExactBuffer = StringExtensions.findNewline(exactBuffer, 0, exactBuffer.length, false, StandardCharsets.UTF_16BE, null); - Assert.assertEquals(t.getMiddle().intValue(), forwardInExactBuffer); - - int forwardInLargeBuffer = StringExtensions.findNewline(largerBuffer, 0, exactBuffer.length, false, StandardCharsets.UTF_16BE, null); - Assert.assertEquals(t.getMiddle().intValue(), forwardInLargeBuffer); - - int reverseInExactBuffer = StringExtensions.findNewline(exactBuffer, Math.max(0, exactBuffer.length - 1), exactBuffer.length, true, StandardCharsets.UTF_16BE, null); - Assert.assertEquals(t.getRight().intValue(), reverseInExactBuffer); - - int reverseInLargeBuffer = StringExtensions.findNewline(largerBuffer, Math.max(0, exactBuffer.length - 1), exactBuffer.length, true, StandardCharsets.UTF_16BE, null); - Assert.assertEquals(t.getRight().intValue(), reverseInLargeBuffer); - } - - for (Triple<String, Integer, Integer> t: TestDataUTF16CustomDelim) - { - byte[] exactBuffer = t.getLeft().getBytes(StandardCharsets.UTF_16BE); - byte[] largerBuffer = new byte[exactBuffer.length + 100]; - System.arraycopy(exactBuffer, 0, largerBuffer, 0, exactBuffer.length); - - int forwardInExactBuffer = StringExtensions.findNewline(exactBuffer, 0, exactBuffer.length, false, StandardCharsets.UTF_16BE, customDelim); - Assert.assertEquals(t.getMiddle().intValue(), forwardInExactBuffer); - - int forwardInLargeBuffer = StringExtensions.findNewline(largerBuffer, 0, exactBuffer.length, false, StandardCharsets.UTF_16BE, customDelim); - Assert.assertEquals(t.getMiddle().intValue(), forwardInLargeBuffer); - - int reverseInExactBuffer = StringExtensions.findNewline(exactBuffer, Math.max(0, exactBuffer.length - 1), exactBuffer.length, true, StandardCharsets.UTF_16BE, customDelim); - Assert.assertEquals(t.getRight().intValue(), reverseInExactBuffer); - - int reverseInLargeBuffer = StringExtensions.findNewline(largerBuffer, Math.max(0, exactBuffer.length - 1), exactBuffer.length, true, StandardCharsets.UTF_16BE, customDelim); - Assert.assertEquals(t.getRight().intValue(), reverseInLargeBuffer); - } - } - - @Test - public void StringExtensions_FindNewLine_ASCII() - { - for (Triple<String, Integer, Integer> t: TestDataUTF8) - { - byte[] exactBuffer = t.getLeft().getBytes(StandardCharsets.US_ASCII); - byte[] largerBuffer = new byte[exactBuffer.length + 100]; -
System.arraycopy(exactBuffer, 0, largerBuffer, 0, exactBuffer.length); - - int forwardInExactBuffer = StringExtensions.findNewline(exactBuffer, 0, exactBuffer.length, false, StandardCharsets.US_ASCII, null); - Assert.assertEquals(t.getMiddle().intValue(), forwardInExactBuffer); - - int forwardInLargeBuffer = StringExtensions.findNewline(largerBuffer, 0, exactBuffer.length, false, StandardCharsets.US_ASCII, null); - Assert.assertEquals(t.getMiddle().intValue(), forwardInLargeBuffer); - - int reverseInExactBuffer = StringExtensions.findNewline(exactBuffer, Math.max(0, exactBuffer.length - 1), exactBuffer.length, true, StandardCharsets.US_ASCII, null); - Assert.assertEquals(t.getRight().intValue(), reverseInExactBuffer); - - int reverseInLargeBuffer = StringExtensions.findNewline(largerBuffer, Math.max(0, exactBuffer.length - 1), exactBuffer.length, true, StandardCharsets.US_ASCII, null); - Assert.assertEquals(t.getRight().intValue(), reverseInLargeBuffer); - } - - for (Triple<String, Integer, Integer> t: TestDataUTF8CustomDelim) - { - byte[] exactBuffer = t.getLeft().getBytes(StandardCharsets.US_ASCII); - byte[] largerBuffer = new byte[exactBuffer.length + 100]; - System.arraycopy(exactBuffer, 0, largerBuffer, 0, exactBuffer.length); - - int forwardInExactBuffer = StringExtensions.findNewline(exactBuffer, 0, exactBuffer.length, false, StandardCharsets.US_ASCII, customDelim); - Assert.assertEquals(t.getMiddle().intValue(), forwardInExactBuffer); - - int forwardInLargeBuffer = StringExtensions.findNewline(largerBuffer, 0, exactBuffer.length, false, StandardCharsets.US_ASCII, customDelim); - Assert.assertEquals(t.getMiddle().intValue(), forwardInLargeBuffer); - - int reverseInExactBuffer = StringExtensions.findNewline(exactBuffer, Math.max(0, exactBuffer.length - 1), exactBuffer.length, true, StandardCharsets.US_ASCII, customDelim); - Assert.assertEquals(t.getRight().intValue(), reverseInExactBuffer); - - int reverseInLargeBuffer = StringExtensions.findNewline(largerBuffer, Math.max(0, exactBuffer.length - 1), exactBuffer.length, true, StandardCharsets.US_ASCII, customDelim); - Assert.assertEquals(t.getRight().intValue(), reverseInLargeBuffer); - } - } -}
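For orientation, the contract these table-driven cases pin down: findNewline returns the offset of the final byte of the first (forward) or last (reverse) newline in the buffer window, or -1 when there is none, and a two-byte "\r\n" or "\n\r" pair counts as a single delimiter; the UTF-16 and UTF-32 tables simply shift the expected offsets by the code-unit width. A minimal single-byte-charset sketch of the forward scan, illustrative only and not the SDK's implementation:

    // Sketch: forward scan for the first newline in buffer[offset, offset + length).
    // Returns the index of the delimiter's final byte, or -1 if no newline exists.
    static int findNewlineForward(byte[] buffer, int offset, int length) {
        int end = offset + length;
        for (int i = offset; i < end; i++) {
            if (buffer[i] == '\r' || buffer[i] == '\n') {
                // "\r\n" and "\n\r" pairs are treated as one two-byte delimiter
                if (i + 1 < end && (buffer[i + 1] == '\r' || buffer[i + 1] == '\n')
                        && buffer[i + 1] != buffer[i]) {
                    return i + 1;
                }
                return i;
            }
        }
        return -1;
    }

Checked against the UTF-8 table above: "\r" yields 0, "\r\n" yields 1, "abcde\r\n" yields 6, and "a\rb\na\r\n" yields 1, matching the expected forward offsets.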
diff --git a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/TestHelpers.java b/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/TestHelpers.java deleted file mode 100644 index c7a8b0bab25f..000000000000 --- a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/TestHelpers.java +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - */ -package com.microsoft.azure.management.datalake.store.uploader; - -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.util.Random; - -public class TestHelpers { - /** - * Generates some random data and writes it out to a temp file and to an in-memory array - * - * @param contents The array to write random data to (the length of this array will be the size of the file). - * @return The path of the file that will be created. - * @throws IOException - */ - static String GenerateFileData(byte[] contents) throws IOException { - File filePath = File.createTempFile("adlUploader", "test.data"); - - Random rnd = new Random(0); - rnd.nextBytes(contents); - if (filePath.exists()) - { - filePath.delete(); - } - - FileOutputStream writer = new FileOutputStream(filePath); - writer.write(contents); - writer.flush(); - writer.close(); - return filePath.toString(); - } - - /** - * Generates some random data and writes it out to a temp file and to an in-memory array - * - * @param contents The array to write random data to (the length of this array will be the size of the file). - * @param minRecordLength The minimum amount of data to write (inclusive) - * @param maxRecordLength The maximum amount of data to write (exclusive) - * @return The path of the file that will be created. - * @throws IOException - */ - static String GenerateTextFileData(byte[] contents, int minRecordLength, int maxRecordLength) throws IOException { - File filePath = File.createTempFile("adlUploader", "test.data"); - int offset = 0; - while (offset < contents.length) - { - int recordLength = minRecordLength + (int)(Math.random()*((maxRecordLength - minRecordLength) + 1)); - recordLength = Math.min(recordLength, contents.length - offset - 2); - - int recordEndPos = offset + recordLength; - while (offset < recordEndPos) - { - contents[offset] = (byte)((int)'a' + (int)(Math.random()*(((int)'z' - (int)'a') + 1))); - offset++; - } - contents[offset++] = (byte)'\r'; - contents[offset++] = (byte)'\n'; - } - if (filePath.exists()) - { - filePath.delete(); - } - - FileOutputStream writer = new FileOutputStream(filePath); - writer.write(contents); - writer.flush(); - writer.close(); - return filePath.toString(); - } -} diff --git a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/UploadMetadataGeneratorTests.java b/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/UploadMetadataGeneratorTests.java deleted file mode 100644 index e06d56a22a59..000000000000 --- a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/UploadMetadataGeneratorTests.java +++ /dev/null @@ -1,183 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information.
- */ -package com.microsoft.azure.management.datalake.store.uploader; - -import com.google.common.io.CountingOutputStream; -import org.apache.commons.io.FileUtils; -import org.junit.Assert; -import org.junit.Test; - -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.RandomAccessFile; -import java.text.MessageFormat; -import java.util.Arrays; -import java.util.List; -import java.util.Random; - -/** - * Unit tests that target the {@link UploadMetadataGenerator} class - */ -public class UploadMetadataGeneratorTests { - private static final int MaxAppendLength = 4 * 1024 * 1024; - private static final byte[] NewLine = "\r\n".getBytes(); - private static final List<Number> FileLengthsMB = Arrays.asList(2, 4, 10, 14.123456, 20.123456, 23.456789, 30.987654, 37.897643, 50.546213, 53.456789, 123.456789 ); - - @Test - public void UploadMetadataGenerator_AlignSegmentsToRecordBoundaries() throws IOException, UploadFailedException, InvalidMetadataException { - //We keep creating a file, by appending a number of bytes to it (taken from FileLengthsMB). - //At each iteration, we append a new blob of data, and then run the whole test on the entire file - Random rnd = new Random(0); - File folderPath = new File(MessageFormat.format("{0}\\uploadtest", new File(".").getAbsoluteFile())); - File filePath = new File(folderPath, "verifymetadata.txt"); - try - { - if (!folderPath.exists()) - { - folderPath.mkdirs(); - } - - if (filePath.exists()) - { - filePath.delete(); - } - - for (Number lengthMB: FileLengthsMB) - { - int appendLength = (int)(lengthMB.doubleValue()*1024*1024); - AppendToFile(filePath.getAbsolutePath(), appendLength, rnd, 0, MaxAppendLength); - String metadataFilePath = filePath + ".metadata.txt"; - - UploadParameters up = new UploadParameters(filePath.getAbsolutePath(), filePath.getAbsolutePath(), null, 1, false, false, false, 4*1024*1024, null); - UploadMetadataGenerator mg = new UploadMetadataGenerator(up, MaxAppendLength); - UploadMetadata metadata = mg.createNewMetadata(metadataFilePath); - - VerifySegmentsAreOnRecordBoundaries(metadata, filePath.getAbsolutePath()); - } - } - finally - { - if (folderPath.exists()) - { - FileUtils.deleteQuietly(folderPath); - } - } - } - - @Test - public void UploadMetadataGenerator_AlignSegmentsToRecordBoundariesTooLargeRecord() throws IOException { - //We keep creating a file, by appending a number of bytes to it (taken from FileLengthsMB).
- //At each iteration, we append a new blob of data, and then run the whole test on the entire file - Random rnd = new Random(0); - File folderPath = new File(MessageFormat.format("{0}\\uploadtest", new File(".").getAbsolutePath())); - File filePath = new File(folderPath, "verifymetadata.txt"); - try - { - if (!folderPath.exists()) - { - folderPath.mkdirs(); - } - - if (filePath.exists()) - { - filePath.delete(); - } - for (Number lengthMB: FileLengthsMB) - { - if (lengthMB.intValue() * 1024 * 1024 > MaxAppendLength) { - int length = lengthMB.intValue() * 1024 * 1024; - AppendToFile(filePath.getAbsolutePath(), length, rnd, MaxAppendLength + 1, MaxAppendLength + 10); - String metadataFilePath = filePath + ".metadata.txt"; - - UploadParameters up = new UploadParameters(filePath.getAbsolutePath(), filePath.getAbsolutePath(), null, 1, false, false, false, 4 * 1024 * 1024, null); - UploadMetadataGenerator mg = new UploadMetadataGenerator(up, MaxAppendLength); - - try { - mg.createNewMetadata(metadataFilePath); - Assert.assertTrue("Method createNewMetadata should fail due to a record being too large for the record boundaries, but didn't", false); - } - catch(Exception e) { - // do nothing, expected - } - } - } - } - finally - { - if (folderPath.exists()) - { - FileUtils.deleteQuietly(folderPath); - } - } - } - - private void VerifySegmentsAreOnRecordBoundaries(UploadMetadata metadata, String filePath) throws IOException { - try(RandomAccessFile stream = new RandomAccessFile(filePath, "r")) - { - for (UploadSegmentMetadata segment: metadata.getSegments()) - { - if (segment.getSegmentNumber() > 0) - { - //verify that each segment starts with a non-newline and that the 2 previous characters before that offset are newline characters - - //2 characters behind: newline - // always seek from the file origin - stream.seek(0); - stream.seek(segment.getOffset() - 2); - char c1 = (char)stream.read(); - Assert.assertTrue(MessageFormat.format("Expecting a newline at offset {0}", segment.getOffset() - 2), IsNewline(c1)); - - //1 character behind: newline - char c2 = (char)stream.read(); - Assert.assertTrue(MessageFormat.format("Expecting a newline at offset {0}", segment.getOffset() - 2), IsNewline(c2)); - - //by test design, we never have two consecutive newlines that are the same; we'd always have \r\n, but never \r\r or \n\n - char c3 = (char)stream.read(); - Assert.assertNotEquals(c2, c3); - } - } - } - } - - private boolean IsNewline(char c) - { - return c == '\r' || c == '\n'; - } - - private String AppendToFile(String filePath, int length, Random random, int minRecordLength, int maxRecordLength) throws IOException { - try (CountingOutputStream stream = new CountingOutputStream(new FileOutputStream(filePath))) - { - int newLength = (int) (new File(filePath).length() + length); - while (true) - { - int recordLength = minRecordLength + random.nextInt(maxRecordLength - minRecordLength); - if (stream.getCount() + recordLength + NewLine.length > newLength) - { - recordLength = newLength - NewLine.length - (int)stream.getCount(); - if (recordLength < 0) - { - stream.write(NewLine, 0, NewLine.length); - break; - } - } - WriteRecord(stream, recordLength); - stream.write(NewLine, 0, NewLine.length); - } - } - - return filePath; - } - - private void WriteRecord(CountingOutputStream stream, int count) throws IOException { - byte[] record = new byte[count]; - for (int i = 0; i < count; i++) - { - record[i] = (byte)('a' + i % 25); - } - stream.write(record, 0, record.length); - } -}
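The property both generator tests above enforce is worth stating directly: apart from the first segment, every segment offset chosen by UploadMetadataGenerator must land immediately after a record delimiter, so the two bytes preceding the offset are the "\r\n" pair written by AppendToFile. A standalone sketch of that invariant over an in-memory file image (a hypothetical helper, assuming the same "\r\n" convention; it is not part of the uploader):

    // Sketch: true when every non-initial segment offset sits just past a "\r\n" pair.
    static boolean segmentsStartOnRecordBoundaries(byte[] fileImage, long[] segmentOffsets) {
        for (long offset : segmentOffsets) {
            if (offset == 0) {
                continue; // the first segment always starts at the beginning of the file
            }
            if (offset < 2
                    || fileImage[(int) (offset - 2)] != '\r'
                    || fileImage[(int) (offset - 1)] != '\n') {
                return false;
            }
        }
        return true;
    }

This is the same check VerifySegmentsAreOnRecordBoundaries performs with a RandomAccessFile, just expressed over a byte array.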
diff --git a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/UploadSegmentMetadataTests.java b/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/UploadSegmentMetadataTests.java deleted file mode 100644 index c17f05479f56..000000000000 --- a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/UploadSegmentMetadataTests.java +++ /dev/null @@ -1,176 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - */ -package com.microsoft.azure.management.datalake.store.uploader; - -import org.junit.Assert; -import org.junit.Test; - -/** - * Unit tests that target the {@link UploadSegmentMetadata} class. - */ -public class UploadSegmentMetadataTests { - /** - * Tests that segment count calculation works (it's hard to verify correctness without having access to the data that the base class has, - * so we'll just check the boundary conditions, that it's monotonically increasing and that it doesn't throw exceptions for various inputs). - */ - @Test - public void UploadMetadata_CalculateSegmentCount() - { - try { - UploadSegmentMetadata.calculateSegmentCount(-1); - Assert.assertTrue("calculateSegmentCount should have failed for invalid count but it succeeded!", false); - } - catch (IllegalArgumentException ex) { - // do nothing, this is expected - } - - - Assert.assertEquals(0, UploadSegmentMetadata.calculateSegmentCount(0)); - - long maxLength = 100 * (long)Math.pow(2, 40);//100 TB - long increment = 10 * (long)Math.pow(2, 30); //10GB - int lastValue = 0; - for (long length = (long)Math.pow(2, 20); length < maxLength; length += increment) - { - int value = UploadSegmentMetadata.calculateSegmentCount(length); - Assert.assertTrue("Function is not monotonically increasing", lastValue <= value); - lastValue = value; - } - } - - /** - * Tests the correct calculation for a typical segment length. - */ - @Test - public void UploadSegmentMetadata_CalculateTypicalSegmentLength() - { - try { - UploadSegmentMetadata.calculateSegmentLength(1000, -1); - Assert.assertTrue("calculateSegmentLength should have failed for invalid length but it succeeded!", false); - } - catch (IllegalArgumentException ex) { - // do nothing, expected - } - - int maxSegmentCount = 16536; - long fileLength = (long)Math.pow(2, 30); // see comment below about actually making this larger than Int32.MaxValue - long segmentLength; - - for (int segmentCount = 1; segmentCount < maxSegmentCount; segmentCount++) - { - segmentLength = UploadSegmentMetadata.calculateSegmentLength(fileLength, segmentCount); - - //the next two asserts verify that the value calculated will split the input file into a balanced set of segments; - //all the segments should have the same length, except the last one which may have less than that (but never more).
- //a quick heuristic to verify this is: (SegmentLength-1)*SegmentCount < FileLength <= SegmentLength*SegmentCount - Assert.assertTrue("SegmentLength * SegmentCount must be at least the length of the input file", segmentLength * segmentCount >= fileLength); - Assert.assertTrue("(SegmentLength - 1) * SegmentCount must be smaller than the length of the input file", (segmentLength - 1) * segmentCount < fileLength); - } - - // test segmentCount == fileLength; - segmentLength = UploadSegmentMetadata.calculateSegmentLength(fileLength, (int)fileLength); //for this to work, FileLength must be less than Int32.MaxValue - Assert.assertEquals(1, segmentLength); - - // test that if segment count = 0 then the return value is 0. - Assert.assertEquals( - 0, - UploadSegmentMetadata.calculateSegmentLength(fileLength, 0)); - } - - /** - * Tests the correct calculation for a particular segment length (ending vs non-ending). - */ - @Test - public void UploadSegmentMetadata_CalculateParticularSegmentLength() - { - - UploadMetadata lengthOf10 = new UploadMetadata(); - lengthOf10.setFileLength(10); - lengthOf10.setSegmentCount(5); - lengthOf10.setSegmentLength(2); - - UploadMetadata lengthOfNegative10 = new UploadMetadata(); - lengthOfNegative10.setFileLength(-10); - lengthOfNegative10.setSegmentCount(5); - lengthOfNegative10.setSegmentLength(2); - - UploadMetadata lengthOf100 = new UploadMetadata(); - lengthOf100.setFileLength(100); - lengthOf100.setSegmentCount(2); - lengthOf100.setSegmentLength(2); - - UploadMetadata lengthOf100SegmentCount5 = new UploadMetadata(); - lengthOf100SegmentCount5.setFileLength(100); - lengthOf100SegmentCount5.setSegmentCount(5); - lengthOf100SegmentCount5.setSegmentLength(26); - //verify bad inputs - try { - UploadSegmentMetadata.calculateSegmentLength(-1, lengthOf10); - Assert.assertTrue("calculateSegmentLength should have failed for invalid segment number but it succeeded!", false); - } - catch (IndexOutOfBoundsException ex) { - // do nothing, expected - } - - try { - UploadSegmentMetadata.calculateSegmentLength(100, lengthOf10); - Assert.assertTrue("calculateSegmentLength should have failed for invalid segment number but it succeeded!", false); - } - catch (IndexOutOfBoundsException ex) { - // do nothing, expected - } - - try { - UploadSegmentMetadata.calculateSegmentLength(1, lengthOfNegative10); - Assert.assertTrue("calculateSegmentLength should have failed for invalid segment number but it succeeded!", false); - } - catch (IllegalArgumentException ex) { - // do nothing, expected - } - - try { - UploadSegmentMetadata.calculateSegmentLength(1, lengthOf100); - Assert.assertTrue("calculateSegmentLength should have failed for invalid segment number but it succeeded!", false); - } - catch (IllegalArgumentException ex) { - // do nothing, expected - } - - try { - UploadSegmentMetadata.calculateSegmentLength(1, lengthOf100SegmentCount5); - Assert.assertTrue("calculateSegmentLength should have failed for invalid segment number but it succeeded!", false); - } - catch (IllegalArgumentException ex) { - // do nothing, expected - } - - //test various scenarios with a fixed file length, and varying the segment count from 1 to the FileLength - - int FileLength = 16 * (int)Math.pow(2, 20);//16MB - - for (int segmentCount = 1; segmentCount <= FileLength; segmentCount += 1024) - { - long typicalSegmentLength = UploadSegmentMetadata.calculateSegmentLength(FileLength, segmentCount); - - UploadMetadata uploadMetadata = new UploadMetadata(); - uploadMetadata.setFileLength(FileLength); -
uploadMetadata.setSegmentCount(segmentCount); - uploadMetadata.setSegmentLength(typicalSegmentLength); - - long firstSegmentLength = UploadSegmentMetadata.calculateSegmentLength(0, uploadMetadata); - long lastSegmentLength = UploadSegmentMetadata.calculateSegmentLength(segmentCount - 1, uploadMetadata); - - Assert.assertEquals(typicalSegmentLength, firstSegmentLength); - if (segmentCount == 1) - { - Assert.assertEquals(firstSegmentLength, lastSegmentLength); - } - - long reconstructedFileLength = typicalSegmentLength * (segmentCount - 1) + lastSegmentLength; - Assert.assertEquals(FileLength, reconstructedFileLength); - } - } -} diff --git a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/UploaderFrontEndMock.java b/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/UploaderFrontEndMock.java deleted file mode 100644 index 3e42cf307518..000000000000 --- a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/UploaderFrontEndMock.java +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - */ -package com.microsoft.azure.management.datalake.store.uploader; - -import com.microsoft.rest.RestException; -import org.junit.Assert; - -import java.io.IOException; - -/** - * A front end mock used for unit testing {@link DataLakeStoreUploader} - */ -public class UploaderFrontEndMock implements FrontEndAdapter { - - private FrontEndAdapter BaseAdapter; - private int createStreamCount; - private boolean ThrowInConcat; - private boolean ThrowInCreate; - - /** - * Constructor with base front end. - * @param baseAdapter The real front end to use when methods are not mocked. - * @param throwInConcat If true, indicates that concatenation implementation should throw instead of doing work. - * @param throwInCreate If true, indicates that the create implementation should throw instead of doing work. 
- */ - public UploaderFrontEndMock(FrontEndAdapter baseAdapter, boolean throwInConcat, boolean throwInCreate) - { - createStreamCount = 0; - ThrowInConcat = throwInConcat; - ThrowInCreate = throwInCreate; - BaseAdapter = baseAdapter; - } - - public void createStream(String streamPath, boolean overwrite, byte[] data, int byteCount) throws RestException, IOException { - - if(ThrowInCreate) { - createStreamCount++; - if (createStreamCount > 1) { - //we only allow 1 file to be created - throw new IntentionalException(); - } - } - - BaseAdapter.createStream(streamPath, overwrite, data, byteCount); - } - - public void deleteStream(String streamPath, boolean recurse) throws RestException, IOException { - BaseAdapter.deleteStream(streamPath, recurse); - } - - public void appendToStream(String streamPath, byte[] data, long offset, int byteCount) throws RestException, IOException { - BaseAdapter.appendToStream(streamPath, data, offset, byteCount); - } - - public boolean streamExists(String streamPath) throws RestException, IOException { - return BaseAdapter.streamExists(streamPath); - } - - public long getStreamLength(String streamPath) throws RestException, IOException { - return BaseAdapter.getStreamLength(streamPath); - } - - public void concatenate(String targetStreamPath, String[] inputStreamPaths) throws RestException, IOException { - if(ThrowInConcat) { - throw new IntentionalException(); - } - - Assert.assertTrue("concatenate should not be called when using 1 segment", false); - BaseAdapter.concatenate(targetStreamPath, inputStreamPaths); - } -}
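The Accounts changes that follow are the visible half of the regeneration described in the commit message: the throws-heavy methods returning a wrapped ServiceResponse<T> become plain T (sync) plus Observable<T> (async), with the wrapper preserved in new *WithServiceResponseAsync overloads. Roughly, a call site migrates as in this hedged sketch (the accounts variable and the group/account/rule names are placeholders, not from this patch):

    import com.microsoft.azure.management.datalake.store.Accounts;
    import com.microsoft.azure.management.datalake.store.models.FirewallRule;

    class AccountsUsageSketch {
        static void show(Accounts accounts) {
            // Before this change: ServiceResponse<FirewallRule> r = accounts.getFirewallRule(...),
            // declaring CloudException/IOException, then r.getBody() to unwrap the model.

            // After: the sync overload returns the model directly (errors surface unchecked)...
            FirewallRule rule = accounts.getFirewallRule("myGroup", "myAccount", "myRule");

            // ...and the async overload is an rx.Observable of the model.
            accounts.getFirewallRuleAsync("myGroup", "myAccount", "myRule")
                    .subscribe(r -> System.out.println("retrieved rule: " + r));
        }
    }
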
diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/Accounts.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/Accounts.java index cea3374d8145..741724dd2728 100644 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/Accounts.java +++ b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/Accounts.java @@ -8,7 +8,6 @@ package com.microsoft.azure.management.datalake.store; -import com.microsoft.azure.CloudException; import com.microsoft.azure.ListOperationCallback; import com.microsoft.azure.management.datalake.store.models.DataLakeStoreAccount; import com.microsoft.azure.management.datalake.store.models.FirewallRule; @@ -17,7 +16,6 @@ import com.microsoft.rest.ServiceCall; import com.microsoft.rest.ServiceCallback; import com.microsoft.rest.ServiceResponse; -import java.io.IOException; import java.util.List; import rx.Observable; @@ -32,12 +30,8 @@ public interface Accounts { * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. * @param accountName The name of the Data Lake Store account from which to delete the firewall rule. * @param firewallRuleName The name of the firewall rule to delete. - * @throws CloudException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the {@link ServiceResponse} object if successful. */ - ServiceResponse<Void> deleteFirewallRule(String resourceGroupName, String accountName, String firewallRuleName) throws CloudException, IOException, IllegalArgumentException; + void deleteFirewallRule(String resourceGroupName, String accountName, String firewallRuleName); /** * Deletes the specified firewall rule from the specified Data Lake Store account. @@ -58,7 +52,17 @@ public interface Accounts { * @param firewallRuleName The name of the firewall rule to delete. * @return the {@link ServiceResponse} object if successful. */ - Observable<ServiceResponse<Void>> deleteFirewallRuleAsync(String resourceGroupName, String accountName, String firewallRuleName); + Observable<Void> deleteFirewallRuleAsync(String resourceGroupName, String accountName, String firewallRuleName); + + /** + * Deletes the specified firewall rule from the specified Data Lake Store account. + * + * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. + * @param accountName The name of the Data Lake Store account from which to delete the firewall rule. + * @param firewallRuleName The name of the firewall rule to delete. + * @return the {@link ServiceResponse} object if successful. + */ + Observable<ServiceResponse<Void>> deleteFirewallRuleWithServiceResponseAsync(String resourceGroupName, String accountName, String firewallRuleName); /** * Gets the specified Data Lake Store firewall rule. @@ -66,12 +70,9 @@ public interface Accounts { * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. * @param accountName The name of the Data Lake Store account from which to get the firewall rule. * @param firewallRuleName The name of the firewall rule to retrieve. - * @throws CloudException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the FirewallRule object wrapped in {@link ServiceResponse} if successful. + * @return the FirewallRule object if successful. */ - ServiceResponse<FirewallRule> getFirewallRule(String resourceGroupName, String accountName, String firewallRuleName) throws CloudException, IOException, IllegalArgumentException; + FirewallRule getFirewallRule(String resourceGroupName, String accountName, String firewallRuleName); /** * Gets the specified Data Lake Store firewall rule. @@ -92,19 +93,26 @@ * @param firewallRuleName The name of the firewall rule to retrieve. * @return the observable to the FirewallRule object */ - Observable<ServiceResponse<FirewallRule>> getFirewallRuleAsync(String resourceGroupName, String accountName, String firewallRuleName); + Observable<FirewallRule> getFirewallRuleAsync(String resourceGroupName, String accountName, String firewallRuleName); + + /** + * Gets the specified Data Lake Store firewall rule. + * + * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. + * @param accountName The name of the Data Lake Store account from which to get the firewall rule. + * @param firewallRuleName The name of the firewall rule to retrieve. + * @return the observable to the FirewallRule object + */ + Observable<ServiceResponse<FirewallRule>> getFirewallRuleWithServiceResponseAsync(String resourceGroupName, String accountName, String firewallRuleName); /** * Lists the Data Lake Store firewall rules within the specified Data Lake Store account. * * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. * @param accountName The name of the Data Lake Store account from which to get the firewall rules. - * @throws CloudException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the List<FirewallRule> object wrapped in {@link ServiceResponse} if successful.
+ * @return the PagedList<FirewallRule> object if successful. */ - ServiceResponse<List<FirewallRule>> listFirewallRules(final String resourceGroupName, final String accountName) throws CloudException, IOException, IllegalArgumentException; + PagedList<FirewallRule> listFirewallRules(final String resourceGroupName, final String accountName); /** * Lists the Data Lake Store firewall rules within the specified Data Lake Store account. @@ -121,9 +129,18 @@ * * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. * @param accountName The name of the Data Lake Store account from which to get the firewall rules. - * @return the observable to the List<FirewallRule> object + * @return the observable to the PagedList<FirewallRule> object + */ + Observable<Page<FirewallRule>> listFirewallRulesAsync(final String resourceGroupName, final String accountName); + + /** + * Lists the Data Lake Store firewall rules within the specified Data Lake Store account. + * + * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. + * @param accountName The name of the Data Lake Store account from which to get the firewall rules. + * @return the observable to the PagedList<FirewallRule> object */ - Observable<ServiceResponse<Page<FirewallRule>>> listFirewallRulesAsync(final String resourceGroupName, final String accountName); + Observable<ServiceResponse<Page<FirewallRule>>> listFirewallRulesWithServiceResponseAsync(final String resourceGroupName, final String accountName); /** * Creates or updates the specified firewall rule. @@ -132,12 +149,9 @@ * @param accountName The name of the Data Lake Store account to which to add the firewall rule. * @param name The name of the firewall rule to create or update. * @param parameters Parameters supplied to create the firewall rule. - * @throws CloudException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the FirewallRule object wrapped in {@link ServiceResponse} if successful. + * @return the FirewallRule object if successful. */ - ServiceResponse<FirewallRule> createOrUpdateFirewallRule(String resourceGroupName, String accountName, String name, FirewallRule parameters) throws CloudException, IOException, IllegalArgumentException; + FirewallRule createOrUpdateFirewallRule(String resourceGroupName, String accountName, String name, FirewallRule parameters); /** * Creates or updates the specified firewall rule. @@ -160,7 +174,18 @@ * @param parameters Parameters supplied to create the firewall rule. * @return the observable to the FirewallRule object */ - Observable<ServiceResponse<FirewallRule>> createOrUpdateFirewallRuleAsync(String resourceGroupName, String accountName, String name, FirewallRule parameters); + Observable<FirewallRule> createOrUpdateFirewallRuleAsync(String resourceGroupName, String accountName, String name, FirewallRule parameters); + + /** + * Creates or updates the specified firewall rule. + * + * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. + * @param accountName The name of the Data Lake Store account to which to add the firewall rule. + * @param name The name of the firewall rule to create or update. + * @param parameters Parameters supplied to create the firewall rule.
+ * @return the observable to the FirewallRule object + */ + Observable<ServiceResponse<FirewallRule>> createOrUpdateFirewallRuleWithServiceResponseAsync(String resourceGroupName, String accountName, String name, FirewallRule parameters); /** * Creates the specified Data Lake Store account. @@ -168,13 +193,9 @@ * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. * @param name The name of the Data Lake Store account to create. * @param parameters Parameters supplied to create the Data Lake Store account. - * @throws CloudException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @throws InterruptedException exception thrown when long running operation is interrupted - * @return the DataLakeStoreAccount object wrapped in {@link ServiceResponse} if successful. + * @return the DataLakeStoreAccount object if successful. */ - ServiceResponse<DataLakeStoreAccount> create(String resourceGroupName, String name, DataLakeStoreAccount parameters) throws CloudException, IOException, IllegalArgumentException, InterruptedException; + DataLakeStoreAccount create(String resourceGroupName, String name, DataLakeStoreAccount parameters); /** * Creates the specified Data Lake Store account. @@ -195,7 +216,7 @@ * @param parameters Parameters supplied to create the Data Lake Store account. * @return the observable to the DataLakeStoreAccount object */ - Observable<ServiceResponse<DataLakeStoreAccount>> createAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters); + Observable<DataLakeStoreAccount> createAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters); /** * Creates the specified Data Lake Store account. @@ -203,12 +224,19 @@ * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. * @param name The name of the Data Lake Store account to create. * @param parameters Parameters supplied to create the Data Lake Store account. - * @throws CloudException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the DataLakeStoreAccount object wrapped in {@link ServiceResponse} if successful. + * @return the observable to the DataLakeStoreAccount object + */ + Observable<ServiceResponse<DataLakeStoreAccount>> createWithServiceResponseAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters); + + /** + * Creates the specified Data Lake Store account. + * + * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. + * @param name The name of the Data Lake Store account to create. + * @param parameters Parameters supplied to create the Data Lake Store account. + * @return the DataLakeStoreAccount object if successful. */ - ServiceResponse<DataLakeStoreAccount> beginCreate(String resourceGroupName, String name, DataLakeStoreAccount parameters) throws CloudException, IOException, IllegalArgumentException; + DataLakeStoreAccount beginCreate(String resourceGroupName, String name, DataLakeStoreAccount parameters); /** * Creates the specified Data Lake Store account. @@ -229,7 +257,17 @@ * @param parameters Parameters supplied to create the Data Lake Store account.
* @return the observable to the DataLakeStoreAccount object */ - Observable<ServiceResponse<DataLakeStoreAccount>> beginCreateAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters); + Observable<DataLakeStoreAccount> beginCreateAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters); + + /** + * Creates the specified Data Lake Store account. + * + * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. + * @param name The name of the Data Lake Store account to create. + * @param parameters Parameters supplied to create the Data Lake Store account. + * @return the observable to the DataLakeStoreAccount object + */ + Observable<ServiceResponse<DataLakeStoreAccount>> beginCreateWithServiceResponseAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters); /** * Updates the specified Data Lake Store account information. @@ -237,13 +275,9 @@ * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. * @param name The name of the Data Lake Store account to update. * @param parameters Parameters supplied to update the Data Lake Store account. - * @throws CloudException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @throws InterruptedException exception thrown when long running operation is interrupted - * @return the DataLakeStoreAccount object wrapped in {@link ServiceResponse} if successful. + * @return the DataLakeStoreAccount object if successful. */ - ServiceResponse<DataLakeStoreAccount> update(String resourceGroupName, String name, DataLakeStoreAccount parameters) throws CloudException, IOException, IllegalArgumentException, InterruptedException; + DataLakeStoreAccount update(String resourceGroupName, String name, DataLakeStoreAccount parameters); /** * Updates the specified Data Lake Store account information. @@ -264,7 +298,17 @@ * @param parameters Parameters supplied to update the Data Lake Store account. * @return the observable to the DataLakeStoreAccount object */ - Observable<ServiceResponse<DataLakeStoreAccount>> updateAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters); + Observable<DataLakeStoreAccount> updateAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters); + + /** + * Updates the specified Data Lake Store account information. + * + * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. + * @param name The name of the Data Lake Store account to update. + * @param parameters Parameters supplied to update the Data Lake Store account. + * @return the observable to the DataLakeStoreAccount object + */ + Observable<ServiceResponse<DataLakeStoreAccount>> updateWithServiceResponseAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters); /** * Updates the specified Data Lake Store account information. @@ -272,12 +316,9 @@ * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. * @param name The name of the Data Lake Store account to update. * @param parameters Parameters supplied to update the Data Lake Store account. - * @throws CloudException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the DataLakeStoreAccount object wrapped in {@link ServiceResponse} if successful.
+ * @return the DataLakeStoreAccount object if successful. */ - ServiceResponse<DataLakeStoreAccount> beginUpdate(String resourceGroupName, String name, DataLakeStoreAccount parameters) throws CloudException, IOException, IllegalArgumentException; + DataLakeStoreAccount beginUpdate(String resourceGroupName, String name, DataLakeStoreAccount parameters); /** * Updates the specified Data Lake Store account information. @@ -298,20 +339,25 @@ * @param parameters Parameters supplied to update the Data Lake Store account. * @return the observable to the DataLakeStoreAccount object */ - Observable<ServiceResponse<DataLakeStoreAccount>> beginUpdateAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters); + Observable<DataLakeStoreAccount> beginUpdateAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters); + + /** + * Updates the specified Data Lake Store account information. + * + * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. + * @param name The name of the Data Lake Store account to update. + * @param parameters Parameters supplied to update the Data Lake Store account. + * @return the observable to the DataLakeStoreAccount object + */ + Observable<ServiceResponse<DataLakeStoreAccount>> beginUpdateWithServiceResponseAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters); /** * Deletes the specified Data Lake Store account. * * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. * @param accountName The name of the Data Lake Store account to delete. - * @throws CloudException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @throws InterruptedException exception thrown when long running operation is interrupted - * @return the {@link ServiceResponse} object if successful. */ - ServiceResponse<Void> delete(String resourceGroupName, String accountName) throws CloudException, IOException, IllegalArgumentException, InterruptedException; + void delete(String resourceGroupName, String accountName); /** * Deletes the specified Data Lake Store account. @@ -330,19 +376,24 @@ * @param accountName The name of the Data Lake Store account to delete. * @return the {@link ServiceResponse} object if successful. */ - Observable<ServiceResponse<Void>> deleteAsync(String resourceGroupName, String accountName); + Observable<Void> deleteAsync(String resourceGroupName, String accountName); /** * Deletes the specified Data Lake Store account. * * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. * @param accountName The name of the Data Lake Store account to delete. - * @throws CloudException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters * @return the {@link ServiceResponse} object if successful. */ - ServiceResponse<Void> beginDelete(String resourceGroupName, String accountName) throws CloudException, IOException, IllegalArgumentException; + Observable<ServiceResponse<Void>> deleteWithServiceResponseAsync(String resourceGroupName, String accountName); + + /** + * Deletes the specified Data Lake Store account. + * + * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. + * @param accountName The name of the Data Lake Store account to delete.
 
     /**
      * Deletes the specified Data Lake Store account.
@@ -361,19 +412,25 @@ public interface Accounts {
      * @param accountName The name of the Data Lake Store account to delete.
      * @return the {@link ServiceResponse} object if successful.
      */
-    Observable<ServiceResponse<Void>> beginDeleteAsync(String resourceGroupName, String accountName);
+    Observable<Void> beginDeleteAsync(String resourceGroupName, String accountName);
+
+    /**
+     * Deletes the specified Data Lake Store account.
+     *
+     * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account.
+     * @param accountName The name of the Data Lake Store account to delete.
+     * @return the {@link ServiceResponse} object if successful.
+     */
+    Observable<ServiceResponse<Void>> beginDeleteWithServiceResponseAsync(String resourceGroupName, String accountName);
 
     /**
      * Gets the specified Data Lake Store account.
      *
      * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account.
      * @param accountName The name of the Data Lake Store account to retrieve.
-     * @throws CloudException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the DataLakeStoreAccount object wrapped in {@link ServiceResponse} if successful.
+     * @return the DataLakeStoreAccount object if successful.
      */
-    ServiceResponse<DataLakeStoreAccount> get(String resourceGroupName, String accountName) throws CloudException, IOException, IllegalArgumentException;
+    DataLakeStoreAccount get(String resourceGroupName, String accountName);
 
     /**
      * Gets the specified Data Lake Store account.
@@ -392,18 +449,60 @@ public interface Accounts {
      * @param accountName The name of the Data Lake Store account to retrieve.
      * @return the observable to the DataLakeStoreAccount object
      */
-    Observable<ServiceResponse<DataLakeStoreAccount>> getAsync(String resourceGroupName, String accountName);
+    Observable<DataLakeStoreAccount> getAsync(String resourceGroupName, String accountName);
+
+    /**
+     * Gets the specified Data Lake Store account.
+     *
+     * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account.
+     * @param accountName The name of the Data Lake Store account to retrieve.
+     * @return the observable to the DataLakeStoreAccount object
+     */
+    Observable<ServiceResponse<DataLakeStoreAccount>> getWithServiceResponseAsync(String resourceGroupName, String accountName);
+
+    /**
+     * Attempts to enable a user managed key vault for encryption of the specified Data Lake Store account.
+     *
+     * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account.
+     * @param accountName The name of the Data Lake Store account to attempt to enable the Key Vault for.
+     */
+    void enableKeyVault(String resourceGroupName, String accountName);
+
+    /**
+     * Attempts to enable a user managed key vault for encryption of the specified Data Lake Store account.
+     *
+     * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account.
+     * @param accountName The name of the Data Lake Store account to attempt to enable the Key Vault for.
+     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
+     * @return the {@link ServiceCall} object
+     */
+    ServiceCall<Void> enableKeyVaultAsync(String resourceGroupName, String accountName, final ServiceCallback<Void> serviceCallback);
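The Rx-style accessors follow the same split: getAsync emits the unwrapped model, while getWithServiceResponseAsync keeps the wire-level wrapper for callers that still need the status code or headers. A sketch using RxJava 1.x types (rx.functions.Action1), with placeholder names:

    accounts.getAsync("myResourceGroup", "myadlsaccount")
        .subscribe(new Action1<DataLakeStoreAccount>() {
            @Override
            public void call(DataLakeStoreAccount account) {
                System.out.println("retrieved: " + account);
            }
        });

    // Keep the ServiceResponse when the raw response is still needed.
    ServiceResponse<DataLakeStoreAccount> response = accounts
        .getWithServiceResponseAsync("myResourceGroup", "myadlsaccount")
        .toBlocking().single();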
+
+    /**
+     * Attempts to enable a user managed key vault for encryption of the specified Data Lake Store account.
+     *
+     * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account.
+     * @param accountName The name of the Data Lake Store account to attempt to enable the Key Vault for.
+     * @return the {@link ServiceResponse} object if successful.
+     */
+    Observable<Void> enableKeyVaultAsync(String resourceGroupName, String accountName);
+
+    /**
+     * Attempts to enable a user managed key vault for encryption of the specified Data Lake Store account.
+     *
+     * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account.
+     * @param accountName The name of the Data Lake Store account to attempt to enable the Key Vault for.
+     * @return the {@link ServiceResponse} object if successful.
+     */
+    Observable<ServiceResponse<Void>> enableKeyVaultWithServiceResponseAsync(String resourceGroupName, String accountName);
 
     /**
      * Lists the Data Lake Store accounts within a specific resource group. The response includes a link to the next page of results, if any.
     *
      * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account(s).
-     * @throws CloudException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the List<DataLakeStoreAccount> object wrapped in {@link ServiceResponse} if successful.
+     * @return the PagedList<DataLakeStoreAccount> object if successful.
      */
-    ServiceResponse<List<DataLakeStoreAccount>> listByResourceGroup(final String resourceGroupName) throws CloudException, IOException, IllegalArgumentException;
+    PagedList<DataLakeStoreAccount> listByResourceGroup(final String resourceGroupName);
 
     /**
      * Lists the Data Lake Store accounts within a specific resource group. The response includes a link to the next page of results, if any.
@@ -413,6 +512,22 @@ public interface Accounts {
      * @return the {@link ServiceCall} object
      */
     ServiceCall<List<DataLakeStoreAccount>> listByResourceGroupAsync(final String resourceGroupName, final ListOperationCallback<DataLakeStoreAccount> serviceCallback);
+
+    /**
+     * Lists the Data Lake Store accounts within a specific resource group. The response includes a link to the next page of results, if any.
+     *
+     * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account(s).
+     * @return the observable to the PagedList<DataLakeStoreAccount> object
+     */
+    Observable<Page<DataLakeStoreAccount>> listByResourceGroupAsync(final String resourceGroupName);
+
+    /**
+     * Lists the Data Lake Store accounts within a specific resource group. The response includes a link to the next page of results, if any.
+     *
+     * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account(s).
+     * @return the observable to the PagedList<DataLakeStoreAccount> object
+     */
+    Observable<ServiceResponse<Page<DataLakeStoreAccount>>> listByResourceGroupWithServiceResponseAsync(final String resourceGroupName);
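The synchronous list now hands back a PagedList, which in the Azure client runtime fetches follow-on pages lazily during iteration; a brief sketch with placeholder names:

    // Iterates across all pages; the runtime follows the next-page link on demand.
    for (DataLakeStoreAccount account : accounts.listByResourceGroup("myResourceGroup")) {
        System.out.println(account);
    }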
 
     /**
      * Lists the Data Lake Store accounts within a specific resource group. The response includes a link to the next page of results, if any.
@@ -426,12 +541,9 @@ public interface Accounts {
      * @param count A Boolean value of true or false to request a count of the matching resources included with the resources in the response, e.g. Categories?$count=true. Optional.
      * @param search A free-form search. A free-text search expression to match for whether a particular entry should be included in the feed, e.g. Categories?$search=blue OR green. Optional.
      * @param format The desired return format. Return the response in particular format without access to request headers for standard content-type negotiation (e.g. Orders?$format=json). Optional.
-     * @throws CloudException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the List<DataLakeStoreAccount> object wrapped in {@link ServiceResponse} if successful.
+     * @return the PagedList<DataLakeStoreAccount> object if successful.
      */
-    ServiceResponse<List<DataLakeStoreAccount>> listByResourceGroup(final String resourceGroupName, final String filter, final Integer top, final Integer skip, final String expand, final String select, final String orderby, final Boolean count, final String search, final String format) throws CloudException, IOException, IllegalArgumentException;
+    PagedList<DataLakeStoreAccount> listByResourceGroup(final String resourceGroupName, final String filter, final Integer top, final Integer skip, final String expand, final String select, final String orderby, final Boolean count, final String search, final String format);
 
     /**
      * Lists the Data Lake Store accounts within a specific resource group. The response includes a link to the next page of results, if any.
@@ -464,19 +576,33 @@ public interface Accounts {
      * @param count A Boolean value of true or false to request a count of the matching resources included with the resources in the response, e.g. Categories?$count=true. Optional.
      * @param search A free-form search. A free-text search expression to match for whether a particular entry should be included in the feed, e.g. Categories?$search=blue OR green. Optional.
      * @param format The desired return format. Return the response in particular format without access to request headers for standard content-type negotiation (e.g. Orders?$format=json). Optional.
-     * @return the observable to the List<DataLakeStoreAccount> object
+     * @return the observable to the PagedList<DataLakeStoreAccount> object
+     */
+    Observable<Page<DataLakeStoreAccount>> listByResourceGroupAsync(final String resourceGroupName, final String filter, final Integer top, final Integer skip, final String expand, final String select, final String orderby, final Boolean count, final String search, final String format);
+
+    /**
+     * Lists the Data Lake Store accounts within a specific resource group. The response includes a link to the next page of results, if any.
+     *
+     * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account(s).
+     * @param filter OData filter. Optional.
+     * @param top The number of items to return. Optional.
+     * @param skip The number of items to skip over before returning elements. Optional.
+     * @param expand OData expansion. Expand related resources in line with the retrieved resources, e.g. Categories/$expand=Products would expand Product data in line with each Category entry. Optional.
+     * @param select OData Select statement. Limits the properties on each entry to just those requested, e.g. Categories?$select=CategoryName,Description. Optional.
+     * @param orderby OrderBy clause. One or more comma-separated expressions with an optional "asc" (the default) or "desc" depending on the order you'd like the values sorted, e.g. Categories?$orderby=CategoryName desc. Optional.
+     * @param count A Boolean value of true or false to request a count of the matching resources included with the resources in the response, e.g. Categories?$count=true. Optional.
+     * @param search A free-form search. A free-text search expression to match for whether a particular entry should be included in the feed, e.g. Categories?$search=blue OR green. Optional.
+     * @param format The desired return format. Return the response in particular format without access to request headers for standard content-type negotiation (e.g. Orders?$format=json). Optional.
+     * @return the observable to the PagedList<DataLakeStoreAccount> object
      */
-    Observable<ServiceResponse<Page<DataLakeStoreAccount>>> listByResourceGroupAsync(final String resourceGroupName, final String filter, final Integer top, final Integer skip, final String expand, final String select, final String orderby, final Boolean count, final String search, final String format);
+    Observable<ServiceResponse<Page<DataLakeStoreAccount>>> listByResourceGroupWithServiceResponseAsync(final String resourceGroupName, final String filter, final Integer top, final Integer skip, final String expand, final String select, final String orderby, final Boolean count, final String search, final String format);
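For the ten-parameter overloads above, unset OData options are passed as null; a sketch with a made-up $filter expression:

    // First ten accounts matching the filter; skip/expand/select/orderby/
    // count/search/format are left unset.
    PagedList<DataLakeStoreAccount> firstTen = accounts.listByResourceGroup(
        "myResourceGroup",
        "startswith(name,'test')", // $filter (illustrative syntax)
        10,                        // $top
        null, null, null, null, null, null, null);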
 
     /**
      * Lists the Data Lake Store accounts within the subscription. The response includes a link to the next page of results, if any.
      *
-     * @throws CloudException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the List<DataLakeStoreAccount> object wrapped in {@link ServiceResponse} if successful.
+     * @return the PagedList<DataLakeStoreAccount> object if successful.
      */
-    ServiceResponse<List<DataLakeStoreAccount>> list() throws CloudException, IOException, IllegalArgumentException;
+    PagedList<DataLakeStoreAccount> list();
 
     /**
      * Lists the Data Lake Store accounts within the subscription. The response includes a link to the next page of results, if any.
@@ -485,6 +611,20 @@ public interface Accounts {
      * @return the {@link ServiceCall} object
      */
     ServiceCall<List<DataLakeStoreAccount>> listAsync(final ListOperationCallback<DataLakeStoreAccount> serviceCallback);
+
+    /**
+     * Lists the Data Lake Store accounts within the subscription. The response includes a link to the next page of results, if any.
+     *
+     * @return the observable to the PagedList<DataLakeStoreAccount> object
+     */
+    Observable<Page<DataLakeStoreAccount>> listAsync();
+
+    /**
+     * Lists the Data Lake Store accounts within the subscription. The response includes a link to the next page of results, if any.
+     *
+     * @return the observable to the PagedList<DataLakeStoreAccount> object
+     */
+    Observable<ServiceResponse<Page<DataLakeStoreAccount>>> listWithServiceResponseAsync();
 
     /**
      * Lists the Data Lake Store accounts within the subscription. The response includes a link to the next page of results, if any.
      *
@@ -497,12 +637,9 @@ public interface Accounts {
      * @param count The Boolean value of true or false to request a count of the matching resources included with the resources in the response, e.g. Categories?$count=true. Optional.
      * @param search A free-form search. A free-text search expression to match for whether a particular entry should be included in the feed, e.g. Categories?$search=blue OR green. Optional.
      * @param format The desired return format. Return the response in particular format without access to request headers for standard content-type negotiation (e.g. Orders?$format=json). Optional.
-     * @throws CloudException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the List<DataLakeStoreAccount> object wrapped in {@link ServiceResponse} if successful.
+     * @return the PagedList<DataLakeStoreAccount> object if successful.
      */
-    ServiceResponse<List<DataLakeStoreAccount>> list(final String filter, final Integer top, final Integer skip, final String expand, final String select, final String orderby, final Boolean count, final String search, final String format) throws CloudException, IOException, IllegalArgumentException;
+    PagedList<DataLakeStoreAccount> list(final String filter, final Integer top, final Integer skip, final String expand, final String select, final String orderby, final Boolean count, final String search, final String format);
 
     /**
      * Lists the Data Lake Store accounts within the subscription. The response includes a link to the next page of results, if any.
@@ -533,20 +670,33 @@ public interface Accounts {
      * @param count The Boolean value of true or false to request a count of the matching resources included with the resources in the response, e.g. Categories?$count=true. Optional.
      * @param search A free-form search. A free-text search expression to match for whether a particular entry should be included in the feed, e.g. Categories?$search=blue OR green. Optional.
      * @param format The desired return format. Return the response in particular format without access to request headers for standard content-type negotiation (e.g. Orders?$format=json). Optional.
-     * @return the observable to the List<DataLakeStoreAccount> object
+     * @return the observable to the PagedList<DataLakeStoreAccount> object
      */
-    Observable<ServiceResponse<Page<DataLakeStoreAccount>>> listAsync(final String filter, final Integer top, final Integer skip, final String expand, final String select, final String orderby, final Boolean count, final String search, final String format);
+    Observable<Page<DataLakeStoreAccount>> listAsync(final String filter, final Integer top, final Integer skip, final String expand, final String select, final String orderby, final Boolean count, final String search, final String format);
+
+    /**
+     * Lists the Data Lake Store accounts within the subscription. The response includes a link to the next page of results, if any.
+     *
+     * @param filter OData filter. Optional.
+     * @param top The number of items to return. Optional.
+     * @param skip The number of items to skip over before returning elements. Optional.
+     * @param expand OData expansion. Expand related resources in line with the retrieved resources, e.g. Categories/$expand=Products would expand Product data in line with each Category entry. Optional.
+     * @param select OData Select statement. Limits the properties on each entry to just those requested, e.g. Categories?$select=CategoryName,Description. Optional.
+     * @param orderby OrderBy clause. One or more comma-separated expressions with an optional "asc" (the default) or "desc" depending on the order you'd like the values sorted, e.g. Categories?$orderby=CategoryName desc. Optional.
+     * @param count The Boolean value of true or false to request a count of the matching resources included with the resources in the response, e.g. Categories?$count=true. Optional.
+     * @param search A free-form search. A free-text search expression to match for whether a particular entry should be included in the feed, e.g. Categories?$search=blue OR green. Optional.
+     * @param format The desired return format. Return the response in particular format without access to request headers for standard content-type negotiation (e.g. Orders?$format=json). Optional.
+     * @return the observable to the PagedList<DataLakeStoreAccount> object
+     */
+    Observable<ServiceResponse<Page<DataLakeStoreAccount>>> listWithServiceResponseAsync(final String filter, final Integer top, final Integer skip, final String expand, final String select, final String orderby, final Boolean count, final String search, final String format);
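Since the new listAsync overloads emit Page objects, a caller that wants a flat stream of accounts can concatenate the pages. This sketch assumes the com.microsoft.azure.Page type exposes its contents via items(), which should be verified against the runtime revision in use:

    accounts.listAsync()
        .concatMap(new Func1<Page<DataLakeStoreAccount>, Observable<DataLakeStoreAccount>>() {
            @Override
            public Observable<DataLakeStoreAccount> call(Page<DataLakeStoreAccount> page) {
                return Observable.from(page.items()); // items() accessor assumed
            }
        })
        .subscribe(new Action1<DataLakeStoreAccount>() {
            @Override
            public void call(DataLakeStoreAccount account) {
                System.out.println(account);
            }
        });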
 
     /**
      * Lists the Data Lake Store firewall rules within the specified Data Lake Store account.
      *
      * @param nextPageLink The NextLink from the previous successful call to List operation.
-     * @throws CloudException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the List<FirewallRule> object wrapped in {@link ServiceResponse} if successful.
+     * @return the PagedList<FirewallRule> object if successful.
      */
-    ServiceResponse<List<FirewallRule>> listFirewallRulesNext(final String nextPageLink) throws CloudException, IOException, IllegalArgumentException;
+    PagedList<FirewallRule> listFirewallRulesNext(final String nextPageLink);
 
     /**
      * Lists the Data Lake Store firewall rules within the specified Data Lake Store account.
@@ -562,20 +712,25 @@ public interface Accounts {
      * Lists the Data Lake Store firewall rules within the specified Data Lake Store account.
      *
      * @param nextPageLink The NextLink from the previous successful call to List operation.
-     * @return the observable to the List<FirewallRule> object
+     * @return the observable to the PagedList<FirewallRule> object
+     */
+    Observable<Page<FirewallRule>> listFirewallRulesNextAsync(final String nextPageLink);
+
+    /**
+     * Lists the Data Lake Store firewall rules within the specified Data Lake Store account.
+     *
+     * @param nextPageLink The NextLink from the previous successful call to List operation.
+     * @return the observable to the PagedList<FirewallRule> object
      */
-    Observable<ServiceResponse<Page<FirewallRule>>> listFirewallRulesNextAsync(final String nextPageLink);
+    Observable<ServiceResponse<Page<FirewallRule>>> listFirewallRulesNextWithServiceResponseAsync(final String nextPageLink);
 
     /**
      * Lists the Data Lake Store accounts within a specific resource group. The response includes a link to the next page of results, if any.
      *
      * @param nextPageLink The NextLink from the previous successful call to List operation.
-     * @throws CloudException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the List<DataLakeStoreAccount> object wrapped in {@link ServiceResponse} if successful.
+     * @return the PagedList<DataLakeStoreAccount> object if successful.
      */
-    ServiceResponse<List<DataLakeStoreAccount>> listByResourceGroupNext(final String nextPageLink) throws CloudException, IOException, IllegalArgumentException;
+    PagedList<DataLakeStoreAccount> listByResourceGroupNext(final String nextPageLink);
 
     /**
      * Lists the Data Lake Store accounts within a specific resource group. The response includes a link to the next page of results, if any.
@@ -591,20 +746,25 @@ public interface Accounts {
      * Lists the Data Lake Store accounts within a specific resource group. The response includes a link to the next page of results, if any.
      *
      * @param nextPageLink The NextLink from the previous successful call to List operation.
-     * @return the observable to the List<DataLakeStoreAccount> object
+     * @return the observable to the PagedList<DataLakeStoreAccount> object
+     */
+    Observable<Page<DataLakeStoreAccount>> listByResourceGroupNextAsync(final String nextPageLink);
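Callers can also drive paging by hand with the *Next methods, feeding back the next-page link from the prior response; nextPageLink() and items() are assumed accessors on Page here, not confirmed by this diff:

    Page<DataLakeStoreAccount> page =
        accounts.listByResourceGroupAsync("myResourceGroup").toBlocking().first();
    while (page.nextPageLink() != null) {
        // process page.items(), then fetch the next page
        page = accounts.listByResourceGroupNextAsync(page.nextPageLink()).toBlocking().first();
    }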
+
+    /**
+     * Lists the Data Lake Store accounts within a specific resource group. The response includes a link to the next page of results, if any.
+     *
+     * @param nextPageLink The NextLink from the previous successful call to List operation.
+     * @return the observable to the PagedList<DataLakeStoreAccount> object
+     */
+    Observable<ServiceResponse<Page<DataLakeStoreAccount>>> listByResourceGroupNextWithServiceResponseAsync(final String nextPageLink);
 
     /**
      * Lists the Data Lake Store accounts within the subscription. The response includes a link to the next page of results, if any.
      *
      * @param nextPageLink The NextLink from the previous successful call to List operation.
-     * @throws CloudException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the List<DataLakeStoreAccount> object wrapped in {@link ServiceResponse} if successful.
+     * @return the PagedList<DataLakeStoreAccount> object if successful.
      */
-    ServiceResponse<List<DataLakeStoreAccount>> listNext(final String nextPageLink) throws CloudException, IOException, IllegalArgumentException;
+    PagedList<DataLakeStoreAccount> listNext(final String nextPageLink);
 
     /**
      * Lists the Data Lake Store accounts within the subscription. The response includes a link to the next page of results, if any.
@@ -620,8 +780,16 @@ public interface Accounts {
      * Lists the Data Lake Store accounts within the subscription. The response includes a link to the next page of results, if any.
      *
      * @param nextPageLink The NextLink from the previous successful call to List operation.
-     * @return the observable to the List<DataLakeStoreAccount> object
+     * @return the observable to the PagedList<DataLakeStoreAccount> object
+     */
+    Observable<Page<DataLakeStoreAccount>> listNextAsync(final String nextPageLink);
+
+    /**
+     * Lists the Data Lake Store accounts within the subscription. The response includes a link to the next page of results, if any.
+     *
+     * @param nextPageLink The NextLink from the previous successful call to List operation.
+     * @return the observable to the PagedList<DataLakeStoreAccount> object
      */
-    Observable<ServiceResponse<Page<DataLakeStoreAccount>>> listNextAsync(final String nextPageLink);
+    Observable<ServiceResponse<Page<DataLakeStoreAccount>>> listNextWithServiceResponseAsync(final String nextPageLink);
 
 }
diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/DataLakeStoreFileSystemManagementClient.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/DataLakeStoreFileSystemManagementClient.java
deleted file mode 100644
index 66175c9c0e65..000000000000
--- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/DataLakeStoreFileSystemManagementClient.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/**
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the MIT License. See License.txt in the project root for
- * license information.
- *
- * Code generated by Microsoft (R) AutoRest Code Generator.
- */
-
-package com.microsoft.azure.management.datalake.store;
-
-import com.microsoft.azure.AzureClient;
-import com.microsoft.azure.RestClient;
-
-/**
- * The interface for DataLakeStoreFileSystemManagementClient class.
- */
-public interface DataLakeStoreFileSystemManagementClient {
-    /**
-     * Gets the REST client.
-     *
-     * @return the {@link RestClient} object.
-     */
-    RestClient restClient();
-
-    /**
-     * Gets the {@link AzureClient} used for long running operations.
-     * @return the azure client.
-     */
-    AzureClient getAzureClient();
-
-    /**
-     * Gets the User-Agent header for the client.
-     *
-     * @return the user agent string.
-     */
-    String userAgent();
-
-    /**
-     * Gets the client API version.
-     *
-     * @return the apiVersion value.
-     */
-    String apiVersion();
-
-    /**
-     * Gets the URI used as the base for all cloud service requests.
-     *
-     * @return the adlsFileSystemDnsSuffix value.
-     */
-    String adlsFileSystemDnsSuffix();
-
-    /**
-     * Sets the URI used as the base for all cloud service requests.
-     *
-     * @param adlsFileSystemDnsSuffix the adlsFileSystemDnsSuffix value.
-     * @return the service client itself
-     */
-    DataLakeStoreFileSystemManagementClient withAdlsFileSystemDnsSuffix(String adlsFileSystemDnsSuffix);
-
-    /**
-     * Gets the preferred language for the response.
-     *
-     * @return the acceptLanguage value.
-     */
-    String acceptLanguage();
-
-    /**
-     * Sets the preferred language for the response.
-     *
-     * @param acceptLanguage the acceptLanguage value.
-     * @return the service client itself
-     */
-    DataLakeStoreFileSystemManagementClient withAcceptLanguage(String acceptLanguage);
-
-    /**
-     * Gets the retry timeout in seconds for Long Running Operations. Default value is 30.
-     *
-     * @return the longRunningOperationRetryTimeout value.
-     */
-    int longRunningOperationRetryTimeout();
-
-    /**
-     * Sets the retry timeout in seconds for Long Running Operations. Default value is 30.
-     *
-     * @param longRunningOperationRetryTimeout the longRunningOperationRetryTimeout value.
-     * @return the service client itself
-     */
-    DataLakeStoreFileSystemManagementClient withLongRunningOperationRetryTimeout(int longRunningOperationRetryTimeout);
-
-    /**
-     * Gets whether a unique x-ms-client-request-id value is generated and included in each request. Default is true.
-     *
-     * @return the generateClientRequestId value.
-     */
-    boolean generateClientRequestId();
-
-    /**
-     * Sets whether a unique x-ms-client-request-id value is generated and included in each request. Default is true.
-     *
-     * @param generateClientRequestId the generateClientRequestId value.
-     * @return the service client itself
-     */
-    DataLakeStoreFileSystemManagementClient withGenerateClientRequestId(boolean generateClientRequestId);
-
-    /**
-     * Gets the FileSystems object to access its operations.
-     * @return the FileSystems object.
-     */
-    FileSystems fileSystems();
-
-}
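For anyone migrating off the deleted data-plane client, its operations were reached through fileSystems(); the equivalent surface now lives in the separately built ADLS data-plane package. A reference sketch against the removed interface (getBody() as the ServiceResponse accessor of the com.microsoft.rest runtime of this era, which should be verified):

    // How callers previously listed a directory through the removed client.
    void listRoot(DataLakeStoreFileSystemManagementClient fsClient, String accountName)
            throws AdlsErrorException, IOException {
        FileStatusesResult root = fsClient.fileSystems()
            .listFileStatus(accountName, "/")
            .getBody(); // unwraps the ServiceResponse
    }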
diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/FileSystems.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/FileSystems.java
deleted file mode 100644
index 58a692a55b6a..000000000000
--- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/FileSystems.java
+++ /dev/null
@@ -1,905 +0,0 @@
-/**
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the MIT License. See License.txt in the project root for
- * license information.
- *
- * Code generated by Microsoft (R) AutoRest Code Generator.
- */
-
-package com.microsoft.azure.management.datalake.store;
-
-import com.microsoft.azure.management.datalake.store.models.AclStatusResult;
-import com.microsoft.azure.management.datalake.store.models.AdlsErrorException;
-import com.microsoft.azure.management.datalake.store.models.AppendModeType;
-import com.microsoft.azure.management.datalake.store.models.ContentSummaryResult;
-import com.microsoft.azure.management.datalake.store.models.FileOperationResult;
-import com.microsoft.azure.management.datalake.store.models.FileStatusesResult;
-import com.microsoft.azure.management.datalake.store.models.FileStatusResult;
-import com.microsoft.rest.ServiceCall;
-import com.microsoft.rest.ServiceCallback;
-import com.microsoft.rest.ServiceResponse;
-import java.io.InputStream;
-import java.io.IOException;
-import java.util.List;
-import rx.Observable;
-
-/**
- * An instance of this class provides access to all the operations defined
- * in FileSystems.
- */
-public interface FileSystems {
-    /**
-     * Appends to the specified file. This method supports multiple concurrent appends to the file. NOTE: Concurrent append and normal (serial) append CANNOT be used interchangeably. Once a file has been appended to using either append option, it can only be appended to using that append option.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param filePath The Data Lake Store path (starting with '/') of the file to which to append using concurrent append.
-     * @param streamContents The file contents to include when appending to the file.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    ServiceResponse<Void> concurrentAppend(String accountName, String filePath, byte[] streamContents) throws AdlsErrorException, IOException, IllegalArgumentException;
-
-    /**
-     * Appends to the specified file. This method supports multiple concurrent appends to the file. NOTE: Concurrent append and normal (serial) append CANNOT be used interchangeably. Once a file has been appended to using either append option, it can only be appended to using that append option.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param filePath The Data Lake Store path (starting with '/') of the file to which to append using concurrent append.
-     * @param streamContents The file contents to include when appending to the file.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    ServiceCall<Void> concurrentAppendAsync(String accountName, String filePath, byte[] streamContents, final ServiceCallback<Void> serviceCallback);
-
-    /**
-     * Appends to the specified file. This method supports multiple concurrent appends to the file. NOTE: Concurrent append and normal (serial) append CANNOT be used interchangeably. Once a file has been appended to using either append option, it can only be appended to using that append option.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param filePath The Data Lake Store path (starting with '/') of the file to which to append using concurrent append.
-     * @param streamContents The file contents to include when appending to the file.
-     * @param appendMode Indicates the concurrent append call should create the file if it doesn't exist or just open the existing file for append. Possible values include: 'autocreate'
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    ServiceResponse<Void> concurrentAppend(String accountName, String filePath, byte[] streamContents, AppendModeType appendMode) throws AdlsErrorException, IOException, IllegalArgumentException;
-
-    /**
-     * Appends to the specified file. This method supports multiple concurrent appends to the file. NOTE: Concurrent append and normal (serial) append CANNOT be used interchangeably. Once a file has been appended to using either append option, it can only be appended to using that append option.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param filePath The Data Lake Store path (starting with '/') of the file to which to append using concurrent append.
-     * @param streamContents The file contents to include when appending to the file.
-     * @param appendMode Indicates the concurrent append call should create the file if it doesn't exist or just open the existing file for append. Possible values include: 'autocreate'
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    ServiceCall<Void> concurrentAppendAsync(String accountName, String filePath, byte[] streamContents, AppendModeType appendMode, final ServiceCallback<Void> serviceCallback);
-
-    /**
-     * Appends to the specified file. This method supports multiple concurrent appends to the file. NOTE: Concurrent append and normal (serial) append CANNOT be used interchangeably. Once a file has been appended to using either append option, it can only be appended to using that append option.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param filePath The Data Lake Store path (starting with '/') of the file to which to append using concurrent append.
-     * @param streamContents The file contents to include when appending to the file.
-     * @param appendMode Indicates the concurrent append call should create the file if it doesn't exist or just open the existing file for append. Possible values include: 'autocreate'
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    Observable<ServiceResponse<Void>> concurrentAppendAsync(String accountName, String filePath, byte[] streamContents, AppendModeType appendMode);
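For migration reference, the removed concurrent-append call with its autocreate mode; the enum constant name is assumed from the documented 'autocreate' value and should be checked against the removed AppendModeType model:

    // Appends a line, creating the file on first use; may throw
    // AdlsErrorException or IOException per the signatures above.
    byte[] line = "log line\n".getBytes(java.nio.charset.StandardCharsets.UTF_8);
    fileSystems.concurrentAppend("myadlsaccount", "/logs/app.txt", line, AppendModeType.AUTOCREATE);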
-
-    /**
-     * Checks if the specified access is available at the given path.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param path The Data Lake Store path (starting with '/') of the file or directory for which to check access.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    ServiceResponse<Void> checkAccess(String accountName, String path) throws AdlsErrorException, IOException, IllegalArgumentException;
-
-    /**
-     * Checks if the specified access is available at the given path.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param path The Data Lake Store path (starting with '/') of the file or directory for which to check access.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    ServiceCall<Void> checkAccessAsync(String accountName, String path, final ServiceCallback<Void> serviceCallback);
-
-    /**
-     * Checks if the specified access is available at the given path.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param path The Data Lake Store path (starting with '/') of the file or directory for which to check access.
-     * @param fsaction File system operation read/write/execute in string form, matching regex pattern '[rwx-]{3}'
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    ServiceResponse<Void> checkAccess(String accountName, String path, String fsaction) throws AdlsErrorException, IOException, IllegalArgumentException;
-
-    /**
-     * Checks if the specified access is available at the given path.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param path The Data Lake Store path (starting with '/') of the file or directory for which to check access.
-     * @param fsaction File system operation read/write/execute in string form, matching regex pattern '[rwx-]{3}'
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    ServiceCall<Void> checkAccessAsync(String accountName, String path, String fsaction, final ServiceCallback<Void> serviceCallback);
-
-    /**
-     * Checks if the specified access is available at the given path.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param path The Data Lake Store path (starting with '/') of the file or directory for which to check access.
-     * @param fsaction File system operation read/write/execute in string form, matching regex pattern '[rwx-]{3}'
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    Observable<ServiceResponse<Void>> checkAccessAsync(String accountName, String path, String fsaction);
-
-    /**
-     * Creates a directory.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param path The Data Lake Store path (starting with '/') of the directory to create.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the FileOperationResult object wrapped in {@link ServiceResponse} if successful.
-     */
-    ServiceResponse<FileOperationResult> mkdirs(String accountName, String path) throws AdlsErrorException, IOException, IllegalArgumentException;
-
-    /**
-     * Creates a directory.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param path The Data Lake Store path (starting with '/') of the directory to create.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    ServiceCall<FileOperationResult> mkdirsAsync(String accountName, String path, final ServiceCallback<FileOperationResult> serviceCallback);
-
-    /**
-     * Creates a directory.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param path The Data Lake Store path (starting with '/') of the directory to create.
-     * @return the observable to the FileOperationResult object
-     */
-    Observable<ServiceResponse<FileOperationResult>> mkdirsAsync(String accountName, String path);
-
-    /**
-     * Concatenates the list of source files into the destination file, removing all source files upon success.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param destinationPath The Data Lake Store path (starting with '/') of the destination file resulting from the concatenation.
-     * @param sources A list of comma separated Data Lake Store paths (starting with '/') of the files to concatenate, in the order in which they should be concatenated.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    ServiceResponse<Void> concat(String accountName, String destinationPath, List<String> sources) throws AdlsErrorException, IOException, IllegalArgumentException;
-
-    /**
-     * Concatenates the list of source files into the destination file, removing all source files upon success.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param destinationPath The Data Lake Store path (starting with '/') of the destination file resulting from the concatenation.
-     * @param sources A list of comma separated Data Lake Store paths (starting with '/') of the files to concatenate, in the order in which they should be concatenated.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    ServiceCall<Void> concatAsync(String accountName, String destinationPath, List<String> sources, final ServiceCallback<Void> serviceCallback);
-
-    /**
-     * Concatenates the list of source files into the destination file, removing all source files upon success.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param destinationPath The Data Lake Store path (starting with '/') of the destination file resulting from the concatenation.
-     * @param sources A list of comma separated Data Lake Store paths (starting with '/') of the files to concatenate, in the order in which they should be concatenated.
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    Observable<ServiceResponse<Void>> concatAsync(String accountName, String destinationPath, List<String> sources);
-
-    /**
-     * Concatenates the list of source files into the destination file, deleting all source files upon success. This method accepts more source file paths than the Concat method. This method and the parameters it accepts are subject to change for usability in an upcoming version.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param msConcatDestinationPath The Data Lake Store path (starting with '/') of the destination file resulting from the concatenation.
-     * @param streamContents A list of Data Lake Store paths (starting with '/') of the source files. Must be in the format: sources=&lt;comma separated list&gt;
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    ServiceResponse<Void> msConcat(String accountName, String msConcatDestinationPath, byte[] streamContents) throws AdlsErrorException, IOException, IllegalArgumentException;
-
-    /**
-     * Concatenates the list of source files into the destination file, deleting all source files upon success. This method accepts more source file paths than the Concat method. This method and the parameters it accepts are subject to change for usability in an upcoming version.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param msConcatDestinationPath The Data Lake Store path (starting with '/') of the destination file resulting from the concatenation.
-     * @param streamContents A list of Data Lake Store paths (starting with '/') of the source files. Must be in the format: sources=&lt;comma separated list&gt;
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    ServiceCall<Void> msConcatAsync(String accountName, String msConcatDestinationPath, byte[] streamContents, final ServiceCallback<Void> serviceCallback);
-
-    /**
-     * Concatenates the list of source files into the destination file, deleting all source files upon success. This method accepts more source file paths than the Concat method. This method and the parameters it accepts are subject to change for usability in an upcoming version.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param msConcatDestinationPath The Data Lake Store path (starting with '/') of the destination file resulting from the concatenation.
-     * @param streamContents A list of Data Lake Store paths (starting with '/') of the source files. Must be in the format: sources=&lt;comma separated list&gt;
-     * @param deleteSourceDirectory Indicates that as an optimization instead of deleting each individual source stream, delete the source stream folder if all streams are in the same folder instead. This results in a substantial performance improvement when the only streams in the folder are part of the concatenation operation. WARNING: This includes the deletion of any other files that are not source files. Only set this to true when source files are the only files in the source directory.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    ServiceResponse<Void> msConcat(String accountName, String msConcatDestinationPath, byte[] streamContents, Boolean deleteSourceDirectory) throws AdlsErrorException, IOException, IllegalArgumentException;
-
-    /**
-     * Concatenates the list of source files into the destination file, deleting all source files upon success. This method accepts more source file paths than the Concat method. This method and the parameters it accepts are subject to change for usability in an upcoming version.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param msConcatDestinationPath The Data Lake Store path (starting with '/') of the destination file resulting from the concatenation.
-     * @param streamContents A list of Data Lake Store paths (starting with '/') of the source files. Must be in the format: sources=&lt;comma separated list&gt;
-     * @param deleteSourceDirectory Indicates that as an optimization instead of deleting each individual source stream, delete the source stream folder if all streams are in the same folder instead. This results in a substantial performance improvement when the only streams in the folder are part of the concatenation operation. WARNING: This includes the deletion of any other files that are not source files. Only set this to true when source files are the only files in the source directory.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    ServiceCall<Void> msConcatAsync(String accountName, String msConcatDestinationPath, byte[] streamContents, Boolean deleteSourceDirectory, final ServiceCallback<Void> serviceCallback);
-
-    /**
-     * Concatenates the list of source files into the destination file, deleting all source files upon success. This method accepts more source file paths than the Concat method. This method and the parameters it accepts are subject to change for usability in an upcoming version.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param msConcatDestinationPath The Data Lake Store path (starting with '/') of the destination file resulting from the concatenation.
-     * @param streamContents A list of Data Lake Store paths (starting with '/') of the source files. Must be in the format: sources=&lt;comma separated list&gt;
-     * @param deleteSourceDirectory Indicates that as an optimization instead of deleting each individual source stream, delete the source stream folder if all streams are in the same folder instead. This results in a substantial performance improvement when the only streams in the folder are part of the concatenation operation. WARNING: This includes the deletion of any other files that are not source files. Only set this to true when source files are the only files in the source directory.
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    Observable<ServiceResponse<Void>> msConcatAsync(String accountName, String msConcatDestinationPath, byte[] streamContents, Boolean deleteSourceDirectory);
-
-    /**
-     * Get the list of file status objects specified by the file path, with optional pagination parameters.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param listFilePath The Data Lake Store path (starting with '/') of the directory to list.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the FileStatusesResult object wrapped in {@link ServiceResponse} if successful.
-     */
-    ServiceResponse<FileStatusesResult> listFileStatus(String accountName, String listFilePath) throws AdlsErrorException, IOException, IllegalArgumentException;
-
-    /**
-     * Get the list of file status objects specified by the file path, with optional pagination parameters.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param listFilePath The Data Lake Store path (starting with '/') of the directory to list.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    ServiceCall<FileStatusesResult> listFileStatusAsync(String accountName, String listFilePath, final ServiceCallback<FileStatusesResult> serviceCallback);
-
-    /**
-     * Get the list of file status objects specified by the file path, with optional pagination parameters.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param listFilePath The Data Lake Store path (starting with '/') of the directory to list.
-     * @param listSize Gets or sets the number of items to return. Optional.
-     * @param listAfter Gets or sets the item or lexicographical index after which to begin returning results. For example, a file list of 'a','b','d' and listAfter='b' will return 'd', and a listAfter='c' will also return 'd'. Optional.
-     * @param listBefore Gets or sets the item or lexicographical index before which to begin returning results. For example, a file list of 'a','b','d' and listBefore='d' will return 'a','b', and a listBefore='c' will also return 'a','b'. Optional.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the FileStatusesResult object wrapped in {@link ServiceResponse} if successful.
-     */
-    ServiceResponse<FileStatusesResult> listFileStatus(String accountName, String listFilePath, Integer listSize, String listAfter, String listBefore) throws AdlsErrorException, IOException, IllegalArgumentException;
-
-    /**
-     * Get the list of file status objects specified by the file path, with optional pagination parameters.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param listFilePath The Data Lake Store path (starting with '/') of the directory to list.
-     * @param listSize Gets or sets the number of items to return. Optional.
-     * @param listAfter Gets or sets the item or lexicographical index after which to begin returning results. For example, a file list of 'a','b','d' and listAfter='b' will return 'd', and a listAfter='c' will also return 'd'. Optional.
-     * @param listBefore Gets or sets the item or lexicographical index before which to begin returning results. For example, a file list of 'a','b','d' and listBefore='d' will return 'a','b', and a listBefore='c' will also return 'a','b'. Optional.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    ServiceCall<FileStatusesResult> listFileStatusAsync(String accountName, String listFilePath, Integer listSize, String listAfter, String listBefore, final ServiceCallback<FileStatusesResult> serviceCallback);
-
-    /**
-     * Get the list of file status objects specified by the file path, with optional pagination parameters.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param listFilePath The Data Lake Store path (starting with '/') of the directory to list.
-     * @param listSize Gets or sets the number of items to return. Optional.
-     * @param listAfter Gets or sets the item or lexicographical index after which to begin returning results. For example, a file list of 'a','b','d' and listAfter='b' will return 'd', and a listAfter='c' will also return 'd'. Optional.
-     * @param listBefore Gets or sets the item or lexicographical index before which to begin returning results. For example, a file list of 'a','b','d' and listBefore='d' will return 'a','b', and a listBefore='c' will also return 'a','b'. Optional.
-     * @return the observable to the FileStatusesResult object
-     */
-    Observable<ServiceResponse<FileStatusesResult>> listFileStatusAsync(String accountName, String listFilePath, Integer listSize, String listAfter, String listBefore);
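The listSize/listAfter pair gave cursor-style paging over large directories on the removed surface; for migration reference, a minimal sketch (the model's accessor names are not shown in this diff and would need checking):

    // Fetch up to 1000 entries of /data; to continue, repeat with listAfter set
    // to the last pathSuffix seen, until a page comes back short.
    FileStatusesResult batch = fileSystems
        .listFileStatus("myadlsaccount", "/data", 1000, null, null)
        .getBody();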
-
-    /**
-     * Gets the file content summary object specified by the file path.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param getContentSummaryFilePath The Data Lake Store path (starting with '/') of the file for which to retrieve the summary.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the ContentSummaryResult object wrapped in {@link ServiceResponse} if successful.
-     */
-    ServiceResponse<ContentSummaryResult> getContentSummary(String accountName, String getContentSummaryFilePath) throws AdlsErrorException, IOException, IllegalArgumentException;
-
-    /**
-     * Gets the file content summary object specified by the file path.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param getContentSummaryFilePath The Data Lake Store path (starting with '/') of the file for which to retrieve the summary.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    ServiceCall<ContentSummaryResult> getContentSummaryAsync(String accountName, String getContentSummaryFilePath, final ServiceCallback<ContentSummaryResult> serviceCallback);
-
-    /**
-     * Gets the file content summary object specified by the file path.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param getContentSummaryFilePath The Data Lake Store path (starting with '/') of the file for which to retrieve the summary.
-     * @return the observable to the ContentSummaryResult object
-     */
-    Observable<ServiceResponse<ContentSummaryResult>> getContentSummaryAsync(String accountName, String getContentSummaryFilePath);
-
-    /**
-     * Get the file status object specified by the file path.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param getFilePath The Data Lake Store path (starting with '/') of the file or directory for which to retrieve the status.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the FileStatusResult object wrapped in {@link ServiceResponse} if successful.
-     */
-    ServiceResponse<FileStatusResult> getFileStatus(String accountName, String getFilePath) throws AdlsErrorException, IOException, IllegalArgumentException;
-
-    /**
-     * Get the file status object specified by the file path.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param getFilePath The Data Lake Store path (starting with '/') of the file or directory for which to retrieve the status.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    ServiceCall<FileStatusResult> getFileStatusAsync(String accountName, String getFilePath, final ServiceCallback<FileStatusResult> serviceCallback);
-
-    /**
-     * Get the file status object specified by the file path.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param getFilePath The Data Lake Store path (starting with '/') of the file or directory for which to retrieve the status.
-     * @return the observable to the FileStatusResult object
-     */
-    Observable<ServiceResponse<FileStatusResult>> getFileStatusAsync(String accountName, String getFilePath);
-
-    /**
-     * Appends to the specified file. This method does not support multiple concurrent appends to the file. NOTE: Concurrent append and normal (serial) append CANNOT be used interchangeably. Once a file has been appended to using either append option, it can only be appended to using that append option. Use the ConcurrentAppend option if you would like support for concurrent appends.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param directFilePath The Data Lake Store path (starting with '/') of the file to which to append.
-     * @param streamContents The file contents to include when appending to the file.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    ServiceResponse<Void> append(String accountName, String directFilePath, byte[] streamContents) throws AdlsErrorException, IOException, IllegalArgumentException;
-
-    /**
-     * Appends to the specified file. This method does not support multiple concurrent appends to the file. NOTE: Concurrent append and normal (serial) append CANNOT be used interchangeably. Once a file has been appended to using either append option, it can only be appended to using that append option. Use the ConcurrentAppend option if you would like support for concurrent appends.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param directFilePath The Data Lake Store path (starting with '/') of the file to which to append.
-     * @param streamContents The file contents to include when appending to the file.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    ServiceCall<Void> appendAsync(String accountName, String directFilePath, byte[] streamContents, final ServiceCallback<Void> serviceCallback);
-
-    /**
-     * Appends to the specified file. This method does not support multiple concurrent appends to the file. NOTE: Concurrent append and normal (serial) append CANNOT be used interchangeably. Once a file has been appended to using either append option, it can only be appended to using that append option. Use the ConcurrentAppend option if you would like support for concurrent appends.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param directFilePath The Data Lake Store path (starting with '/') of the file to which to append.
-     * @param streamContents The file contents to include when appending to the file.
-     * @param offset The optional offset in the stream to begin the append operation. Default is to append at the end of the stream.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the {@link ServiceResponse} object if successful.
- */ - ServiceResponse append(String accountName, String directFilePath, byte[] streamContents, Long offset) throws AdlsErrorException, IOException, IllegalArgumentException; - - /** - * Appends to the specified file. This method does not support multiple concurrent appends to the file. NOTE: Concurrent append and normal (serial) append CANNOT be used interchangeably. Once a file has been appended to using either append option, it can only be appended to using that append option. Use the ConcurrentAppend option if you would like support for concurrent appends. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param directFilePath The Data Lake Store path (starting with '/') of the file to which to append. - * @param streamContents The file contents to include when appending to the file. - * @param offset The optional offset in the stream to begin the append operation. Default is to append at the end of the stream. - * @param serviceCallback the async ServiceCallback to handle successful and failed responses. - * @return the {@link ServiceCall} object - */ - ServiceCall appendAsync(String accountName, String directFilePath, byte[] streamContents, Long offset, final ServiceCallback serviceCallback); - - /** - * Appends to the specified file. This method does not support multiple concurrent appends to the file. NOTE: Concurrent append and normal (serial) append CANNOT be used interchangeably. Once a file has been appended to using either append option, it can only be appended to using that append option. Use the ConcurrentAppend option if you would like support for concurrent appends. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param directFilePath The Data Lake Store path (starting with '/') of the file to which to append. - * @param streamContents The file contents to include when appending to the file. - * @param offset The optional offset in the stream to begin the append operation. Default is to append at the end of the stream. - * @return the {@link ServiceResponse} object if successful. - */ - Observable> appendAsync(String accountName, String directFilePath, byte[] streamContents, Long offset); - - /** - * Creates a file with optionally specified content. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param directFilePath The Data Lake Store path (starting with '/') of the file to create. - * @throws AdlsErrorException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the {@link ServiceResponse} object if successful. - */ - ServiceResponse create(String accountName, String directFilePath) throws AdlsErrorException, IOException, IllegalArgumentException; - - /** - * Creates a file with optionally specified content. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param directFilePath The Data Lake Store path (starting with '/') of the file to create. - * @param serviceCallback the async ServiceCallback to handle successful and failed responses. - * @return the {@link ServiceCall} object - */ - ServiceCall createAsync(String accountName, String directFilePath, final ServiceCallback serviceCallback); - /** - * Creates a file with optionally specified content. 
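To make the serial-append contract above concrete, a sketch with placeholder names ('fileSystems' is an assumed client instance):

    // Hypothetical usage. Per the NOTE above, a file appended with this
    // (serial) append cannot later be targeted by ConcurrentAppend, and
    // vice versa.
    byte[] chunk = "more data\n".getBytes(java.nio.charset.StandardCharsets.UTF_8);
    fileSystems.append("myadlsaccount", "/logs/app.log", chunk);        // append at end of stream
    fileSystems.append("myadlsaccount", "/logs/app.log", chunk, 1024L); // append starting at offset 1024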
- *
- * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
- * @param directFilePath The Data Lake Store path (starting with '/') of the file to create.
- * @param streamContents The file contents to include when creating the file. This parameter is optional, resulting in an empty file if not specified.
- * @param overwrite Whether an existing file should be overwritten.
- * @throws AdlsErrorException exception thrown from REST call
- * @throws IOException exception thrown from serialization/deserialization
- * @throws IllegalArgumentException exception thrown from invalid parameters
- * @return the {@link ServiceResponse} object if successful.
- */
- ServiceResponse<Void> create(String accountName, String directFilePath, byte[] streamContents, Boolean overwrite) throws AdlsErrorException, IOException, IllegalArgumentException;
-
- /**
- * Creates a file with optionally specified content.
- *
- * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
- * @param directFilePath The Data Lake Store path (starting with '/') of the file to create.
- * @param streamContents The file contents to include when creating the file. This parameter is optional, resulting in an empty file if not specified.
- * @param overwrite Whether an existing file should be overwritten.
- * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
- * @return the {@link ServiceCall} object
- */
- ServiceCall createAsync(String accountName, String directFilePath, byte[] streamContents, Boolean overwrite, final ServiceCallback<Void> serviceCallback);
-
- /**
- * Creates a file with optionally specified content.
- *
- * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
- * @param directFilePath The Data Lake Store path (starting with '/') of the file to create.
- * @param streamContents The file contents to include when creating the file. This parameter is optional, resulting in an empty file if not specified.
- * @param overwrite Whether an existing file should be overwritten.
- * @return the {@link ServiceResponse} object if successful.
- */
- Observable<ServiceResponse<Void>> createAsync(String accountName, String directFilePath, byte[] streamContents, Boolean overwrite);
-
- /**
- * Opens and reads from the specified file.
- *
- * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
- * @param directFilePath The Data Lake Store path (starting with '/') of the file to open.
- * @throws AdlsErrorException exception thrown from REST call
- * @throws IOException exception thrown from serialization/deserialization
- * @throws IllegalArgumentException exception thrown from invalid parameters
- * @return the InputStream object wrapped in {@link ServiceResponse} if successful.
- */
- ServiceResponse<InputStream> open(String accountName, String directFilePath) throws AdlsErrorException, IOException, IllegalArgumentException;
-
- /**
- * Opens and reads from the specified file.
- *
- * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
- * @param directFilePath The Data Lake Store path (starting with '/') of the file to open.
- * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
- * @return the {@link ServiceCall} object
- */
- ServiceCall openAsync(String accountName, String directFilePath, final ServiceCallback<InputStream> serviceCallback);
- /**
- * Opens and reads from the specified file.
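A matching sketch for the create overloads (same assumed 'fileSystems' client, placeholder paths):

    // Hypothetical usage: an empty file, then a file with content that
    // overwrites any existing file at the same path.
    fileSystems.create("myadlsaccount", "/data/empty.txt");
    byte[] contents = "hello".getBytes(java.nio.charset.StandardCharsets.UTF_8);
    fileSystems.create("myadlsaccount", "/data/hello.txt", contents, true);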
- *
- * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
- * @param directFilePath The Data Lake Store path (starting with '/') of the file to open.
- * @param length The optional number of bytes to read. If not specified, the file is read from the offset to the end.
- * @param offset The optional byte offset within the file at which to begin reading.
- * @throws AdlsErrorException exception thrown from REST call
- * @throws IOException exception thrown from serialization/deserialization
- * @throws IllegalArgumentException exception thrown from invalid parameters
- * @return the InputStream object wrapped in {@link ServiceResponse} if successful.
- */
- ServiceResponse<InputStream> open(String accountName, String directFilePath, Long length, Long offset) throws AdlsErrorException, IOException, IllegalArgumentException;
-
- /**
- * Opens and reads from the specified file.
- *
- * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
- * @param directFilePath The Data Lake Store path (starting with '/') of the file to open.
- * @param length The optional number of bytes to read. If not specified, the file is read from the offset to the end.
- * @param offset The optional byte offset within the file at which to begin reading.
- * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
- * @return the {@link ServiceCall} object
- */
- ServiceCall openAsync(String accountName, String directFilePath, Long length, Long offset, final ServiceCallback<InputStream> serviceCallback);
-
- /**
- * Opens and reads from the specified file.
- *
- * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
- * @param directFilePath The Data Lake Store path (starting with '/') of the file to open.
- * @param length The optional number of bytes to read. If not specified, the file is read from the offset to the end.
- * @param offset The optional byte offset within the file at which to begin reading.
- * @return the observable to the InputStream object
- */
- Observable<ServiceResponse<InputStream>> openAsync(String accountName, String directFilePath, Long length, Long offset);
-
- /**
- * Sets the Access Control List (ACL) for a file or folder.
- *
- * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
- * @param setAclFilePath The Data Lake Store path (starting with '/') of the file or directory on which to set the ACL.
- * @param aclspec The ACL spec included in ACL creation operations in the format '[default:]user|group|other::r|-w|-x|-'
- * @throws AdlsErrorException exception thrown from REST call
- * @throws IOException exception thrown from serialization/deserialization
- * @throws IllegalArgumentException exception thrown from invalid parameters
- * @return the {@link ServiceResponse} object if successful.
- */
- ServiceResponse<Void> setAcl(String accountName, String setAclFilePath, String aclspec) throws AdlsErrorException, IOException, IllegalArgumentException;
-
- /**
- * Sets the Access Control List (ACL) for a file or folder.
- *
- * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
- * @param setAclFilePath The Data Lake Store path (starting with '/') of the file or directory on which to set the ACL.
- * @param aclspec The ACL spec included in ACL creation operations in the format '[default:]user|group|other::r|-w|-x|-'
- * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
- * @return the {@link ServiceCall} object
- */
- ServiceCall setAclAsync(String accountName, String setAclFilePath, String aclspec, final ServiceCallback<Void> serviceCallback);
-
- /**
- * Sets the Access Control List (ACL) for a file or folder.
- *
- * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
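Returning to the open overloads above, a read sketch under the same assumptions:

    // Hypothetical usage: read 4096 bytes starting at byte offset 1024.
    ServiceResponse<InputStream> response =
        fileSystems.open("myadlsaccount", "/logs/app.log", 4096L, 1024L);
    try (InputStream in = response.getBody()) {
        byte[] buffer = new byte[4096];
        int read;
        while ((read = in.read(buffer)) != -1) {
            // consume buffer[0..read)
        }
    }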
- * @param setAclFilePath The Data Lake Store path (starting with '/') of the file or directory on which to set the ACL. - * @param aclspec The ACL spec included in ACL creation operations in the format '[default:]user|group|other::r|-w|-x|-' - * @return the {@link ServiceResponse} object if successful. - */ - Observable> setAclAsync(String accountName, String setAclFilePath, String aclspec); - - /** - * Modifies existing Access Control List (ACL) entries on a file or folder. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param modifyAclFilePath The Data Lake Store path (starting with '/') of the file or directory with the ACL being modified. - * @param aclspec The ACL specification included in ACL modification operations in the format '[default:]user|group|other::r|-w|-x|-' - * @throws AdlsErrorException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the {@link ServiceResponse} object if successful. - */ - ServiceResponse modifyAclEntries(String accountName, String modifyAclFilePath, String aclspec) throws AdlsErrorException, IOException, IllegalArgumentException; - - /** - * Modifies existing Access Control List (ACL) entries on a file or folder. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param modifyAclFilePath The Data Lake Store path (starting with '/') of the file or directory with the ACL being modified. - * @param aclspec The ACL specification included in ACL modification operations in the format '[default:]user|group|other::r|-w|-x|-' - * @param serviceCallback the async ServiceCallback to handle successful and failed responses. - * @return the {@link ServiceCall} object - */ - ServiceCall modifyAclEntriesAsync(String accountName, String modifyAclFilePath, String aclspec, final ServiceCallback serviceCallback); - - /** - * Modifies existing Access Control List (ACL) entries on a file or folder. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param modifyAclFilePath The Data Lake Store path (starting with '/') of the file or directory with the ACL being modified. - * @param aclspec The ACL specification included in ACL modification operations in the format '[default:]user|group|other::r|-w|-x|-' - * @return the {@link ServiceResponse} object if successful. - */ - Observable> modifyAclEntriesAsync(String accountName, String modifyAclFilePath, String aclspec); - - /** - * Removes existing Access Control List (ACL) entries for a file or folder. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param removeAclFilePath The Data Lake Store path (starting with '/') of the file or directory with the ACL being removed. - * @param aclspec The ACL spec included in ACL removal operations in the format '[default:]user|group|other' - * @throws AdlsErrorException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the {@link ServiceResponse} object if successful. 
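To make the aclspec format concrete, a sketch of setting and then extending an ACL; the object ID is a placeholder and 'fileSystems' is assumed as before:

    // Hypothetical usage: replace the full ACL, then grant a named user
    // read/execute via modifyAclEntries.
    fileSystems.setAcl("myadlsaccount", "/data",
        "user::rwx,group::r-x,other::---");
    fileSystems.modifyAclEntries("myadlsaccount", "/data",
        "user:00000000-0000-0000-0000-000000000000:r-x");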
- */ - ServiceResponse removeAclEntries(String accountName, String removeAclFilePath, String aclspec) throws AdlsErrorException, IOException, IllegalArgumentException; - - /** - * Removes existing Access Control List (ACL) entries for a file or folder. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param removeAclFilePath The Data Lake Store path (starting with '/') of the file or directory with the ACL being removed. - * @param aclspec The ACL spec included in ACL removal operations in the format '[default:]user|group|other' - * @param serviceCallback the async ServiceCallback to handle successful and failed responses. - * @return the {@link ServiceCall} object - */ - ServiceCall removeAclEntriesAsync(String accountName, String removeAclFilePath, String aclspec, final ServiceCallback serviceCallback); - - /** - * Removes existing Access Control List (ACL) entries for a file or folder. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param removeAclFilePath The Data Lake Store path (starting with '/') of the file or directory with the ACL being removed. - * @param aclspec The ACL spec included in ACL removal operations in the format '[default:]user|group|other' - * @return the {@link ServiceResponse} object if successful. - */ - Observable> removeAclEntriesAsync(String accountName, String removeAclFilePath, String aclspec); - - /** - * Gets Access Control List (ACL) entries for the specified file or directory. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param aclFilePath The Data Lake Store path (starting with '/') of the file or directory for which to get the ACL. - * @throws AdlsErrorException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the AclStatusResult object wrapped in {@link ServiceResponse} if successful. - */ - ServiceResponse getAclStatus(String accountName, String aclFilePath) throws AdlsErrorException, IOException, IllegalArgumentException; - - /** - * Gets Access Control List (ACL) entries for the specified file or directory. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param aclFilePath The Data Lake Store path (starting with '/') of the file or directory for which to get the ACL. - * @param serviceCallback the async ServiceCallback to handle successful and failed responses. - * @return the {@link ServiceCall} object - */ - ServiceCall getAclStatusAsync(String accountName, String aclFilePath, final ServiceCallback serviceCallback); - - /** - * Gets Access Control List (ACL) entries for the specified file or directory. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param aclFilePath The Data Lake Store path (starting with '/') of the file or directory for which to get the ACL. - * @return the observable to the AclStatusResult object - */ - Observable> getAclStatusAsync(String accountName, String aclFilePath); - - /** - * Deletes the requested file or directory, optionally recursively. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param filePath The Data Lake Store path (starting with '/') of the file or directory to delete. 
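And the inverse operations, under the same assumptions:

    // Hypothetical usage: remove the named-user entry (removal specs name
    // the entry but omit permission bits), then read back the ACL.
    fileSystems.removeAclEntries("myadlsaccount", "/data",
        "user:00000000-0000-0000-0000-000000000000");
    AclStatusResult acl =
        fileSystems.getAclStatus("myadlsaccount", "/data").getBody();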
- * @throws AdlsErrorException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the FileOperationResult object wrapped in {@link ServiceResponse} if successful. - */ - ServiceResponse delete(String accountName, String filePath) throws AdlsErrorException, IOException, IllegalArgumentException; - - /** - * Deletes the requested file or directory, optionally recursively. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param filePath The Data Lake Store path (starting with '/') of the file or directory to delete. - * @param serviceCallback the async ServiceCallback to handle successful and failed responses. - * @return the {@link ServiceCall} object - */ - ServiceCall deleteAsync(String accountName, String filePath, final ServiceCallback serviceCallback); - /** - * Deletes the requested file or directory, optionally recursively. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param filePath The Data Lake Store path (starting with '/') of the file or directory to delete. - * @param recursive The optional switch indicating if the delete should be recursive - * @throws AdlsErrorException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the FileOperationResult object wrapped in {@link ServiceResponse} if successful. - */ - ServiceResponse delete(String accountName, String filePath, Boolean recursive) throws AdlsErrorException, IOException, IllegalArgumentException; - - /** - * Deletes the requested file or directory, optionally recursively. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param filePath The Data Lake Store path (starting with '/') of the file or directory to delete. - * @param recursive The optional switch indicating if the delete should be recursive - * @param serviceCallback the async ServiceCallback to handle successful and failed responses. - * @return the {@link ServiceCall} object - */ - ServiceCall deleteAsync(String accountName, String filePath, Boolean recursive, final ServiceCallback serviceCallback); - - /** - * Deletes the requested file or directory, optionally recursively. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param filePath The Data Lake Store path (starting with '/') of the file or directory to delete. - * @param recursive The optional switch indicating if the delete should be recursive - * @return the observable to the FileOperationResult object - */ - Observable> deleteAsync(String accountName, String filePath, Boolean recursive); - - /** - * Rename a file or directory. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param renameFilePath The Data Lake Store path (starting with '/') of the file or directory to move/rename. 
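A sketch of the delete overloads (placeholder paths, assumed 'fileSystems' client):

    // Hypothetical usage: plain delete for a file, recursive delete for a
    // directory tree. Deleting a non-empty directory without recursive=true
    // is expected to fail on the service side.
    FileOperationResult result =
        fileSystems.delete("myadlsaccount", "/data/stale.txt").getBody();
    fileSystems.delete("myadlsaccount", "/data/tmp", true);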
- * @param destination The path to move/rename the file or folder to - * @throws AdlsErrorException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the FileOperationResult object wrapped in {@link ServiceResponse} if successful. - */ - ServiceResponse rename(String accountName, String renameFilePath, String destination) throws AdlsErrorException, IOException, IllegalArgumentException; - - /** - * Rename a file or directory. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param renameFilePath The Data Lake Store path (starting with '/') of the file or directory to move/rename. - * @param destination The path to move/rename the file or folder to - * @param serviceCallback the async ServiceCallback to handle successful and failed responses. - * @return the {@link ServiceCall} object - */ - ServiceCall renameAsync(String accountName, String renameFilePath, String destination, final ServiceCallback serviceCallback); - - /** - * Rename a file or directory. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param renameFilePath The Data Lake Store path (starting with '/') of the file or directory to move/rename. - * @param destination The path to move/rename the file or folder to - * @return the observable to the FileOperationResult object - */ - Observable> renameAsync(String accountName, String renameFilePath, String destination); - - /** - * Sets the owner of a file or directory. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param setOwnerFilePath The Data Lake Store path (starting with '/') of the file or directory for which to set the owner. - * @throws AdlsErrorException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the {@link ServiceResponse} object if successful. - */ - ServiceResponse setOwner(String accountName, String setOwnerFilePath) throws AdlsErrorException, IOException, IllegalArgumentException; - - /** - * Sets the owner of a file or directory. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param setOwnerFilePath The Data Lake Store path (starting with '/') of the file or directory for which to set the owner. - * @param serviceCallback the async ServiceCallback to handle successful and failed responses. - * @return the {@link ServiceCall} object - */ - ServiceCall setOwnerAsync(String accountName, String setOwnerFilePath, final ServiceCallback serviceCallback); - /** - * Sets the owner of a file or directory. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param setOwnerFilePath The Data Lake Store path (starting with '/') of the file or directory for which to set the owner. - * @param owner The AAD Object ID of the user owner of the file or directory. If empty, the property will remain unchanged. - * @param group The AAD Object ID of the group owner of the file or directory. If empty, the property will remain unchanged. 
- * @throws AdlsErrorException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the {@link ServiceResponse} object if successful. - */ - ServiceResponse setOwner(String accountName, String setOwnerFilePath, String owner, String group) throws AdlsErrorException, IOException, IllegalArgumentException; - - /** - * Sets the owner of a file or directory. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param setOwnerFilePath The Data Lake Store path (starting with '/') of the file or directory for which to set the owner. - * @param owner The AAD Object ID of the user owner of the file or directory. If empty, the property will remain unchanged. - * @param group The AAD Object ID of the group owner of the file or directory. If empty, the property will remain unchanged. - * @param serviceCallback the async ServiceCallback to handle successful and failed responses. - * @return the {@link ServiceCall} object - */ - ServiceCall setOwnerAsync(String accountName, String setOwnerFilePath, String owner, String group, final ServiceCallback serviceCallback); - - /** - * Sets the owner of a file or directory. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param setOwnerFilePath The Data Lake Store path (starting with '/') of the file or directory for which to set the owner. - * @param owner The AAD Object ID of the user owner of the file or directory. If empty, the property will remain unchanged. - * @param group The AAD Object ID of the group owner of the file or directory. If empty, the property will remain unchanged. - * @return the {@link ServiceResponse} object if successful. - */ - Observable> setOwnerAsync(String accountName, String setOwnerFilePath, String owner, String group); - - /** - * Sets the permission of the file or folder. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param setPermissionFilePath The Data Lake Store path (starting with '/') of the file or directory for which to set the permission. - * @throws AdlsErrorException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the {@link ServiceResponse} object if successful. - */ - ServiceResponse setPermission(String accountName, String setPermissionFilePath) throws AdlsErrorException, IOException, IllegalArgumentException; - - /** - * Sets the permission of the file or folder. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param setPermissionFilePath The Data Lake Store path (starting with '/') of the file or directory for which to set the permission. - * @param serviceCallback the async ServiceCallback to handle successful and failed responses. - * @return the {@link ServiceCall} object - */ - ServiceCall setPermissionAsync(String accountName, String setPermissionFilePath, final ServiceCallback serviceCallback); - /** - * Sets the permission of the file or folder. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param setPermissionFilePath The Data Lake Store path (starting with '/') of the file or directory for which to set the permission. 
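A combined sketch for the ownership and permission setters; the AAD object ID is a placeholder, and the octal permission string follows WebHDFS conventions rather than the 'rwx' wording in the generated doc, so verify the accepted format against the service documentation:

    // Hypothetical usage: change only the owning group (an empty owner
    // leaves it unchanged, per the docs above), then set permission bits.
    fileSystems.setOwner("myadlsaccount", "/data",
        "", "11111111-1111-1111-1111-111111111111");
    fileSystems.setPermission("myadlsaccount", "/data", "750");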
- * @param permission A string representation of the permission (i.e. 'rwx'). If empty, this property remains unchanged.
- * @throws AdlsErrorException exception thrown from REST call
- * @throws IOException exception thrown from serialization/deserialization
- * @throws IllegalArgumentException exception thrown from invalid parameters
- * @return the {@link ServiceResponse} object if successful.
- */
- ServiceResponse<Void> setPermission(String accountName, String setPermissionFilePath, String permission) throws AdlsErrorException, IOException, IllegalArgumentException;
-
- /**
- * Sets the permission of the file or folder.
- *
- * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
- * @param setPermissionFilePath The Data Lake Store path (starting with '/') of the file or directory for which to set the permission.
- * @param permission A string representation of the permission (i.e. 'rwx'). If empty, this property remains unchanged.
- * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
- * @return the {@link ServiceCall} object
- */
- ServiceCall setPermissionAsync(String accountName, String setPermissionFilePath, String permission, final ServiceCallback<Void> serviceCallback);
-
- /**
- * Sets the permission of the file or folder.
- *
- * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
- * @param setPermissionFilePath The Data Lake Store path (starting with '/') of the file or directory for which to set the permission.
- * @param permission A string representation of the permission (i.e. 'rwx'). If empty, this property remains unchanged.
- * @return the {@link ServiceResponse} object if successful.
- */
- Observable<ServiceResponse<Void>> setPermissionAsync(String accountName, String setPermissionFilePath, String permission);
-
-}
diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/implementation/AccountsImpl.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/implementation/AccountsImpl.java
index 61d11db012a4..15aaca8a9440 100644
--- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/implementation/AccountsImpl.java
+++ b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/implementation/AccountsImpl.java
@@ -20,7 +20,6 @@
 import com.microsoft.azure.management.datalake.store.models.PageImpl;
 import com.microsoft.azure.Page;
 import com.microsoft.azure.PagedList;
-import com.microsoft.rest.RestException;
 import com.microsoft.rest.ServiceCall;
 import com.microsoft.rest.ServiceCallback;
 import com.microsoft.rest.ServiceResponse;
@@ -35,6 +34,7 @@
 import retrofit2.http.HTTP;
 import retrofit2.http.PATCH;
 import retrofit2.http.Path;
+import retrofit2.http.POST;
 import retrofit2.http.PUT;
 import retrofit2.http.Query;
 import retrofit2.Response;
@@ -111,6 +111,10 @@ interface AccountsService {
 @GET("subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}")
 Observable<Response<ResponseBody>> get(@Path("resourceGroupName") String resourceGroupName, @Path("accountName") String accountName, @Path("subscriptionId") String subscriptionId, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("User-Agent") String userAgent);
+ @Headers("Content-Type: application/json; charset=utf-8")
+
@POST("subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/enableKeyVault") + Observable> enableKeyVault(@Path("resourceGroupName") String resourceGroupName, @Path("accountName") String accountName, @Path("subscriptionId") String subscriptionId, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("User-Agent") String userAgent); + @Headers("Content-Type: application/json; charset=utf-8") @GET("subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts") Observable> listByResourceGroup(@Path("resourceGroupName") String resourceGroupName, @Path("subscriptionId") String subscriptionId, @Query("$filter") String filter, @Query("$top") Integer top, @Query("$skip") Integer skip, @Query("$expand") String expand, @Query("$select") String select, @Query("$orderby") String orderby, @Query("$count") Boolean count, @Query("$search") String search, @Query("$format") String format, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("User-Agent") String userAgent); @@ -139,13 +143,9 @@ interface AccountsService { * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. * @param accountName The name of the Data Lake Store account from which to delete the firewall rule. * @param firewallRuleName The name of the firewall rule to delete. - * @throws CloudException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the {@link ServiceResponse} object if successful. */ - public ServiceResponse deleteFirewallRule(String resourceGroupName, String accountName, String firewallRuleName) throws CloudException, IOException, IllegalArgumentException { - return deleteFirewallRuleAsync(resourceGroupName, accountName, firewallRuleName).toBlocking().single(); + public void deleteFirewallRule(String resourceGroupName, String accountName, String firewallRuleName) { + deleteFirewallRuleWithServiceResponseAsync(resourceGroupName, accountName, firewallRuleName).toBlocking().single().getBody(); } /** @@ -158,7 +158,7 @@ public ServiceResponse deleteFirewallRule(String resourceGroupName, String * @return the {@link ServiceCall} object */ public ServiceCall deleteFirewallRuleAsync(String resourceGroupName, String accountName, String firewallRuleName, final ServiceCallback serviceCallback) { - return ServiceCall.create(deleteFirewallRuleAsync(resourceGroupName, accountName, firewallRuleName), serviceCallback); + return ServiceCall.create(deleteFirewallRuleWithServiceResponseAsync(resourceGroupName, accountName, firewallRuleName), serviceCallback); } /** @@ -169,7 +169,24 @@ public ServiceCall deleteFirewallRuleAsync(String resourceGroupName, Strin * @param firewallRuleName The name of the firewall rule to delete. * @return the {@link ServiceResponse} object if successful. 
*/ - public Observable> deleteFirewallRuleAsync(String resourceGroupName, String accountName, String firewallRuleName) { + public Observable deleteFirewallRuleAsync(String resourceGroupName, String accountName, String firewallRuleName) { + return deleteFirewallRuleWithServiceResponseAsync(resourceGroupName, accountName, firewallRuleName).map(new Func1, Void>() { + @Override + public Void call(ServiceResponse response) { + return response.getBody(); + } + }); + } + + /** + * Deletes the specified firewall rule from the specified Data Lake Store account. + * + * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. + * @param accountName The name of the Data Lake Store account from which to delete the firewall rule. + * @param firewallRuleName The name of the firewall rule to delete. + * @return the {@link ServiceResponse} object if successful. + */ + public Observable> deleteFirewallRuleWithServiceResponseAsync(String resourceGroupName, String accountName, String firewallRuleName) { if (resourceGroupName == null) { throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."); } @@ -212,13 +229,10 @@ private ServiceResponse deleteFirewallRuleDelegate(Response * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. * @param accountName The name of the Data Lake Store account from which to get the firewall rule. * @param firewallRuleName The name of the firewall rule to retrieve. - * @throws CloudException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the FirewallRule object wrapped in {@link ServiceResponse} if successful. + * @return the FirewallRule object if successful. */ - public ServiceResponse getFirewallRule(String resourceGroupName, String accountName, String firewallRuleName) throws CloudException, IOException, IllegalArgumentException { - return getFirewallRuleAsync(resourceGroupName, accountName, firewallRuleName).toBlocking().single(); + public FirewallRule getFirewallRule(String resourceGroupName, String accountName, String firewallRuleName) { + return getFirewallRuleWithServiceResponseAsync(resourceGroupName, accountName, firewallRuleName).toBlocking().single().getBody(); } /** @@ -231,7 +245,7 @@ public ServiceResponse getFirewallRule(String resourceGroupName, S * @return the {@link ServiceCall} object */ public ServiceCall getFirewallRuleAsync(String resourceGroupName, String accountName, String firewallRuleName, final ServiceCallback serviceCallback) { - return ServiceCall.create(getFirewallRuleAsync(resourceGroupName, accountName, firewallRuleName), serviceCallback); + return ServiceCall.create(getFirewallRuleWithServiceResponseAsync(resourceGroupName, accountName, firewallRuleName), serviceCallback); } /** @@ -242,7 +256,24 @@ public ServiceCall getFirewallRuleAsync(String resourceGroupName, * @param firewallRuleName The name of the firewall rule to retrieve. 
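The refactor visible above is mechanical: every operation now exposes a body-typed Observable plus a ...WithServiceResponseAsync variant, and the former simply maps getBody() over the latter. From the caller's side (a sketch; 'accounts' is an assumed AccountsImpl instance, RxJava 1 lambdas used for brevity):

    // Hypothetical usage of the two async flavors.
    accounts.getFirewallRuleAsync("myResourceGroup", "myadlsaccount", "rule1")
        .subscribe(rule -> System.out.println("got rule: " + rule));

    // Keep the wire-level ServiceResponse when the raw HTTP result matters:
    accounts.getFirewallRuleWithServiceResponseAsync("myResourceGroup", "myadlsaccount", "rule1")
        .subscribe(response -> System.out.println(response.getResponse().code()));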
* @return the observable to the FirewallRule object */ - public Observable> getFirewallRuleAsync(String resourceGroupName, String accountName, String firewallRuleName) { + public Observable getFirewallRuleAsync(String resourceGroupName, String accountName, String firewallRuleName) { + return getFirewallRuleWithServiceResponseAsync(resourceGroupName, accountName, firewallRuleName).map(new Func1, FirewallRule>() { + @Override + public FirewallRule call(ServiceResponse response) { + return response.getBody(); + } + }); + } + + /** + * Gets the specified Data Lake Store firewall rule. + * + * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. + * @param accountName The name of the Data Lake Store account from which to get the firewall rule. + * @param firewallRuleName The name of the firewall rule to retrieve. + * @return the observable to the FirewallRule object + */ + public Observable> getFirewallRuleWithServiceResponseAsync(String resourceGroupName, String accountName, String firewallRuleName) { if (resourceGroupName == null) { throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."); } @@ -284,20 +315,16 @@ private ServiceResponse getFirewallRuleDelegate(Response> listFirewallRules(final String resourceGroupName, final String accountName) throws CloudException, IOException, IllegalArgumentException { + public PagedList listFirewallRules(final String resourceGroupName, final String accountName) { ServiceResponse> response = listFirewallRulesSinglePageAsync(resourceGroupName, accountName).toBlocking().single(); - PagedList pagedList = new PagedList(response.getBody()) { + return new PagedList(response.getBody()) { @Override - public Page nextPage(String nextPageLink) throws RestException, IOException { + public Page nextPage(String nextPageLink) { return listFirewallRulesNextSinglePageAsync(nextPageLink).toBlocking().single().getBody(); } }; - return new ServiceResponse>(pagedList, response.getResponse()); } /** @@ -325,15 +352,35 @@ public Observable>> call(String nextPageLink) * * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. * @param accountName The name of the Data Lake Store account from which to get the firewall rules. - * @return the observable to the List<FirewallRule> object + * @return the observable to the PagedList<FirewallRule> object */ - public Observable>> listFirewallRulesAsync(final String resourceGroupName, final String accountName) { + public Observable> listFirewallRulesAsync(final String resourceGroupName, final String accountName) { + return listFirewallRulesWithServiceResponseAsync(resourceGroupName, accountName) + .map(new Func1>, Page>() { + @Override + public Page call(ServiceResponse> response) { + return response.getBody(); + } + }); + } + + /** + * Lists the Data Lake Store firewall rules within the specified Data Lake Store account. + * + * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. + * @param accountName The name of the Data Lake Store account from which to get the firewall rules. 
+ * @return the observable to the PagedList<FirewallRule> object
+ */
+ public Observable<ServiceResponse<Page<FirewallRule>>> listFirewallRulesWithServiceResponseAsync(final String resourceGroupName, final String accountName) {
 return listFirewallRulesSinglePageAsync(resourceGroupName, accountName)
 .concatMap(new Func1<ServiceResponse<Page<FirewallRule>>, Observable<ServiceResponse<Page<FirewallRule>>>>() {
 @Override
 public Observable<ServiceResponse<Page<FirewallRule>>> call(ServiceResponse<Page<FirewallRule>> page) {
 String nextPageLink = page.getBody().getNextPageLink();
- return listFirewallRulesNextSinglePageAsync(nextPageLink);
+ if (nextPageLink == null) {
+ return Observable.just(page);
+ }
+ return Observable.just(page).concatWith(listFirewallRulesNextWithServiceResponseAsync(nextPageLink));
 }
 });
 }
@@ -343,7 +390,7 @@ public Observable<ServiceResponse<Page<FirewallRule>>> call(ServiceResponse<Pag
 * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account.
ServiceResponse<PageImpl<FirewallRule>> * @param accountName The name of the Data Lake Store account from which to get the firewall rules.
- * @return the List<FirewallRule> object wrapped in {@link ServiceResponse} if successful.
+ * @return the PagedList<FirewallRule> object wrapped in {@link ServiceResponse} if successful.
 */
 public Observable<ServiceResponse<Page<FirewallRule>>> listFirewallRulesSinglePageAsync(final String resourceGroupName, final String accountName) {
 if (resourceGroupName == null) {
@@ -386,13 +433,10 @@ private ServiceResponse<PageImpl<FirewallRule>> listFirewallRulesDelegate(Respon
 * @param accountName The name of the Data Lake Store account to which to add the firewall rule.
 * @param name The name of the firewall rule to create or update.
 * @param parameters Parameters supplied to create the firewall rule.
- * @throws CloudException exception thrown from REST call
- * @throws IOException exception thrown from serialization/deserialization
- * @throws IllegalArgumentException exception thrown from invalid parameters
- * @return the FirewallRule object wrapped in {@link ServiceResponse} if successful.
+ * @return the FirewallRule object if successful.
 */
- public ServiceResponse<FirewallRule> createOrUpdateFirewallRule(String resourceGroupName, String accountName, String name, FirewallRule parameters) throws CloudException, IOException, IllegalArgumentException {
- return createOrUpdateFirewallRuleAsync(resourceGroupName, accountName, name, parameters).toBlocking().single();
+ public FirewallRule createOrUpdateFirewallRule(String resourceGroupName, String accountName, String name, FirewallRule parameters) {
+ return createOrUpdateFirewallRuleWithServiceResponseAsync(resourceGroupName, accountName, name, parameters).toBlocking().single().getBody();
 }
 /**
@@ -406,7 +450,25 @@ public ServiceResponse<FirewallRule> createOrUpdateFirewallRule(String resourceG
 * @return the {@link ServiceCall} object
 */
 public ServiceCall createOrUpdateFirewallRuleAsync(String resourceGroupName, String accountName, String name, FirewallRule parameters, final ServiceCallback<FirewallRule> serviceCallback) {
- return ServiceCall.create(createOrUpdateFirewallRuleAsync(resourceGroupName, accountName, name, parameters), serviceCallback);
+ return ServiceCall.create(createOrUpdateFirewallRuleWithServiceResponseAsync(resourceGroupName, accountName, name, parameters), serviceCallback);
+ }
+
+ /**
+ * Creates or updates the specified firewall rule.
+ *
+ * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account.
+ * @param accountName The name of the Data Lake Store account to which to add the firewall rule.
+ * @param name The name of the firewall rule to create or update.
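Caller-side, the paging rework above means the blocking list call returns a PagedList whose iteration transparently follows nextPageLink; the null check added in the concatMap is what terminates the concatWith chain once the service stops returning a continuation link. A sketch with placeholder names:

    // Hypothetical usage: iterating past the first page triggers the
    // listFirewallRulesNext* calls defined later in this file.
    PagedList<FirewallRule> rules =
        accounts.listFirewallRules("myResourceGroup", "myadlsaccount");
    for (FirewallRule rule : rules) {
        System.out.println(rule);
    }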
+ * @param parameters Parameters supplied to create the firewall rule.
+ * @return the observable to the FirewallRule object
+ */
+ public Observable<FirewallRule> createOrUpdateFirewallRuleAsync(String resourceGroupName, String accountName, String name, FirewallRule parameters) {
+ return createOrUpdateFirewallRuleWithServiceResponseAsync(resourceGroupName, accountName, name, parameters).map(new Func1<ServiceResponse<FirewallRule>, FirewallRule>() {
+ @Override
+ public FirewallRule call(ServiceResponse<FirewallRule> response) {
+ return response.getBody();
+ }
+ });
 }
 /**
@@ -418,7 +480,7 @@ public ServiceCall createOrUpdateFirewallRuleAsync(String resource
 * @param parameters Parameters supplied to create the firewall rule.
 * @return the observable to the FirewallRule object
 */
- public Observable<ServiceResponse<FirewallRule>> createOrUpdateFirewallRuleAsync(String resourceGroupName, String accountName, String name, FirewallRule parameters) {
+ public Observable<ServiceResponse<FirewallRule>> createOrUpdateFirewallRuleWithServiceResponseAsync(String resourceGroupName, String accountName, String name, FirewallRule parameters) {
 if (resourceGroupName == null) {
 throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.");
 }
@@ -465,14 +527,10 @@ private ServiceResponse<FirewallRule> createOrUpdateFirewallRuleDelegate(Respons
 * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account.
 * @param name The name of the Data Lake Store account to create.
 * @param parameters Parameters supplied to create the Data Lake Store account.
- * @throws CloudException exception thrown from REST call
- * @throws IOException exception thrown from serialization/deserialization
- * @throws IllegalArgumentException exception thrown from invalid parameters
- * @throws InterruptedException exception thrown when long running operation is interrupted
- * @return the DataLakeStoreAccount object wrapped in ServiceResponse if successful.
+ * @return the DataLakeStoreAccount object if successful.
 */
- public ServiceResponse<DataLakeStoreAccount> create(String resourceGroupName, String name, DataLakeStoreAccount parameters) throws CloudException, IOException, IllegalArgumentException, InterruptedException {
- return createAsync(resourceGroupName, name, parameters).toBlocking().last();
+ public DataLakeStoreAccount create(String resourceGroupName, String name, DataLakeStoreAccount parameters) {
+ return createWithServiceResponseAsync(resourceGroupName, name, parameters).toBlocking().last().getBody();
 }
 /**
@@ -485,7 +543,24 @@ public ServiceResponse<DataLakeStoreAccount> create(String resourceGroupName, St
 * @return the {@link ServiceCall} object
 */
 public ServiceCall createAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters, final ServiceCallback<DataLakeStoreAccount> serviceCallback) {
- return ServiceCall.create(createAsync(resourceGroupName, name, parameters), serviceCallback);
+ return ServiceCall.create(createWithServiceResponseAsync(resourceGroupName, name, parameters), serviceCallback);
+ }
+
+ /**
+ * Creates the specified Data Lake Store account.
+ *
+ * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account.
+ * @param name The name of the Data Lake Store account to create.
+ * @param parameters Parameters supplied to create the Data Lake Store account.
+ * @return the observable for the request + */ + public Observable createAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters) { + return createWithServiceResponseAsync(resourceGroupName, name, parameters).map(new Func1, DataLakeStoreAccount>() { + @Override + public DataLakeStoreAccount call(ServiceResponse response) { + return response.getBody(); + } + }); } /** @@ -496,7 +571,7 @@ public ServiceCall createAsync(String resourceGroupName, S * @param parameters Parameters supplied to create the Data Lake Store account. * @return the observable for the request */ - public Observable> createAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters) { + public Observable> createWithServiceResponseAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters) { if (resourceGroupName == null) { throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."); } @@ -523,13 +598,10 @@ public Observable> createAsync(String reso * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. * @param name The name of the Data Lake Store account to create. * @param parameters Parameters supplied to create the Data Lake Store account. - * @throws CloudException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the DataLakeStoreAccount object wrapped in {@link ServiceResponse} if successful. + * @return the DataLakeStoreAccount object if successful. */ - public ServiceResponse beginCreate(String resourceGroupName, String name, DataLakeStoreAccount parameters) throws CloudException, IOException, IllegalArgumentException { - return beginCreateAsync(resourceGroupName, name, parameters).toBlocking().single(); + public DataLakeStoreAccount beginCreate(String resourceGroupName, String name, DataLakeStoreAccount parameters) { + return beginCreateWithServiceResponseAsync(resourceGroupName, name, parameters).toBlocking().single().getBody(); } /** @@ -542,7 +614,24 @@ public ServiceResponse beginCreate(String resourceGroupNam * @return the {@link ServiceCall} object */ public ServiceCall beginCreateAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters, final ServiceCallback serviceCallback) { - return ServiceCall.create(beginCreateAsync(resourceGroupName, name, parameters), serviceCallback); + return ServiceCall.create(beginCreateWithServiceResponseAsync(resourceGroupName, name, parameters), serviceCallback); + } + + /** + * Creates the specified Data Lake Store account. + * + * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. + * @param name The name of the Data Lake Store account to create. + * @param parameters Parameters supplied to create the Data Lake Store account. + * @return the observable to the DataLakeStoreAccount object + */ + public Observable beginCreateAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters) { + return beginCreateWithServiceResponseAsync(resourceGroupName, name, parameters).map(new Func1, DataLakeStoreAccount>() { + @Override + public DataLakeStoreAccount call(ServiceResponse response) { + return response.getBody(); + } + }); } /** @@ -553,7 +642,7 @@ public ServiceCall beginCreateAsync(String resourceGroupNa * @param parameters Parameters supplied to create the Data Lake Store account. 
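Note the long-running-operation split above: create blocks through toBlocking().last(), waiting for the final polling event, while beginCreate only issues the initial request and returns after the first response. A caller sketch (account configuration elided; names are placeholders):

    // Hypothetical usage: blocks until provisioning completes.
    DataLakeStoreAccount parameters = new DataLakeStoreAccount();
    // ...populate location and account properties as required...
    DataLakeStoreAccount created =
        accounts.create("myResourceGroup", "myadlsaccount", parameters);

    // Or fire only the initial PUT and poll separately:
    DataLakeStoreAccount accepted =
        accounts.beginCreate("myResourceGroup", "myadlsaccount", parameters);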
* @return the observable to the DataLakeStoreAccount object */ - public Observable> beginCreateAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters) { + public Observable> beginCreateWithServiceResponseAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters) { if (resourceGroupName == null) { throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."); } @@ -598,14 +687,10 @@ private ServiceResponse beginCreateDelegate(Response update(String resourceGroupName, String name, DataLakeStoreAccount parameters) throws CloudException, IOException, IllegalArgumentException, InterruptedException { - return updateAsync(resourceGroupName, name, parameters).toBlocking().last(); + public DataLakeStoreAccount update(String resourceGroupName, String name, DataLakeStoreAccount parameters) { + return updateWithServiceResponseAsync(resourceGroupName, name, parameters).toBlocking().last().getBody(); } /** @@ -618,7 +703,7 @@ public ServiceResponse update(String resourceGroupName, St * @return the {@link ServiceCall} object */ public ServiceCall updateAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters, final ServiceCallback serviceCallback) { - return ServiceCall.create(updateAsync(resourceGroupName, name, parameters), serviceCallback); + return ServiceCall.create(updateWithServiceResponseAsync(resourceGroupName, name, parameters), serviceCallback); } /** @@ -629,7 +714,24 @@ public ServiceCall updateAsync(String resourceGroupName, S * @param parameters Parameters supplied to update the Data Lake Store account. * @return the observable for the request */ - public Observable> updateAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters) { + public Observable updateAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters) { + return updateWithServiceResponseAsync(resourceGroupName, name, parameters).map(new Func1, DataLakeStoreAccount>() { + @Override + public DataLakeStoreAccount call(ServiceResponse response) { + return response.getBody(); + } + }); + } + + /** + * Updates the specified Data Lake Store account information. + * + * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. + * @param name The name of the Data Lake Store account to update. + * @param parameters Parameters supplied to update the Data Lake Store account. + * @return the observable for the request + */ + public Observable> updateWithServiceResponseAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters) { if (resourceGroupName == null) { throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."); } @@ -656,13 +758,10 @@ public Observable> updateAsync(String reso * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. * @param name The name of the Data Lake Store account to update. * @param parameters Parameters supplied to update the Data Lake Store account. - * @throws CloudException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the DataLakeStoreAccount object wrapped in {@link ServiceResponse} if successful. + * @return the DataLakeStoreAccount object if successful. 
*/ - public ServiceResponse beginUpdate(String resourceGroupName, String name, DataLakeStoreAccount parameters) throws CloudException, IOException, IllegalArgumentException { - return beginUpdateAsync(resourceGroupName, name, parameters).toBlocking().single(); + public DataLakeStoreAccount beginUpdate(String resourceGroupName, String name, DataLakeStoreAccount parameters) { + return beginUpdateWithServiceResponseAsync(resourceGroupName, name, parameters).toBlocking().single().getBody(); } /** @@ -675,7 +774,7 @@ public ServiceResponse beginUpdate(String resourceGroupNam * @return the {@link ServiceCall} object */ public ServiceCall beginUpdateAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters, final ServiceCallback serviceCallback) { - return ServiceCall.create(beginUpdateAsync(resourceGroupName, name, parameters), serviceCallback); + return ServiceCall.create(beginUpdateWithServiceResponseAsync(resourceGroupName, name, parameters), serviceCallback); } /** @@ -686,7 +785,24 @@ public ServiceCall beginUpdateAsync(String resourceGroupNa * @param parameters Parameters supplied to update the Data Lake Store account. * @return the observable to the DataLakeStoreAccount object */ - public Observable> beginUpdateAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters) { + public Observable beginUpdateAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters) { + return beginUpdateWithServiceResponseAsync(resourceGroupName, name, parameters).map(new Func1, DataLakeStoreAccount>() { + @Override + public DataLakeStoreAccount call(ServiceResponse response) { + return response.getBody(); + } + }); + } + + /** + * Updates the specified Data Lake Store account information. + * + * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. + * @param name The name of the Data Lake Store account to update. + * @param parameters Parameters supplied to update the Data Lake Store account. + * @return the observable to the DataLakeStoreAccount object + */ + public Observable> beginUpdateWithServiceResponseAsync(String resourceGroupName, String name, DataLakeStoreAccount parameters) { if (resourceGroupName == null) { throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."); } @@ -730,14 +846,9 @@ private ServiceResponse beginUpdateDelegate(Response delete(String resourceGroupName, String accountName) throws CloudException, IOException, IllegalArgumentException, InterruptedException { - return deleteAsync(resourceGroupName, accountName).toBlocking().last(); + public void delete(String resourceGroupName, String accountName) { + deleteWithServiceResponseAsync(resourceGroupName, accountName).toBlocking().last().getBody(); } /** @@ -749,7 +860,7 @@ public ServiceResponse delete(String resourceGroupName, String accountName * @return the {@link ServiceCall} object */ public ServiceCall deleteAsync(String resourceGroupName, String accountName, final ServiceCallback serviceCallback) { - return ServiceCall.create(deleteAsync(resourceGroupName, accountName), serviceCallback); + return ServiceCall.create(deleteWithServiceResponseAsync(resourceGroupName, accountName), serviceCallback); } /** @@ -759,7 +870,23 @@ public ServiceCall deleteAsync(String resourceGroupName, String accountNam * @param accountName The name of the Data Lake Store account to delete. 
* @return the observable for the request */ - public Observable> deleteAsync(String resourceGroupName, String accountName) { + public Observable deleteAsync(String resourceGroupName, String accountName) { + return deleteWithServiceResponseAsync(resourceGroupName, accountName).map(new Func1, Void>() { + @Override + public Void call(ServiceResponse response) { + return response.getBody(); + } + }); + } + + /** + * Deletes the specified Data Lake Store account. + * + * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. + * @param accountName The name of the Data Lake Store account to delete. + * @return the observable for the request + */ + public Observable> deleteWithServiceResponseAsync(String resourceGroupName, String accountName) { if (resourceGroupName == null) { throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."); } @@ -781,13 +908,9 @@ public Observable> deleteAsync(String resourceGroupName, S * * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. * @param accountName The name of the Data Lake Store account to delete. - * @throws CloudException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the {@link ServiceResponse} object if successful. */ - public ServiceResponse beginDelete(String resourceGroupName, String accountName) throws CloudException, IOException, IllegalArgumentException { - return beginDeleteAsync(resourceGroupName, accountName).toBlocking().single(); + public void beginDelete(String resourceGroupName, String accountName) { + beginDeleteWithServiceResponseAsync(resourceGroupName, accountName).toBlocking().single().getBody(); } /** @@ -799,7 +922,23 @@ public ServiceResponse beginDelete(String resourceGroupName, String accoun * @return the {@link ServiceCall} object */ public ServiceCall beginDeleteAsync(String resourceGroupName, String accountName, final ServiceCallback serviceCallback) { - return ServiceCall.create(beginDeleteAsync(resourceGroupName, accountName), serviceCallback); + return ServiceCall.create(beginDeleteWithServiceResponseAsync(resourceGroupName, accountName), serviceCallback); + } + + /** + * Deletes the specified Data Lake Store account. + * + * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. + * @param accountName The name of the Data Lake Store account to delete. + * @return the {@link ServiceResponse} object if successful. + */ + public Observable beginDeleteAsync(String resourceGroupName, String accountName) { + return beginDeleteWithServiceResponseAsync(resourceGroupName, accountName).map(new Func1, Void>() { + @Override + public Void call(ServiceResponse response) { + return response.getBody(); + } + }); } /** @@ -809,7 +948,7 @@ public ServiceCall beginDeleteAsync(String resourceGroupName, String accou * @param accountName The name of the Data Lake Store account to delete. * @return the {@link ServiceResponse} object if successful. 
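The delete pair keeps the long-running-operation split used throughout this client: delete blocks until the LRO finishes (toBlocking().last()), while beginDelete only issues the initial request (toBlocking().single()). A sketch, reusing the hypothetical `accounts` instance from above:

    accounts.delete("myResourceGroup", "myAccount");      // void; returns after the LRO completes

    accounts.deleteAsync("myResourceGroup", "myAccount")  // Observable<Void>
        .subscribe(new Action1<Void>() {
            @Override
            public void call(Void ignored) {
                // delete carries no body; completion of the stream signals success
            }
        });

    accounts.beginDelete("myResourceGroup", "myAccount"); // initial call only; poll separately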
*/ - public Observable> beginDeleteAsync(String resourceGroupName, String accountName) { + public Observable> beginDeleteWithServiceResponseAsync(String resourceGroupName, String accountName) { if (resourceGroupName == null) { throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."); } @@ -850,13 +989,10 @@ private ServiceResponse beginDeleteDelegate(Response respons * * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. * @param accountName The name of the Data Lake Store account to retrieve. - * @throws CloudException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the DataLakeStoreAccount object wrapped in {@link ServiceResponse} if successful. + * @return the DataLakeStoreAccount object if successful. */ - public ServiceResponse get(String resourceGroupName, String accountName) throws CloudException, IOException, IllegalArgumentException { - return getAsync(resourceGroupName, accountName).toBlocking().single(); + public DataLakeStoreAccount get(String resourceGroupName, String accountName) { + return getWithServiceResponseAsync(resourceGroupName, accountName).toBlocking().single().getBody(); } /** @@ -868,7 +1004,23 @@ public ServiceResponse get(String resourceGroupName, Strin * @return the {@link ServiceCall} object */ public ServiceCall getAsync(String resourceGroupName, String accountName, final ServiceCallback serviceCallback) { - return ServiceCall.create(getAsync(resourceGroupName, accountName), serviceCallback); + return ServiceCall.create(getWithServiceResponseAsync(resourceGroupName, accountName), serviceCallback); + } + + /** + * Gets the specified Data Lake Store account. + * + * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. + * @param accountName The name of the Data Lake Store account to retrieve. + * @return the observable to the DataLakeStoreAccount object + */ + public Observable getAsync(String resourceGroupName, String accountName) { + return getWithServiceResponseAsync(resourceGroupName, accountName).map(new Func1, DataLakeStoreAccount>() { + @Override + public DataLakeStoreAccount call(ServiceResponse response) { + return response.getBody(); + } + }); } /** @@ -878,7 +1030,7 @@ public ServiceCall getAsync(String resourceGroupName, Stri * @param accountName The name of the Data Lake Store account to retrieve. * @return the observable to the DataLakeStoreAccount object */ - public Observable> getAsync(String resourceGroupName, String accountName) { + public Observable> getWithServiceResponseAsync(String resourceGroupName, String accountName) { if (resourceGroupName == null) { throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."); } @@ -912,24 +1064,98 @@ private ServiceResponse getDelegate(Response .build(response); } + /** + * Attempts to enable a user managed key vault for encryption of the specified Data Lake Store account. + * + * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account. + * @param accountName The name of the Data Lake Store account to attempt to enable the Key Vault for. 
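Retrieval follows suit: the blocking get now hands back the model directly rather than a ServiceResponse wrapper. Sketch (names hypothetical):

    DataLakeStoreAccount account = accounts.get("myResourceGroup", "myAccount");

    accounts.getAsync("myResourceGroup", "myAccount")
        .subscribe(new Action1<DataLakeStoreAccount>() {
            @Override
            public void call(DataLakeStoreAccount result) {
                // inspect the retrieved account
            }
        });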
+ */
+    public void enableKeyVault(String resourceGroupName, String accountName) {
+        enableKeyVaultWithServiceResponseAsync(resourceGroupName, accountName).toBlocking().single().getBody();
+    }
+
+    /**
+     * Attempts to enable a user managed key vault for encryption of the specified Data Lake Store account.
+     *
+     * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account.
+     * @param accountName The name of the Data Lake Store account to attempt to enable the Key Vault for.
+     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
+     * @return the {@link ServiceCall} object
+     */
+    public ServiceCall<Void> enableKeyVaultAsync(String resourceGroupName, String accountName, final ServiceCallback<Void> serviceCallback) {
+        return ServiceCall.create(enableKeyVaultWithServiceResponseAsync(resourceGroupName, accountName), serviceCallback);
+    }
+
+    /**
+     * Attempts to enable a user managed key vault for encryption of the specified Data Lake Store account.
+     *
+     * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account.
+     * @param accountName The name of the Data Lake Store account to attempt to enable the Key Vault for.
+     * @return the {@link ServiceResponse} object if successful.
+     */
+    public Observable<Void> enableKeyVaultAsync(String resourceGroupName, String accountName) {
+        return enableKeyVaultWithServiceResponseAsync(resourceGroupName, accountName).map(new Func1<ServiceResponse<Void>, Void>() {
+            @Override
+            public Void call(ServiceResponse<Void> response) {
+                return response.getBody();
+            }
+        });
+    }
+
+    /**
+     * Attempts to enable a user managed key vault for encryption of the specified Data Lake Store account.
+     *
+     * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account.
+     * @param accountName The name of the Data Lake Store account to attempt to enable the Key Vault for.
+     * @return the {@link ServiceResponse} object if successful.
+     */
+    public Observable<ServiceResponse<Void>> enableKeyVaultWithServiceResponseAsync(String resourceGroupName, String accountName) {
+        if (resourceGroupName == null) {
+            throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.");
+        }
+        if (accountName == null) {
+            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
+        }
+        if (this.client.subscriptionId() == null) {
+            throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null.");
+        }
+        if (this.client.apiVersion() == null) {
+            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
+        }
+        return service.enableKeyVault(resourceGroupName, accountName, this.client.subscriptionId(), this.client.apiVersion(), this.client.acceptLanguage(), this.client.userAgent())
+            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Void>>>() {
+                @Override
+                public Observable<ServiceResponse<Void>> call(Response<ResponseBody> response) {
+                    try {
+                        ServiceResponse<Void> clientResponse = enableKeyVaultDelegate(response);
+                        return Observable.just(clientResponse);
+                    } catch (Throwable t) {
+                        return Observable.error(t);
+                    }
+                }
+            });
+    }
+
+    private ServiceResponse<Void> enableKeyVaultDelegate(Response<ResponseBody> response) throws CloudException, IOException, IllegalArgumentException {
+        return new AzureServiceResponseBuilder<Void, CloudException>(this.client.mapperAdapter())
+                .register(200, new TypeToken<Void>() { }.getType())
+                .build(response);
+    }
+
 /**
  * Lists the Data Lake Store accounts within a specific resource group.
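enableKeyVault is the one genuinely new operation in this file: it asks the service to enable a user-managed Key Vault for encryption of an existing account. It returns no body, so its overloads mirror delete. Sketch (names hypothetical; failures surface as the runtime's CloudException):

    accounts.enableKeyVault("myResourceGroup", "myAccount"); // blocking

    accounts.enableKeyVaultAsync("myResourceGroup", "myAccount")
        .subscribe(new Action1<Void>() {
            @Override
            public void call(Void ignored) {
                // vault enablement was accepted for the account
            }
        });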
The response includes a link to the next page of results, if any. * * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account(s). - * @throws CloudException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the List<DataLakeStoreAccount> object wrapped in {@link ServiceResponse} if successful. + * @return the PagedList<DataLakeStoreAccount> object if successful. */ - public ServiceResponse> listByResourceGroup(final String resourceGroupName) throws CloudException, IOException, IllegalArgumentException { + public PagedList listByResourceGroup(final String resourceGroupName) { ServiceResponse> response = listByResourceGroupSinglePageAsync(resourceGroupName).toBlocking().single(); - PagedList pagedList = new PagedList(response.getBody()) { + return new PagedList(response.getBody()) { @Override - public Page nextPage(String nextPageLink) throws RestException, IOException { + public Page nextPage(String nextPageLink) { return listByResourceGroupNextSinglePageAsync(nextPageLink).toBlocking().single().getBody(); } }; - return new ServiceResponse>(pagedList, response.getResponse()); } /** @@ -955,15 +1181,34 @@ public Observable>> call(String nextP * Lists the Data Lake Store accounts within a specific resource group. The response includes a link to the next page of results, if any. * * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account(s). - * @return the observable to the List<DataLakeStoreAccount> object + * @return the observable to the PagedList<DataLakeStoreAccount> object + */ + public Observable> listByResourceGroupAsync(final String resourceGroupName) { + return listByResourceGroupWithServiceResponseAsync(resourceGroupName) + .map(new Func1>, Page>() { + @Override + public Page call(ServiceResponse> response) { + return response.getBody(); + } + }); + } + + /** + * Lists the Data Lake Store accounts within a specific resource group. The response includes a link to the next page of results, if any. + * + * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account(s). + * @return the observable to the PagedList<DataLakeStoreAccount> object */ - public Observable>> listByResourceGroupAsync(final String resourceGroupName) { + public Observable>> listByResourceGroupWithServiceResponseAsync(final String resourceGroupName) { return listByResourceGroupSinglePageAsync(resourceGroupName) .concatMap(new Func1>, Observable>>>() { @Override public Observable>> call(ServiceResponse> page) { String nextPageLink = page.getBody().getNextPageLink(); - return listByResourceGroupNextSinglePageAsync(nextPageLink); + if (nextPageLink == null) { + return Observable.just(page); + } + return Observable.just(page).concatWith(listByResourceGroupNextWithServiceResponseAsync(nextPageLink)); } }); } @@ -972,7 +1217,7 @@ public Observable>> call(ServiceRespo * Lists the Data Lake Store accounts within a specific resource group. The response includes a link to the next page of results, if any. * * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account(s). - * @return the List<DataLakeStoreAccount> object wrapped in {@link ServiceResponse} if successful. + * @return the PagedList<DataLakeStoreAccount> object wrapped in {@link ServiceResponse} if successful. 
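The list operations change shape the most: blocking overloads now return a PagedList<T> whose overridden nextPage(...) lazily fetches continuation pages, so callers iterate without ever touching ServiceResponse or nextPageLink. Sketch (names hypothetical):

    PagedList<DataLakeStoreAccount> inGroup = accounts.listByResourceGroup("myResourceGroup");
    for (DataLakeStoreAccount a : inGroup) {
        // the iterator transparently invokes nextPage(nextPageLink), which calls
        // listByResourceGroupNextSinglePageAsync(...), whenever a page is exhausted
    }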
*/ public Observable>> listByResourceGroupSinglePageAsync(final String resourceGroupName) { if (resourceGroupName == null) { @@ -1020,20 +1265,16 @@ public Observable>> call(Response> listByResourceGroup(final String resourceGroupName, final String filter, final Integer top, final Integer skip, final String expand, final String select, final String orderby, final Boolean count, final String search, final String format) throws CloudException, IOException, IllegalArgumentException { + public PagedList listByResourceGroup(final String resourceGroupName, final String filter, final Integer top, final Integer skip, final String expand, final String select, final String orderby, final Boolean count, final String search, final String format) { ServiceResponse> response = listByResourceGroupSinglePageAsync(resourceGroupName, filter, top, skip, expand, select, orderby, count, search, format).toBlocking().single(); - PagedList pagedList = new PagedList(response.getBody()) { + return new PagedList(response.getBody()) { @Override - public Page nextPage(String nextPageLink) throws RestException, IOException { + public Page nextPage(String nextPageLink) { return listByResourceGroupNextSinglePageAsync(nextPageLink).toBlocking().single().getBody(); } }; - return new ServiceResponse>(pagedList, response.getResponse()); } /** @@ -1077,15 +1318,43 @@ public Observable>> call(String nextP * @param count A Boolean value of true or false to request a count of the matching resources included with the resources in the response, e.g. Categories?$count=true. Optional. * @param search A free form search. A free-text search expression to match for whether a particular entry should be included in the feed, e.g. Categories?$search=blue OR green. Optional. * @param format The desired return format. Return the response in particular formatxii without access to request headers for standard content-type negotiation (e.g Orders?$format=json). Optional. - * @return the observable to the List<DataLakeStoreAccount> object + * @return the observable to the PagedList<DataLakeStoreAccount> object + */ + public Observable> listByResourceGroupAsync(final String resourceGroupName, final String filter, final Integer top, final Integer skip, final String expand, final String select, final String orderby, final Boolean count, final String search, final String format) { + return listByResourceGroupWithServiceResponseAsync(resourceGroupName, filter, top, skip, expand, select, orderby, count, search, format) + .map(new Func1>, Page>() { + @Override + public Page call(ServiceResponse> response) { + return response.getBody(); + } + }); + } + + /** + * Lists the Data Lake Store accounts within a specific resource group. The response includes a link to the next page of results, if any. + * + * @param resourceGroupName The name of the Azure resource group that contains the Data Lake Store account(s). + * @param filter OData filter. Optional. + * @param top The number of items to return. Optional. + * @param skip The number of items to skip over before returning elements. Optional. + * @param expand OData expansion. Expand related resources in line with the retrieved resources, e.g. Categories/$expand=Products would expand Product data in line with each Category entry. Optional. + * @param select OData Select statement. Limits the properties on each entry to just those requested, e.g. Categories?$select=CategoryName,Description. Optional. + * @param orderby OrderBy clause. 
One or more comma-separated expressions with an optional "asc" (the default) or "desc" depending on the order you'd like the values sorted, e.g. Categories?$orderby=CategoryName desc. Optional. + * @param count A Boolean value of true or false to request a count of the matching resources included with the resources in the response, e.g. Categories?$count=true. Optional. + * @param search A free form search. A free-text search expression to match for whether a particular entry should be included in the feed, e.g. Categories?$search=blue OR green. Optional. + * @param format The desired return format. Return the response in particular formatxii without access to request headers for standard content-type negotiation (e.g Orders?$format=json). Optional. + * @return the observable to the PagedList<DataLakeStoreAccount> object */ - public Observable>> listByResourceGroupAsync(final String resourceGroupName, final String filter, final Integer top, final Integer skip, final String expand, final String select, final String orderby, final Boolean count, final String search, final String format) { + public Observable>> listByResourceGroupWithServiceResponseAsync(final String resourceGroupName, final String filter, final Integer top, final Integer skip, final String expand, final String select, final String orderby, final Boolean count, final String search, final String format) { return listByResourceGroupSinglePageAsync(resourceGroupName, filter, top, skip, expand, select, orderby, count, search, format) .concatMap(new Func1>, Observable>>>() { @Override public Observable>> call(ServiceResponse> page) { String nextPageLink = page.getBody().getNextPageLink(); - return listByResourceGroupNextSinglePageAsync(nextPageLink); + if (nextPageLink == null) { + return Observable.just(page); + } + return Observable.just(page).concatWith(listByResourceGroupNextWithServiceResponseAsync(nextPageLink)); } }); } @@ -1103,7 +1372,7 @@ public Observable>> call(ServiceRespo ServiceResponse> * @param count A Boolean value of true or false to request a count of the matching resources included with the resources in the response, e.g. Categories?$count=true. Optional. ServiceResponse> * @param search A free form search. A free-text search expression to match for whether a particular entry should be included in the feed, e.g. Categories?$search=blue OR green. Optional. ServiceResponse> * @param format The desired return format. Return the response in particular formatxii without access to request headers for standard content-type negotiation (e.g Orders?$format=json). Optional. - * @return the List<DataLakeStoreAccount> object wrapped in {@link ServiceResponse} if successful. + * @return the PagedList<DataLakeStoreAccount> object wrapped in {@link ServiceResponse} if successful. */ public Observable>> listByResourceGroupSinglePageAsync(final String resourceGroupName, final String filter, final Integer top, final Integer skip, final String expand, final String select, final String orderby, final Boolean count, final String search, final String format) { if (resourceGroupName == null) { @@ -1139,20 +1408,16 @@ private ServiceResponse> listByResourceGroupDeleg /** * Lists the Data Lake Store accounts within the subscription. The response includes a link to the next page of results, if any. 
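The long OData overload keeps its positional parameter list, with unused options passed as null. A hedged sketch of a filtered, truncated listing (the filter expression is illustrative only):

    PagedList<DataLakeStoreAccount> filtered = accounts.listByResourceGroup(
        "myResourceGroup",
        "location eq 'East US 2'", // $filter
        10,                        // $top
        null,                      // $skip
        null,                      // $expand
        null,                      // $select
        null,                      // $orderby
        null,                      // $count
        null,                      // $search
        null);                     // $format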
* - * @throws CloudException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the List<DataLakeStoreAccount> object wrapped in {@link ServiceResponse} if successful. + * @return the PagedList<DataLakeStoreAccount> object if successful. */ - public ServiceResponse> list() throws CloudException, IOException, IllegalArgumentException { + public PagedList list() { ServiceResponse> response = listSinglePageAsync().toBlocking().single(); - PagedList pagedList = new PagedList(response.getBody()) { + return new PagedList(response.getBody()) { @Override - public Page nextPage(String nextPageLink) throws RestException, IOException { + public Page nextPage(String nextPageLink) { return listNextSinglePageAsync(nextPageLink).toBlocking().single().getBody(); } }; - return new ServiceResponse>(pagedList, response.getResponse()); } /** @@ -1176,15 +1441,33 @@ public Observable>> call(String nextP /** * Lists the Data Lake Store accounts within the subscription. The response includes a link to the next page of results, if any. * - * @return the observable to the List<DataLakeStoreAccount> object + * @return the observable to the PagedList<DataLakeStoreAccount> object */ - public Observable>> listAsync() { + public Observable> listAsync() { + return listWithServiceResponseAsync() + .map(new Func1>, Page>() { + @Override + public Page call(ServiceResponse> response) { + return response.getBody(); + } + }); + } + + /** + * Lists the Data Lake Store accounts within the subscription. The response includes a link to the next page of results, if any. + * + * @return the observable to the PagedList<DataLakeStoreAccount> object + */ + public Observable>> listWithServiceResponseAsync() { return listSinglePageAsync() .concatMap(new Func1>, Observable>>>() { @Override public Observable>> call(ServiceResponse> page) { String nextPageLink = page.getBody().getNextPageLink(); - return listNextSinglePageAsync(nextPageLink); + if (nextPageLink == null) { + return Observable.just(page); + } + return Observable.just(page).concatWith(listNextWithServiceResponseAsync(nextPageLink)); } }); } @@ -1192,7 +1475,7 @@ public Observable>> call(ServiceRespo /** * Lists the Data Lake Store accounts within the subscription. The response includes a link to the next page of results, if any. * - * @return the List<DataLakeStoreAccount> object wrapped in {@link ServiceResponse} if successful. + * @return the PagedList<DataLakeStoreAccount> object wrapped in {@link ServiceResponse} if successful. 
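Note the behavioral fix folded into the *WithServiceResponseAsync pagers: concatMap now stops when getNextPageLink() returns null, where the old generated code requested the next page unconditionally. Each page arrives as one onNext. Sketch (names hypothetical):

    accounts.listWithServiceResponseAsync()
        .subscribe(new Action1<ServiceResponse<Page<DataLakeStoreAccount>>>() {
            @Override
            public void call(ServiceResponse<Page<DataLakeStoreAccount>> page) {
                // one emission per page; the stream completes after the page
                // whose nextPageLink is null instead of issuing a null request
            }
        });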
*/ public Observable>> listSinglePageAsync() { if (this.client.subscriptionId() == null) { @@ -1236,20 +1519,16 @@ public Observable>> call(Response> list(final String filter, final Integer top, final Integer skip, final String expand, final String select, final String orderby, final Boolean count, final String search, final String format) throws CloudException, IOException, IllegalArgumentException { + public PagedList list(final String filter, final Integer top, final Integer skip, final String expand, final String select, final String orderby, final Boolean count, final String search, final String format) { ServiceResponse> response = listSinglePageAsync(filter, top, skip, expand, select, orderby, count, search, format).toBlocking().single(); - PagedList pagedList = new PagedList(response.getBody()) { + return new PagedList(response.getBody()) { @Override - public Page nextPage(String nextPageLink) throws RestException, IOException { + public Page nextPage(String nextPageLink) { return listNextSinglePageAsync(nextPageLink).toBlocking().single().getBody(); } }; - return new ServiceResponse>(pagedList, response.getResponse()); } /** @@ -1291,15 +1570,42 @@ public Observable>> call(String nextP * @param count The Boolean value of true or false to request a count of the matching resources included with the resources in the response, e.g. Categories?$count=true. Optional. * @param search A free form search. A free-text search expression to match for whether a particular entry should be included in the feed, e.g. Categories?$search=blue OR green. Optional. * @param format The desired return format. Return the response in particular formatxii without access to request headers for standard content-type negotiation (e.g Orders?$format=json). Optional. - * @return the observable to the List<DataLakeStoreAccount> object + * @return the observable to the PagedList<DataLakeStoreAccount> object */ - public Observable>> listAsync(final String filter, final Integer top, final Integer skip, final String expand, final String select, final String orderby, final Boolean count, final String search, final String format) { + public Observable> listAsync(final String filter, final Integer top, final Integer skip, final String expand, final String select, final String orderby, final Boolean count, final String search, final String format) { + return listWithServiceResponseAsync(filter, top, skip, expand, select, orderby, count, search, format) + .map(new Func1>, Page>() { + @Override + public Page call(ServiceResponse> response) { + return response.getBody(); + } + }); + } + + /** + * Lists the Data Lake Store accounts within the subscription. The response includes a link to the next page of results, if any. + * + * @param filter OData filter. Optional. + * @param top The number of items to return. Optional. + * @param skip The number of items to skip over before returning elements. Optional. + * @param expand OData expansion. Expand related resources in line with the retrieved resources, e.g. Categories/$expand=Products would expand Product data in line with each Category entry. Optional. + * @param select OData Select statement. Limits the properties on each entry to just those requested, e.g. Categories?$select=CategoryName,Description. Optional. + * @param orderby OrderBy clause. One or more comma-separated expressions with an optional "asc" (the default) or "desc" depending on the order you'd like the values sorted, e.g. Categories?$orderby=CategoryName desc. Optional. 
+ * @param count The Boolean value of true or false to request a count of the matching resources included with the resources in the response, e.g. Categories?$count=true. Optional. + * @param search A free form search. A free-text search expression to match for whether a particular entry should be included in the feed, e.g. Categories?$search=blue OR green. Optional. + * @param format The desired return format. Return the response in particular formatxii without access to request headers for standard content-type negotiation (e.g Orders?$format=json). Optional. + * @return the observable to the PagedList<DataLakeStoreAccount> object + */ + public Observable>> listWithServiceResponseAsync(final String filter, final Integer top, final Integer skip, final String expand, final String select, final String orderby, final Boolean count, final String search, final String format) { return listSinglePageAsync(filter, top, skip, expand, select, orderby, count, search, format) .concatMap(new Func1>, Observable>>>() { @Override public Observable>> call(ServiceResponse> page) { String nextPageLink = page.getBody().getNextPageLink(); - return listNextSinglePageAsync(nextPageLink); + if (nextPageLink == null) { + return Observable.just(page); + } + return Observable.just(page).concatWith(listNextWithServiceResponseAsync(nextPageLink)); } }); } @@ -1316,7 +1622,7 @@ public Observable>> call(ServiceRespo ServiceResponse> * @param count The Boolean value of true or false to request a count of the matching resources included with the resources in the response, e.g. Categories?$count=true. Optional. ServiceResponse> * @param search A free form search. A free-text search expression to match for whether a particular entry should be included in the feed, e.g. Categories?$search=blue OR green. Optional. ServiceResponse> * @param format The desired return format. Return the response in particular formatxii without access to request headers for standard content-type negotiation (e.g Orders?$format=json). Optional. - * @return the List<DataLakeStoreAccount> object wrapped in {@link ServiceResponse} if successful. + * @return the PagedList<DataLakeStoreAccount> object wrapped in {@link ServiceResponse} if successful. */ public Observable>> listSinglePageAsync(final String filter, final Integer top, final Integer skip, final String expand, final String select, final String orderby, final Boolean count, final String search, final String format) { if (this.client.subscriptionId() == null) { @@ -1350,20 +1656,16 @@ private ServiceResponse> listDelegate(Response> listFirewallRulesNext(final String nextPageLink) throws CloudException, IOException, IllegalArgumentException { + public PagedList listFirewallRulesNext(final String nextPageLink) { ServiceResponse> response = listFirewallRulesNextSinglePageAsync(nextPageLink).toBlocking().single(); - PagedList pagedList = new PagedList(response.getBody()) { + return new PagedList(response.getBody()) { @Override - public Page nextPage(String nextPageLink) throws RestException, IOException { + public Page nextPage(String nextPageLink) { return listFirewallRulesNextSinglePageAsync(nextPageLink).toBlocking().single().getBody(); } }; - return new ServiceResponse>(pagedList, response.getResponse()); } /** @@ -1390,15 +1692,34 @@ public Observable>> call(String nextPageLink) * Lists the Data Lake Store firewall rules within the specified Data Lake Store account. * * @param nextPageLink The NextLink from the previous successful call to List operation. 
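The same PagedList treatment applies to the firewall-rule continuation method, which resumes listing from a saved nextPageLink. Sketch (the link value is a hypothetical placeholder persisted from an earlier page):

    String savedLink = "<nextPageLink from a previous firewall-rule page>";
    PagedList<FirewallRule> remainingRules = accounts.listFirewallRulesNext(savedLink);
    for (FirewallRule rule : remainingRules) {
        // continues paging forward from savedLink
    }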
- * @return the observable to the List<FirewallRule> object + * @return the observable to the PagedList<FirewallRule> object */ - public Observable>> listFirewallRulesNextAsync(final String nextPageLink) { + public Observable> listFirewallRulesNextAsync(final String nextPageLink) { + return listFirewallRulesNextWithServiceResponseAsync(nextPageLink) + .map(new Func1>, Page>() { + @Override + public Page call(ServiceResponse> response) { + return response.getBody(); + } + }); + } + + /** + * Lists the Data Lake Store firewall rules within the specified Data Lake Store account. + * + * @param nextPageLink The NextLink from the previous successful call to List operation. + * @return the observable to the PagedList<FirewallRule> object + */ + public Observable>> listFirewallRulesNextWithServiceResponseAsync(final String nextPageLink) { return listFirewallRulesNextSinglePageAsync(nextPageLink) .concatMap(new Func1>, Observable>>>() { @Override public Observable>> call(ServiceResponse> page) { String nextPageLink = page.getBody().getNextPageLink(); - return listFirewallRulesNextSinglePageAsync(nextPageLink); + if (nextPageLink == null) { + return Observable.just(page); + } + return Observable.just(page).concatWith(listFirewallRulesNextWithServiceResponseAsync(nextPageLink)); } }); } @@ -1407,7 +1728,7 @@ public Observable>> call(ServiceResponse> * @param nextPageLink The NextLink from the previous successful call to List operation. - * @return the List<FirewallRule> object wrapped in {@link ServiceResponse} if successful. + * @return the PagedList<FirewallRule> object wrapped in {@link ServiceResponse} if successful. */ public Observable>> listFirewallRulesNextSinglePageAsync(final String nextPageLink) { if (nextPageLink == null) { @@ -1438,20 +1759,16 @@ private ServiceResponse> listFirewallRulesNextDelegate(Re * Lists the Data Lake Store accounts within a specific resource group. The response includes a link to the next page of results, if any. * * @param nextPageLink The NextLink from the previous successful call to List operation. - * @throws CloudException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the List<DataLakeStoreAccount> object wrapped in {@link ServiceResponse} if successful. + * @return the PagedList<DataLakeStoreAccount> object if successful. */ - public ServiceResponse> listByResourceGroupNext(final String nextPageLink) throws CloudException, IOException, IllegalArgumentException { + public PagedList listByResourceGroupNext(final String nextPageLink) { ServiceResponse> response = listByResourceGroupNextSinglePageAsync(nextPageLink).toBlocking().single(); - PagedList pagedList = new PagedList(response.getBody()) { + return new PagedList(response.getBody()) { @Override - public Page nextPage(String nextPageLink) throws RestException, IOException { + public Page nextPage(String nextPageLink) { return listByResourceGroupNextSinglePageAsync(nextPageLink).toBlocking().single().getBody(); } }; - return new ServiceResponse>(pagedList, response.getResponse()); } /** @@ -1478,15 +1795,34 @@ public Observable>> call(String nextP * Lists the Data Lake Store accounts within a specific resource group. The response includes a link to the next page of results, if any. * * @param nextPageLink The NextLink from the previous successful call to List operation. 
- * @return the observable to the List<DataLakeStoreAccount> object + * @return the observable to the PagedList<DataLakeStoreAccount> object + */ + public Observable> listByResourceGroupNextAsync(final String nextPageLink) { + return listByResourceGroupNextWithServiceResponseAsync(nextPageLink) + .map(new Func1>, Page>() { + @Override + public Page call(ServiceResponse> response) { + return response.getBody(); + } + }); + } + + /** + * Lists the Data Lake Store accounts within a specific resource group. The response includes a link to the next page of results, if any. + * + * @param nextPageLink The NextLink from the previous successful call to List operation. + * @return the observable to the PagedList<DataLakeStoreAccount> object */ - public Observable>> listByResourceGroupNextAsync(final String nextPageLink) { + public Observable>> listByResourceGroupNextWithServiceResponseAsync(final String nextPageLink) { return listByResourceGroupNextSinglePageAsync(nextPageLink) .concatMap(new Func1>, Observable>>>() { @Override public Observable>> call(ServiceResponse> page) { String nextPageLink = page.getBody().getNextPageLink(); - return listByResourceGroupNextSinglePageAsync(nextPageLink); + if (nextPageLink == null) { + return Observable.just(page); + } + return Observable.just(page).concatWith(listByResourceGroupNextWithServiceResponseAsync(nextPageLink)); } }); } @@ -1495,7 +1831,7 @@ public Observable>> call(ServiceRespo * Lists the Data Lake Store accounts within a specific resource group. The response includes a link to the next page of results, if any. * ServiceResponse> * @param nextPageLink The NextLink from the previous successful call to List operation. - * @return the List<DataLakeStoreAccount> object wrapped in {@link ServiceResponse} if successful. + * @return the PagedList<DataLakeStoreAccount> object wrapped in {@link ServiceResponse} if successful. */ public Observable>> listByResourceGroupNextSinglePageAsync(final String nextPageLink) { if (nextPageLink == null) { @@ -1526,20 +1862,16 @@ private ServiceResponse> listByResourceGroupNextD * Lists the Data Lake Store accounts within the subscription. The response includes a link to the next page of results, if any. * * @param nextPageLink The NextLink from the previous successful call to List operation. - * @throws CloudException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the List<DataLakeStoreAccount> object wrapped in {@link ServiceResponse} if successful. + * @return the PagedList<DataLakeStoreAccount> object if successful. */ - public ServiceResponse> listNext(final String nextPageLink) throws CloudException, IOException, IllegalArgumentException { + public PagedList listNext(final String nextPageLink) { ServiceResponse> response = listNextSinglePageAsync(nextPageLink).toBlocking().single(); - PagedList pagedList = new PagedList(response.getBody()) { + return new PagedList(response.getBody()) { @Override - public Page nextPage(String nextPageLink) throws RestException, IOException { + public Page nextPage(String nextPageLink) { return listNextSinglePageAsync(nextPageLink).toBlocking().single().getBody(); } }; - return new ServiceResponse>(pagedList, response.getResponse()); } /** @@ -1566,15 +1898,34 @@ public Observable>> call(String nextP * Lists the Data Lake Store accounts within the subscription. The response includes a link to the next page of results, if any. 
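listNext and listByResourceGroupNext give the account listings the same explicit resume point. They are rarely called directly, since PagedList iteration drives them under the hood, but they let a caller persist a continuation link and pick up later, even across processes. Sketch (savedNextPageLink is hypothetical):

    PagedList<DataLakeStoreAccount> resumed = accounts.listNext(savedNextPageLink);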
* * @param nextPageLink The NextLink from the previous successful call to List operation. - * @return the observable to the List<DataLakeStoreAccount> object + * @return the observable to the PagedList<DataLakeStoreAccount> object + */ + public Observable> listNextAsync(final String nextPageLink) { + return listNextWithServiceResponseAsync(nextPageLink) + .map(new Func1>, Page>() { + @Override + public Page call(ServiceResponse> response) { + return response.getBody(); + } + }); + } + + /** + * Lists the Data Lake Store accounts within the subscription. The response includes a link to the next page of results, if any. + * + * @param nextPageLink The NextLink from the previous successful call to List operation. + * @return the observable to the PagedList<DataLakeStoreAccount> object */ - public Observable>> listNextAsync(final String nextPageLink) { + public Observable>> listNextWithServiceResponseAsync(final String nextPageLink) { return listNextSinglePageAsync(nextPageLink) .concatMap(new Func1>, Observable>>>() { @Override public Observable>> call(ServiceResponse> page) { String nextPageLink = page.getBody().getNextPageLink(); - return listNextSinglePageAsync(nextPageLink); + if (nextPageLink == null) { + return Observable.just(page); + } + return Observable.just(page).concatWith(listNextWithServiceResponseAsync(nextPageLink)); } }); } @@ -1583,7 +1934,7 @@ public Observable>> call(ServiceRespo * Lists the Data Lake Store accounts within the subscription. The response includes a link to the next page of results, if any. * ServiceResponse> * @param nextPageLink The NextLink from the previous successful call to List operation. - * @return the List<DataLakeStoreAccount> object wrapped in {@link ServiceResponse} if successful. + * @return the PagedList<DataLakeStoreAccount> object wrapped in {@link ServiceResponse} if successful. */ public Observable>> listNextSinglePageAsync(final String nextPageLink) { if (nextPageLink == null) { diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/implementation/DataLakeStoreFileSystemManagementClientImpl.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/implementation/DataLakeStoreFileSystemManagementClientImpl.java deleted file mode 100644 index 552169907eb5..000000000000 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/implementation/DataLakeStoreFileSystemManagementClientImpl.java +++ /dev/null @@ -1,203 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - */ - -package com.microsoft.azure.management.datalake.store.implementation; - -import com.microsoft.azure.AzureClient; -import com.microsoft.azure.AzureServiceClient; -import com.microsoft.azure.management.datalake.store.DataLakeStoreFileSystemManagementClient; -import com.microsoft.azure.management.datalake.store.FileSystems; -import com.microsoft.azure.RestClient; -import com.microsoft.rest.credentials.ServiceClientCredentials; - -/** - * Initializes a new instance of the DataLakeStoreFileSystemManagementClientImpl class. - */ -public final class DataLakeStoreFileSystemManagementClientImpl extends AzureServiceClient implements DataLakeStoreFileSystemManagementClient { - /** the {@link AzureClient} used for long running operations. 
*/ - private AzureClient azureClient; - - /** - * Gets the {@link AzureClient} used for long running operations. - * @return the azure client; - */ - public AzureClient getAzureClient() { - return this.azureClient; - } - - /** Client Api Version. */ - private String apiVersion; - - /** - * Gets Client Api Version. - * - * @return the apiVersion value. - */ - public String apiVersion() { - return this.apiVersion; - } - - /** Gets the URI used as the base for all cloud service requests. */ - private String adlsFileSystemDnsSuffix; - - /** - * Gets Gets the URI used as the base for all cloud service requests. - * - * @return the adlsFileSystemDnsSuffix value. - */ - public String adlsFileSystemDnsSuffix() { - return this.adlsFileSystemDnsSuffix; - } - - /** - * Sets Gets the URI used as the base for all cloud service requests. - * - * @param adlsFileSystemDnsSuffix the adlsFileSystemDnsSuffix value. - * @return the service client itself - */ - public DataLakeStoreFileSystemManagementClientImpl withAdlsFileSystemDnsSuffix(String adlsFileSystemDnsSuffix) { - this.adlsFileSystemDnsSuffix = adlsFileSystemDnsSuffix; - return this; - } - - /** Gets or sets the preferred language for the response. */ - private String acceptLanguage; - - /** - * Gets Gets or sets the preferred language for the response. - * - * @return the acceptLanguage value. - */ - public String acceptLanguage() { - return this.acceptLanguage; - } - - /** - * Sets Gets or sets the preferred language for the response. - * - * @param acceptLanguage the acceptLanguage value. - * @return the service client itself - */ - public DataLakeStoreFileSystemManagementClientImpl withAcceptLanguage(String acceptLanguage) { - this.acceptLanguage = acceptLanguage; - return this; - } - - /** Gets or sets the retry timeout in seconds for Long Running Operations. Default value is 30. */ - private int longRunningOperationRetryTimeout; - - /** - * Gets Gets or sets the retry timeout in seconds for Long Running Operations. Default value is 30. - * - * @return the longRunningOperationRetryTimeout value. - */ - public int longRunningOperationRetryTimeout() { - return this.longRunningOperationRetryTimeout; - } - - /** - * Sets Gets or sets the retry timeout in seconds for Long Running Operations. Default value is 30. - * - * @param longRunningOperationRetryTimeout the longRunningOperationRetryTimeout value. - * @return the service client itself - */ - public DataLakeStoreFileSystemManagementClientImpl withLongRunningOperationRetryTimeout(int longRunningOperationRetryTimeout) { - this.longRunningOperationRetryTimeout = longRunningOperationRetryTimeout; - return this; - } - - /** When set to true a unique x-ms-client-request-id value is generated and included in each request. Default is true. */ - private boolean generateClientRequestId; - - /** - * Gets When set to true a unique x-ms-client-request-id value is generated and included in each request. Default is true. - * - * @return the generateClientRequestId value. - */ - public boolean generateClientRequestId() { - return this.generateClientRequestId; - } - - /** - * Sets When set to true a unique x-ms-client-request-id value is generated and included in each request. Default is true. - * - * @param generateClientRequestId the generateClientRequestId value. 
- * @return the service client itself - */ - public DataLakeStoreFileSystemManagementClientImpl withGenerateClientRequestId(boolean generateClientRequestId) { - this.generateClientRequestId = generateClientRequestId; - return this; - } - - /** - * The FileSystems object to access its operations. - */ - private FileSystems fileSystems; - - /** - * Gets the FileSystems object to access its operations. - * @return the FileSystems object. - */ - public FileSystems fileSystems() { - return this.fileSystems; - } - - /** - * Initializes an instance of DataLakeStoreFileSystemManagementClient client. - * - * @param credentials the management credentials for Azure - */ - public DataLakeStoreFileSystemManagementClientImpl(ServiceClientCredentials credentials) { - this("https://{accountName}.{adlsFileSystemDnsSuffix}", credentials); - } - - /** - * Initializes an instance of DataLakeStoreFileSystemManagementClient client. - * - * @param baseUrl the base URL of the host - * @param credentials the management credentials for Azure - */ - private DataLakeStoreFileSystemManagementClientImpl(String baseUrl, ServiceClientCredentials credentials) { - this(new RestClient.Builder() - .withBaseUrl(baseUrl) - .withCredentials(credentials) - .build()); - } - - /** - * Initializes an instance of DataLakeStoreFileSystemManagementClient client. - * - * @param restClient the REST client to connect to Azure. - */ - public DataLakeStoreFileSystemManagementClientImpl(RestClient restClient) { - super(restClient); - initialize(); - } - - protected void initialize() { - this.apiVersion = "2015-10-01-preview"; - this.adlsFileSystemDnsSuffix = "azuredatalakestore.net"; - this.acceptLanguage = "en-US"; - this.longRunningOperationRetryTimeout = 30; - this.generateClientRequestId = true; - this.fileSystems = new FileSystemsImpl(restClient().retrofit(), this); - this.azureClient = new AzureClient(this); - } - - /** - * Gets the User-Agent header for the client. - * - * @return the user agent string. - */ - @Override - public String userAgent() { - return String.format("Azure-SDK-For-Java/%s (%s)", - getClass().getPackage().getImplementationVersion(), - "DataLakeStoreFileSystemManagementClient, 2015-10-01-preview"); - } -} diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/implementation/FileSystemsImpl.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/implementation/FileSystemsImpl.java deleted file mode 100644 index 936775b6c156..000000000000 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/implementation/FileSystemsImpl.java +++ /dev/null @@ -1,2238 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - * - * Code generated by Microsoft (R) AutoRest Code Generator. 
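The file-system client whose deletion appears just above targeted a per-account host template rather than the ARM endpoint, with its defaults set in initialize(). For orientation while reading the removal, its construction looked roughly like this (credentials setup omitted; details taken from the deleted code):

    // Base URL template: https://{accountName}.{adlsFileSystemDnsSuffix}
    DataLakeStoreFileSystemManagementClientImpl fsClient =
        new DataLakeStoreFileSystemManagementClientImpl(credentials);
    fsClient.withAdlsFileSystemDnsSuffix("azuredatalakestore.net"); // already the initialize() default
    // apiVersion defaulted to "2015-10-01-preview"; per the commit message this
    // data-plane surface now ships from a separate ADLS package.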
- */ - -package com.microsoft.azure.management.datalake.store.implementation; - -import retrofit2.Retrofit; -import com.microsoft.azure.management.datalake.store.FileSystems; -import com.google.common.base.Joiner; -import com.google.common.reflect.TypeToken; -import com.microsoft.azure.AzureServiceResponseBuilder; -import com.microsoft.azure.management.datalake.store.models.AclStatusResult; -import com.microsoft.azure.management.datalake.store.models.AdlsErrorException; -import com.microsoft.azure.management.datalake.store.models.AppendModeType; -import com.microsoft.azure.management.datalake.store.models.ContentSummaryResult; -import com.microsoft.azure.management.datalake.store.models.FileOperationResult; -import com.microsoft.azure.management.datalake.store.models.FileStatusesResult; -import com.microsoft.azure.management.datalake.store.models.FileStatusResult; -import com.microsoft.rest.serializer.CollectionFormat; -import com.microsoft.rest.ServiceCall; -import com.microsoft.rest.ServiceCallback; -import com.microsoft.rest.ServiceResponse; -import com.microsoft.rest.Validator; -import java.io.InputStream; -import java.io.IOException; -import java.util.List; -import okhttp3.MediaType; -import okhttp3.RequestBody; -import okhttp3.ResponseBody; -import retrofit2.http.Body; -import retrofit2.http.GET; -import retrofit2.http.Header; -import retrofit2.http.Headers; -import retrofit2.http.HTTP; -import retrofit2.http.Path; -import retrofit2.http.POST; -import retrofit2.http.PUT; -import retrofit2.http.Query; -import retrofit2.http.Streaming; -import retrofit2.Response; -import rx.functions.Func1; -import rx.Observable; - -/** - * An instance of this class provides access to all the operations defined - * in FileSystems. - */ -public final class FileSystemsImpl implements FileSystems { - /** The Retrofit service to perform REST calls. */ - private FileSystemsService service; - /** The service client containing this operation class. */ - private DataLakeStoreFileSystemManagementClientImpl client; - - /** - * Initializes an instance of FileSystemsImpl. - * - * @param retrofit the Retrofit instance built from a Retrofit Builder. - * @param client the instance of the service client containing this operation class. - */ - public FileSystemsImpl(Retrofit retrofit, DataLakeStoreFileSystemManagementClientImpl client) { - this.service = retrofit.create(FileSystemsService.class); - this.client = client; - } - - /** - * The interface defining all the services for FileSystems to be - * used by Retrofit to perform actually REST calls. 
- */ - interface FileSystemsService { - @Headers("Content-Type: application/octet-stream") - @POST("WebHdfsExt/{filePath}") - Observable> concurrentAppend(@Path("filePath") String filePath, @Body RequestBody streamContents, @Query("appendMode") AppendModeType appendMode, @Query("op") String op, @Header("Transfer-Encoding") String transferEncoding, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); - - @Headers("Content-Type: application/json; charset=utf-8") - @GET("webhdfs/v1/{path}") - Observable> checkAccess(@Path("path") String path, @Query("fsaction") String fsaction, @Query("op") String op, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); - - @Headers("Content-Type: application/json; charset=utf-8") - @PUT("webhdfs/v1/{path}") - Observable> mkdirs(@Path("path") String path, @Query("op") String op, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); - - @Headers("Content-Type: application/json; charset=utf-8") - @POST("webhdfs/v1/{destinationPath}") - Observable> concat(@Path("destinationPath") String destinationPath, @Query("sources") String sources, @Query("op") String op, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); - - @Headers("Content-Type: application/octet-stream") - @POST("webhdfs/v1/{msConcatDestinationPath}") - Observable> msConcat(@Path("msConcatDestinationPath") String msConcatDestinationPath, @Query("deleteSourceDirectory") Boolean deleteSourceDirectory, @Body RequestBody streamContents, @Query("op") String op, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); - - @Headers("Content-Type: application/json; charset=utf-8") - @GET("webhdfs/v1/{listFilePath}") - Observable> listFileStatus(@Path("listFilePath") String listFilePath, @Query("listSize") Integer listSize, @Query("listAfter") String listAfter, @Query("listBefore") String listBefore, @Query("op") String op, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); - - @Headers("Content-Type: application/json; charset=utf-8") - @GET("webhdfs/va/{getContentSummaryFilePath}") - Observable> getContentSummary(@Path("getContentSummaryFilePath") String getContentSummaryFilePath, @Query("op") String op, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); - - @Headers("Content-Type: application/json; charset=utf-8") - @GET("webhdfs/v1/{getFilePath}") - Observable> getFileStatus(@Path("getFilePath") String getFilePath, @Query("op") String op, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); - - 
@Headers("Content-Type: application/octet-stream") - @POST("webhdfs/v1/{directFilePath}") - Observable> append(@Path("directFilePath") String directFilePath, @Body RequestBody streamContents, @Query("offset") Long offset, @Query("op") String op, @Query("append") String append, @Header("Transfer-Encoding") String transferEncoding, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); - - @Headers("Content-Type: application/octet-stream") - @PUT("webhdfs/v1/{directFilePath}") - Observable> create(@Path("directFilePath") String directFilePath, @Body RequestBody streamContents, @Query("overwrite") Boolean overwrite, @Query("op") String op, @Query("write") String write, @Header("Transfer-Encoding") String transferEncoding, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); - - @Headers("Content-Type: application/json; charset=utf-8") - @GET("webhdfs/v1/{directFilePath}") - @Streaming - Observable> open(@Path("directFilePath") String directFilePath, @Query("length") Long length, @Query("offset") Long offset, @Query("op") String op, @Query("read") String read, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); - - @Headers("Content-Type: application/json; charset=utf-8") - @PUT("webhdfs/v1/{setAclFilePath}") - Observable> setAcl(@Path("setAclFilePath") String setAclFilePath, @Query("aclspec") String aclspec, @Query("op") String op, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); - - @Headers("Content-Type: application/json; charset=utf-8") - @PUT("webhdfs/v1/{modifyAclFilePath}") - Observable> modifyAclEntries(@Path("modifyAclFilePath") String modifyAclFilePath, @Query("aclspec") String aclspec, @Query("op") String op, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); - - @Headers("Content-Type: application/json; charset=utf-8") - @PUT("webhdfs/v1/{removeAclFilePath}") - Observable> removeAclEntries(@Path("removeAclFilePath") String removeAclFilePath, @Query("aclspec") String aclspec, @Query("op") String op, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); - - @Headers("Content-Type: application/json; charset=utf-8") - @GET("webhdfs/v1/{aclFilePath}") - Observable> getAclStatus(@Path("aclFilePath") String aclFilePath, @Query("op") String op, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); - - @Headers("Content-Type: application/json; charset=utf-8") - @HTTP(path = "webhdfs/v1/{filePath}", method = "DELETE", hasBody = true) - Observable> delete(@Path("filePath") String filePath, @Query("recursive") Boolean recursive, @Query("op") String op, @Query("api-version") String apiVersion, @Header("accept-language") String 
acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); - - @Headers("Content-Type: application/json; charset=utf-8") - @PUT("webhdfs/v1/{renameFilePath}") - Observable> rename(@Path("renameFilePath") String renameFilePath, @Query("destination") String destination, @Query("op") String op, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); - - @Headers("Content-Type: application/json; charset=utf-8") - @PUT("webhdfs/v1/{setOwnerFilePath}") - Observable> setOwner(@Path("setOwnerFilePath") String setOwnerFilePath, @Query("owner") String owner, @Query("group") String group, @Query("op") String op, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); - - @Headers("Content-Type: application/json; charset=utf-8") - @PUT("webhdfs/v1/{setPermissionFilePath}") - Observable> setPermission(@Path("setPermissionFilePath") String setPermissionFilePath, @Query("permission") String permission, @Query("op") String op, @Query("api-version") String apiVersion, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); - - } - - /** - * Appends to the specified file. This method supports multiple concurrent appends to the file. NOTE: Concurrent append and normal (serial) append CANNOT be used interchangeably. Once a file has been appended to using either append option, it can only be appended to using that append option. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param filePath The Data Lake Store path (starting with '/') of the file to which to append using concurrent append. - * @param streamContents The file contents to include when appending to the file. - * @throws AdlsErrorException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the {@link ServiceResponse} object if successful. - */ - public ServiceResponse concurrentAppend(String accountName, String filePath, byte[] streamContents) throws AdlsErrorException, IOException, IllegalArgumentException { - return concurrentAppendAsync(accountName, filePath, streamContents).toBlocking().single(); - } - - /** - * Appends to the specified file. This method supports multiple concurrent appends to the file. NOTE: Concurrent append and normal (serial) append CANNOT be used interchangeably. Once a file has been appended to using either append option, it can only be appended to using that append option. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param filePath The Data Lake Store path (starting with '/') of the file to which to append using concurrent append. - * @param streamContents The file contents to include when appending to the file. - * @param serviceCallback the async ServiceCallback to handle successful and failed responses. 
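Every method on the service interface above carries an x-ms-parameterized-host header; the implementation (visible just below) assembles it with Joiner as comma-separated template/value pairs, which the client runtime substitutes into the {accountName}.{adlsFileSystemDnsSuffix} base URL before dispatch. Sketch of the assembled value (account name hypothetical):

    String parameterizedHost = Joiner.on(", ").join(
        "{accountName}", "myadlsaccount",
        "{adlsFileSystemDnsSuffix}", "azuredatalakestore.net");
    // -> "{accountName}, myadlsaccount, {adlsFileSystemDnsSuffix}, azuredatalakestore.net"
    // effective endpoint: https://myadlsaccount.azuredatalakestore.net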
- * @return the {@link ServiceCall} object - */ - public ServiceCall concurrentAppendAsync(String accountName, String filePath, byte[] streamContents, final ServiceCallback serviceCallback) { - return ServiceCall.create(concurrentAppendAsync(accountName, filePath, streamContents), serviceCallback); - } - - /** - * Appends to the specified file. This method supports multiple concurrent appends to the file. NOTE: Concurrent append and normal (serial) append CANNOT be used interchangeably. Once a file has been appended to using either append option, it can only be appended to using that append option. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param filePath The Data Lake Store path (starting with '/') of the file to which to append using concurrent append. - * @param streamContents The file contents to include when appending to the file. - * @return the {@link ServiceResponse} object if successful. - */ - public Observable> concurrentAppendAsync(String accountName, String filePath, byte[] streamContents) { - if (accountName == null) { - throw new IllegalArgumentException("Parameter accountName is required and cannot be null."); - } - if (this.client.adlsFileSystemDnsSuffix() == null) { - throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null."); - } - if (filePath == null) { - throw new IllegalArgumentException("Parameter filePath is required and cannot be null."); - } - if (streamContents == null) { - throw new IllegalArgumentException("Parameter streamContents is required and cannot be null."); - } - if (this.client.apiVersion() == null) { - throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null."); - } - final String op = "CONCURRENTAPPEND"; - final String transferEncoding = "chunked"; - final AppendModeType appendMode = null; - String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix()); - RequestBody streamContentsConverted = RequestBody.create(MediaType.parse("application/octet-stream"), streamContents); - return service.concurrentAppend(filePath, streamContentsConverted, appendMode, op, transferEncoding, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent()) - .flatMap(new Func1, Observable>>() { - @Override - public Observable> call(Response response) { - try { - ServiceResponse clientResponse = concurrentAppendDelegate(response); - return Observable.just(clientResponse); - } catch (Throwable t) { - return Observable.error(t); - } - } - }); - } - - /** - * Appends to the specified file. This method supports multiple concurrent appends to the file. NOTE: Concurrent append and normal (serial) append CANNOT be used interchangeably. Once a file has been appended to using either append option, it can only be appended to using that append option. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param filePath The Data Lake Store path (starting with '/') of the file to which to append using concurrent append. - * @param streamContents The file contents to include when appending to the file. - * @param appendMode Indicates the concurrent append call should create the file if it doesn't exist or just open the existing file for append. 
Possible values include: 'autocreate' - * @throws AdlsErrorException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the {@link ServiceResponse} object if successful. - */ - public ServiceResponse concurrentAppend(String accountName, String filePath, byte[] streamContents, AppendModeType appendMode) throws AdlsErrorException, IOException, IllegalArgumentException { - return concurrentAppendAsync(accountName, filePath, streamContents, appendMode).toBlocking().single(); - } - - /** - * Appends to the specified file. This method supports multiple concurrent appends to the file. NOTE: Concurrent append and normal (serial) append CANNOT be used interchangeably. Once a file has been appended to using either append option, it can only be appended to using that append option. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param filePath The Data Lake Store path (starting with '/') of the file to which to append using concurrent append. - * @param streamContents The file contents to include when appending to the file. - * @param appendMode Indicates the concurrent append call should create the file if it doesn't exist or just open the existing file for append. Possible values include: 'autocreate' - * @param serviceCallback the async ServiceCallback to handle successful and failed responses. - * @return the {@link ServiceCall} object - */ - public ServiceCall concurrentAppendAsync(String accountName, String filePath, byte[] streamContents, AppendModeType appendMode, final ServiceCallback serviceCallback) { - return ServiceCall.create(concurrentAppendAsync(accountName, filePath, streamContents, appendMode), serviceCallback); - } - - /** - * Appends to the specified file. This method supports multiple concurrent appends to the file. NOTE: Concurrent append and normal (serial) append CANNOT be used interchangeably. Once a file has been appended to using either append option, it can only be appended to using that append option. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param filePath The Data Lake Store path (starting with '/') of the file to which to append using concurrent append. - * @param streamContents The file contents to include when appending to the file. - * @param appendMode Indicates the concurrent append call should create the file if it doesn't exist or just open the existing file for append. Possible values include: 'autocreate' - * @return the {@link ServiceResponse} object if successful. 
- */ - public Observable> concurrentAppendAsync(String accountName, String filePath, byte[] streamContents, AppendModeType appendMode) { - if (accountName == null) { - throw new IllegalArgumentException("Parameter accountName is required and cannot be null."); - } - if (this.client.adlsFileSystemDnsSuffix() == null) { - throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null."); - } - if (filePath == null) { - throw new IllegalArgumentException("Parameter filePath is required and cannot be null."); - } - if (streamContents == null) { - throw new IllegalArgumentException("Parameter streamContents is required and cannot be null."); - } - if (this.client.apiVersion() == null) { - throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null."); - } - final String op = "CONCURRENTAPPEND"; - final String transferEncoding = "chunked"; - String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix()); - RequestBody streamContentsConverted = RequestBody.create(MediaType.parse("application/octet-stream"), streamContents); - return service.concurrentAppend(filePath, streamContentsConverted, appendMode, op, transferEncoding, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent()) - .flatMap(new Func1, Observable>>() { - @Override - public Observable> call(Response response) { - try { - ServiceResponse clientResponse = concurrentAppendDelegate(response); - return Observable.just(clientResponse); - } catch (Throwable t) { - return Observable.error(t); - } - } - }); - } - - private ServiceResponse concurrentAppendDelegate(Response response) throws AdlsErrorException, IOException, IllegalArgumentException { - return new AzureServiceResponseBuilder(this.client.mapperAdapter()) - .register(200, new TypeToken() { }.getType()) - .registerError(AdlsErrorException.class) - .build(response); - } - - /** - * Checks if the specified access is available at the given path. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param path The Data Lake Store path (starting with '/') of the file or directory for which to check access. - * @throws AdlsErrorException exception thrown from REST call - * @throws IOException exception thrown from serialization/deserialization - * @throws IllegalArgumentException exception thrown from invalid parameters - * @return the {@link ServiceResponse} object if successful. - */ - public ServiceResponse checkAccess(String accountName, String path) throws AdlsErrorException, IOException, IllegalArgumentException { - return checkAccessAsync(accountName, path).toBlocking().single(); - } - - /** - * Checks if the specified access is available at the given path. - * - * @param accountName The Azure Data Lake Store account to execute filesystem operations on. - * @param path The Data Lake Store path (starting with '/') of the file or directory for which to check access. - * @param serviceCallback the async ServiceCallback to handle successful and failed responses. - * @return the {@link ServiceCall} object - */ - public ServiceCall checkAccessAsync(String accountName, String path, final ServiceCallback serviceCallback) { - return ServiceCall.create(checkAccessAsync(accountName, path), serviceCallback); - } - - /** - * Checks if the specified access is available at the given path. 
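[Reviewer note: for teams still depending on this surface, a minimal caller-side sketch of the removed concurrent-append operation. The account name, path, and the `fileSystems()` accessor wiring on an already-authenticated client are illustrative assumptions, not part of this diff.]

```java
import java.nio.charset.StandardCharsets;
import com.microsoft.azure.management.datalake.store.DataLakeStoreFileSystemManagementClient;

class ConcurrentAppendSketch {
    // Blocking overload; a file first written with CONCURRENTAPPEND must keep using it.
    static void appendLine(DataLakeStoreFileSystemManagementClient client) throws Exception {
        byte[] contents = "a line of data\n".getBytes(StandardCharsets.UTF_8);
        client.fileSystems().concurrentAppend("myadlsaccount", "/logs/app.log", contents);
    }
}
```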
-    /**
-     * Checks if the specified access is available at the given path.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param path The Data Lake Store path (starting with '/') of the file or directory for which to check access.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public ServiceResponse<Void> checkAccess(String accountName, String path) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return checkAccessAsync(accountName, path).toBlocking().single();
-    }
-
-    /**
-     * Checks if the specified access is available at the given path.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param path The Data Lake Store path (starting with '/') of the file or directory for which to check access.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    public ServiceCall<Void> checkAccessAsync(String accountName, String path, final ServiceCallback<Void> serviceCallback) {
-        return ServiceCall.create(checkAccessAsync(accountName, path), serviceCallback);
-    }
-
-    /**
-     * Checks if the specified access is available at the given path.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param path The Data Lake Store path (starting with '/') of the file or directory for which to check access.
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public Observable<ServiceResponse<Void>> checkAccessAsync(String accountName, String path) {
-        if (accountName == null) {
-            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
-        }
-        if (this.client.adlsFileSystemDnsSuffix() == null) {
-            throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null.");
-        }
-        if (path == null) {
-            throw new IllegalArgumentException("Parameter path is required and cannot be null.");
-        }
-        if (this.client.apiVersion() == null) {
-            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
-        }
-        final String op = "CHECKACCESS";
-        final String fsaction = null;
-        String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix());
-        return service.checkAccess(path, fsaction, op, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
-            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Void>>>() {
-                @Override
-                public Observable<ServiceResponse<Void>> call(Response<ResponseBody> response) {
-                    try {
-                        ServiceResponse<Void> clientResponse = checkAccessDelegate(response);
-                        return Observable.just(clientResponse);
-                    } catch (Throwable t) {
-                        return Observable.error(t);
-                    }
-                }
-            });
-    }
-
-    /**
-     * Checks if the specified access is available at the given path.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param path The Data Lake Store path (starting with '/') of the file or directory for which to check access.
-     * @param fsaction File system operation read/write/execute in string form, matching regex pattern '[rwx-]{3}'
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public ServiceResponse<Void> checkAccess(String accountName, String path, String fsaction) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return checkAccessAsync(accountName, path, fsaction).toBlocking().single();
-    }
-
-    /**
-     * Checks if the specified access is available at the given path.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param path The Data Lake Store path (starting with '/') of the file or directory for which to check access.
-     * @param fsaction File system operation read/write/execute in string form, matching regex pattern '[rwx-]{3}'
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    public ServiceCall<Void> checkAccessAsync(String accountName, String path, String fsaction, final ServiceCallback<Void> serviceCallback) {
-        return ServiceCall.create(checkAccessAsync(accountName, path, fsaction), serviceCallback);
-    }
-
-    /**
-     * Checks if the specified access is available at the given path.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param path The Data Lake Store path (starting with '/') of the file or directory for which to check access.
-     * @param fsaction File system operation read/write/execute in string form, matching regex pattern '[rwx-]{3}'
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public Observable<ServiceResponse<Void>> checkAccessAsync(String accountName, String path, String fsaction) {
-        if (accountName == null) {
-            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
-        }
-        if (this.client.adlsFileSystemDnsSuffix() == null) {
-            throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null.");
-        }
-        if (path == null) {
-            throw new IllegalArgumentException("Parameter path is required and cannot be null.");
-        }
-        if (this.client.apiVersion() == null) {
-            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
-        }
-        final String op = "CHECKACCESS";
-        String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix());
-        return service.checkAccess(path, fsaction, op, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
-            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Void>>>() {
-                @Override
-                public Observable<ServiceResponse<Void>> call(Response<ResponseBody> response) {
-                    try {
-                        ServiceResponse<Void> clientResponse = checkAccessDelegate(response);
-                        return Observable.just(clientResponse);
-                    } catch (Throwable t) {
-                        return Observable.error(t);
-                    }
-                }
-            });
-    }
-
-    private ServiceResponse<Void> checkAccessDelegate(Response<ResponseBody> response) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return new AzureServiceResponseBuilder<Void, AdlsErrorException>(this.client.mapperAdapter())
-                .register(200, new TypeToken<Void>() { }.getType())
-                .registerError(AdlsErrorException.class)
-                .build(response);
-    }
-
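[Reviewer note: a sketch of the removed CHECKACCESS call with an explicit fsaction; account and path are placeholder values.]

```java
import com.microsoft.azure.management.datalake.store.DataLakeStoreFileSystemManagementClient;

class CheckAccessSketch {
    // fsaction must match '[rwx-]{3}'; a denied check surfaces as AdlsErrorException.
    static void probeReadExecute(DataLakeStoreFileSystemManagementClient client) throws Exception {
        client.fileSystems().checkAccess("myadlsaccount", "/data/input", "r-x");
    }
}
```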
-    /**
-     * Creates a directory.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param path The Data Lake Store path (starting with '/') of the directory to create.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the FileOperationResult object wrapped in {@link ServiceResponse} if successful.
-     */
-    public ServiceResponse<FileOperationResult> mkdirs(String accountName, String path) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return mkdirsAsync(accountName, path).toBlocking().single();
-    }
-
-    /**
-     * Creates a directory.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param path The Data Lake Store path (starting with '/') of the directory to create.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    public ServiceCall<FileOperationResult> mkdirsAsync(String accountName, String path, final ServiceCallback<FileOperationResult> serviceCallback) {
-        return ServiceCall.create(mkdirsAsync(accountName, path), serviceCallback);
-    }
-
-    /**
-     * Creates a directory.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param path The Data Lake Store path (starting with '/') of the directory to create.
-     * @return the observable to the FileOperationResult object
-     */
-    public Observable<ServiceResponse<FileOperationResult>> mkdirsAsync(String accountName, String path) {
-        if (accountName == null) {
-            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
-        }
-        if (this.client.adlsFileSystemDnsSuffix() == null) {
-            throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null.");
-        }
-        if (path == null) {
-            throw new IllegalArgumentException("Parameter path is required and cannot be null.");
-        }
-        if (this.client.apiVersion() == null) {
-            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
-        }
-        final String op = "MKDIRS";
-        String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix());
-        return service.mkdirs(path, op, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
-            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<FileOperationResult>>>() {
-                @Override
-                public Observable<ServiceResponse<FileOperationResult>> call(Response<ResponseBody> response) {
-                    try {
-                        ServiceResponse<FileOperationResult> clientResponse = mkdirsDelegate(response);
-                        return Observable.just(clientResponse);
-                    } catch (Throwable t) {
-                        return Observable.error(t);
-                    }
-                }
-            });
-    }
-
-    private ServiceResponse<FileOperationResult> mkdirsDelegate(Response<ResponseBody> response) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return new AzureServiceResponseBuilder<FileOperationResult, AdlsErrorException>(this.client.mapperAdapter())
-                .register(200, new TypeToken<FileOperationResult>() { }.getType())
-                .registerError(AdlsErrorException.class)
-                .build(response);
-    }
-
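[Reviewer note: a sketch of the Observable-style overload that mkdirsAsync exposed; the `getBody()` accessor on ServiceResponse is as I recall the runtime of that era, so treat it as an assumption.]

```java
import com.microsoft.azure.management.datalake.store.DataLakeStoreFileSystemManagementClient;
import com.microsoft.azure.management.datalake.store.models.FileOperationResult;
import com.microsoft.rest.ServiceResponse;

class MkdirsSketch {
    // Fire the MKDIRS call and react on the rx.Observable it returns.
    static void makeDir(DataLakeStoreFileSystemManagementClient client) {
        client.fileSystems().mkdirsAsync("myadlsaccount", "/data/output")
            .subscribe(
                (ServiceResponse<FileOperationResult> response) ->
                    System.out.println("MKDIRS result body: " + response.getBody()),
                (Throwable error) -> System.err.println("mkdirs failed: " + error));
    }
}
```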
-    /**
-     * Concatenates the list of source files into the destination file, removing all source files upon success.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param destinationPath The Data Lake Store path (starting with '/') of the destination file resulting from the concatenation.
-     * @param sources A list of comma-separated Data Lake Store paths (starting with '/') of the files to concatenate, in the order in which they should be concatenated.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public ServiceResponse<Void> concat(String accountName, String destinationPath, List<String> sources) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return concatAsync(accountName, destinationPath, sources).toBlocking().single();
-    }
-
-    /**
-     * Concatenates the list of source files into the destination file, removing all source files upon success.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param destinationPath The Data Lake Store path (starting with '/') of the destination file resulting from the concatenation.
-     * @param sources A list of comma-separated Data Lake Store paths (starting with '/') of the files to concatenate, in the order in which they should be concatenated.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    public ServiceCall<Void> concatAsync(String accountName, String destinationPath, List<String> sources, final ServiceCallback<Void> serviceCallback) {
-        return ServiceCall.create(concatAsync(accountName, destinationPath, sources), serviceCallback);
-    }
-
-    /**
-     * Concatenates the list of source files into the destination file, removing all source files upon success.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param destinationPath The Data Lake Store path (starting with '/') of the destination file resulting from the concatenation.
-     * @param sources A list of comma-separated Data Lake Store paths (starting with '/') of the files to concatenate, in the order in which they should be concatenated.
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public Observable<ServiceResponse<Void>> concatAsync(String accountName, String destinationPath, List<String> sources) {
-        if (accountName == null) {
-            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
-        }
-        if (this.client.adlsFileSystemDnsSuffix() == null) {
-            throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null.");
-        }
-        if (destinationPath == null) {
-            throw new IllegalArgumentException("Parameter destinationPath is required and cannot be null.");
-        }
-        if (sources == null) {
-            throw new IllegalArgumentException("Parameter sources is required and cannot be null.");
-        }
-        if (this.client.apiVersion() == null) {
-            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
-        }
-        Validator.validate(sources);
-        final String op = "CONCAT";
-        String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix());
-        String sourcesConverted = this.client.mapperAdapter().serializeList(sources, CollectionFormat.CSV);
-        return service.concat(destinationPath, sourcesConverted, op, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
-            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Void>>>() {
-                @Override
-                public Observable<ServiceResponse<Void>> call(Response<ResponseBody> response) {
-                    try {
-                        ServiceResponse<Void> clientResponse = concatDelegate(response);
-                        return Observable.just(clientResponse);
-                    } catch (Throwable t) {
-                        return Observable.error(t);
-                    }
-                }
-            });
-    }
-
-    private ServiceResponse<Void> concatDelegate(Response<ResponseBody> response) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return new AzureServiceResponseBuilder<Void, AdlsErrorException>(this.client.mapperAdapter())
-                .register(200, new TypeToken<Void>() { }.getType())
-                .registerError(AdlsErrorException.class)
-                .build(response);
-    }
-
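[Reviewer note: a sketch of the removed CONCAT call; the staging paths are placeholders. The sources are serialized to a CSV query parameter, so the list order is the concatenation order.]

```java
import java.util.Arrays;
import java.util.List;
import com.microsoft.azure.management.datalake.store.DataLakeStoreFileSystemManagementClient;

class ConcatSketch {
    // Merge ordered parts into one file; sources are deleted on success.
    static void merge(DataLakeStoreFileSystemManagementClient client) throws Exception {
        List<String> sources = Arrays.asList("/staging/part-0", "/staging/part-1");
        client.fileSystems().concat("myadlsaccount", "/merged/full", sources);
    }
}
```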
-    /**
-     * Concatenates the list of source files into the destination file, deleting all source files upon success. This method accepts more source file paths than the Concat method. This method and the parameters it accepts are subject to change for usability in an upcoming version.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param msConcatDestinationPath The Data Lake Store path (starting with '/') of the destination file resulting from the concatenation.
-     * @param streamContents A list of Data Lake Store paths (starting with '/') of the source files. Must be in the format: sources=&lt;comma separated list&gt;
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public ServiceResponse<Void> msConcat(String accountName, String msConcatDestinationPath, byte[] streamContents) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return msConcatAsync(accountName, msConcatDestinationPath, streamContents).toBlocking().single();
-    }
-
-    /**
-     * Concatenates the list of source files into the destination file, deleting all source files upon success. This method accepts more source file paths than the Concat method. This method and the parameters it accepts are subject to change for usability in an upcoming version.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param msConcatDestinationPath The Data Lake Store path (starting with '/') of the destination file resulting from the concatenation.
-     * @param streamContents A list of Data Lake Store paths (starting with '/') of the source files. Must be in the format: sources=&lt;comma separated list&gt;
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    public ServiceCall<Void> msConcatAsync(String accountName, String msConcatDestinationPath, byte[] streamContents, final ServiceCallback<Void> serviceCallback) {
-        return ServiceCall.create(msConcatAsync(accountName, msConcatDestinationPath, streamContents), serviceCallback);
-    }
-
-    /**
-     * Concatenates the list of source files into the destination file, deleting all source files upon success. This method accepts more source file paths than the Concat method. This method and the parameters it accepts are subject to change for usability in an upcoming version.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param msConcatDestinationPath The Data Lake Store path (starting with '/') of the destination file resulting from the concatenation.
-     * @param streamContents A list of Data Lake Store paths (starting with '/') of the source files. Must be in the format: sources=&lt;comma separated list&gt;
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public Observable<ServiceResponse<Void>> msConcatAsync(String accountName, String msConcatDestinationPath, byte[] streamContents) {
-        if (accountName == null) {
-            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
-        }
-        if (this.client.adlsFileSystemDnsSuffix() == null) {
-            throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null.");
-        }
-        if (msConcatDestinationPath == null) {
-            throw new IllegalArgumentException("Parameter msConcatDestinationPath is required and cannot be null.");
-        }
-        if (streamContents == null) {
-            throw new IllegalArgumentException("Parameter streamContents is required and cannot be null.");
-        }
-        if (this.client.apiVersion() == null) {
-            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
-        }
-        final String op = "MSCONCAT";
-        final Boolean deleteSourceDirectory = null;
-        String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix());
-        RequestBody streamContentsConverted = RequestBody.create(MediaType.parse("application/octet-stream"), streamContents);
-        return service.msConcat(msConcatDestinationPath, deleteSourceDirectory, streamContentsConverted, op, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
-            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Void>>>() {
-                @Override
-                public Observable<ServiceResponse<Void>> call(Response<ResponseBody> response) {
-                    try {
-                        ServiceResponse<Void> clientResponse = msConcatDelegate(response);
-                        return Observable.just(clientResponse);
-                    } catch (Throwable t) {
-                        return Observable.error(t);
-                    }
-                }
-            });
-    }
-
-    /**
-     * Concatenates the list of source files into the destination file, deleting all source files upon success. This method accepts more source file paths than the Concat method. This method and the parameters it accepts are subject to change for usability in an upcoming version.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param msConcatDestinationPath The Data Lake Store path (starting with '/') of the destination file resulting from the concatenation.
-     * @param streamContents A list of Data Lake Store paths (starting with '/') of the source files. Must be in the format: sources=&lt;comma separated list&gt;
-     * @param deleteSourceDirectory Indicates that, as an optimization, the source stream folder should be deleted as a whole instead of each individual source stream, when all streams are in the same folder. This results in a substantial performance improvement when the only streams in the folder are part of the concatenation operation. WARNING: This includes the deletion of any other files that are not source files. Only set this to true when source files are the only files in the source directory.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public ServiceResponse<Void> msConcat(String accountName, String msConcatDestinationPath, byte[] streamContents, Boolean deleteSourceDirectory) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return msConcatAsync(accountName, msConcatDestinationPath, streamContents, deleteSourceDirectory).toBlocking().single();
-    }
-
-    /**
-     * Concatenates the list of source files into the destination file, deleting all source files upon success. This method accepts more source file paths than the Concat method. This method and the parameters it accepts are subject to change for usability in an upcoming version.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param msConcatDestinationPath The Data Lake Store path (starting with '/') of the destination file resulting from the concatenation.
-     * @param streamContents A list of Data Lake Store paths (starting with '/') of the source files. Must be in the format: sources=&lt;comma separated list&gt;
-     * @param deleteSourceDirectory Indicates that, as an optimization, the source stream folder should be deleted as a whole instead of each individual source stream, when all streams are in the same folder. This results in a substantial performance improvement when the only streams in the folder are part of the concatenation operation. WARNING: This includes the deletion of any other files that are not source files. Only set this to true when source files are the only files in the source directory.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    public ServiceCall<Void> msConcatAsync(String accountName, String msConcatDestinationPath, byte[] streamContents, Boolean deleteSourceDirectory, final ServiceCallback<Void> serviceCallback) {
-        return ServiceCall.create(msConcatAsync(accountName, msConcatDestinationPath, streamContents, deleteSourceDirectory), serviceCallback);
-    }
-
-    /**
-     * Concatenates the list of source files into the destination file, deleting all source files upon success. This method accepts more source file paths than the Concat method. This method and the parameters it accepts are subject to change for usability in an upcoming version.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param msConcatDestinationPath The Data Lake Store path (starting with '/') of the destination file resulting from the concatenation.
-     * @param streamContents A list of Data Lake Store paths (starting with '/') of the source files. Must be in the format: sources=&lt;comma separated list&gt;
-     * @param deleteSourceDirectory Indicates that, as an optimization, the source stream folder should be deleted as a whole instead of each individual source stream, when all streams are in the same folder. This results in a substantial performance improvement when the only streams in the folder are part of the concatenation operation. WARNING: This includes the deletion of any other files that are not source files. Only set this to true when source files are the only files in the source directory.
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public Observable<ServiceResponse<Void>> msConcatAsync(String accountName, String msConcatDestinationPath, byte[] streamContents, Boolean deleteSourceDirectory) {
-        if (accountName == null) {
-            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
-        }
-        if (this.client.adlsFileSystemDnsSuffix() == null) {
-            throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null.");
-        }
-        if (msConcatDestinationPath == null) {
-            throw new IllegalArgumentException("Parameter msConcatDestinationPath is required and cannot be null.");
-        }
-        if (streamContents == null) {
-            throw new IllegalArgumentException("Parameter streamContents is required and cannot be null.");
-        }
-        if (this.client.apiVersion() == null) {
-            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
-        }
-        final String op = "MSCONCAT";
-        String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix());
-        RequestBody streamContentsConverted = RequestBody.create(MediaType.parse("application/octet-stream"), streamContents);
-        return service.msConcat(msConcatDestinationPath, deleteSourceDirectory, streamContentsConverted, op, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
-            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Void>>>() {
-                @Override
-                public Observable<ServiceResponse<Void>> call(Response<ResponseBody> response) {
-                    try {
-                        ServiceResponse<Void> clientResponse = msConcatDelegate(response);
-                        return Observable.just(clientResponse);
-                    } catch (Throwable t) {
-                        return Observable.error(t);
-                    }
-                }
-            });
-    }
-
-    private ServiceResponse<Void> msConcatDelegate(Response<ResponseBody> response) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return new AzureServiceResponseBuilder<Void, AdlsErrorException>(this.client.mapperAdapter())
-                .register(200, new TypeToken<Void>() { }.getType())
-                .registerError(AdlsErrorException.class)
-                .build(response);
-    }
-
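[Reviewer note: a sketch of the removed MSCONCAT call. Unlike CONCAT, the source list travels in the request body in the `sources=<comma separated list>` form documented above; paths here are placeholders.]

```java
import java.nio.charset.StandardCharsets;
import com.microsoft.azure.management.datalake.store.DataLakeStoreFileSystemManagementClient;

class MsConcatSketch {
    // deleteSourceDirectory=true is only safe when the folder holds nothing but the sources.
    static void mergeMany(DataLakeStoreFileSystemManagementClient client) throws Exception {
        byte[] body = "sources=/staging/part-0,/staging/part-1".getBytes(StandardCharsets.UTF_8);
        client.fileSystems().msConcat("myadlsaccount", "/merged/full", body, true);
    }
}
```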
-    /**
-     * Get the list of file status objects specified by the file path, with optional pagination parameters.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param listFilePath The Data Lake Store path (starting with '/') of the directory to list.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the FileStatusesResult object wrapped in {@link ServiceResponse} if successful.
-     */
-    public ServiceResponse<FileStatusesResult> listFileStatus(String accountName, String listFilePath) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return listFileStatusAsync(accountName, listFilePath).toBlocking().single();
-    }
-
-    /**
-     * Get the list of file status objects specified by the file path, with optional pagination parameters.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param listFilePath The Data Lake Store path (starting with '/') of the directory to list.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    public ServiceCall<FileStatusesResult> listFileStatusAsync(String accountName, String listFilePath, final ServiceCallback<FileStatusesResult> serviceCallback) {
-        return ServiceCall.create(listFileStatusAsync(accountName, listFilePath), serviceCallback);
-    }
-
-    /**
-     * Get the list of file status objects specified by the file path, with optional pagination parameters.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param listFilePath The Data Lake Store path (starting with '/') of the directory to list.
-     * @return the observable to the FileStatusesResult object
-     */
-    public Observable<ServiceResponse<FileStatusesResult>> listFileStatusAsync(String accountName, String listFilePath) {
-        if (accountName == null) {
-            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
-        }
-        if (this.client.adlsFileSystemDnsSuffix() == null) {
-            throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null.");
-        }
-        if (listFilePath == null) {
-            throw new IllegalArgumentException("Parameter listFilePath is required and cannot be null.");
-        }
-        if (this.client.apiVersion() == null) {
-            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
-        }
-        final String op = "MSLISTSTATUS";
-        final Integer listSize = null;
-        final String listAfter = null;
-        final String listBefore = null;
-        String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix());
-        return service.listFileStatus(listFilePath, listSize, listAfter, listBefore, op, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
-            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<FileStatusesResult>>>() {
-                @Override
-                public Observable<ServiceResponse<FileStatusesResult>> call(Response<ResponseBody> response) {
-                    try {
-                        ServiceResponse<FileStatusesResult> clientResponse = listFileStatusDelegate(response);
-                        return Observable.just(clientResponse);
-                    } catch (Throwable t) {
-                        return Observable.error(t);
-                    }
-                }
-            });
-    }
-
-    /**
-     * Get the list of file status objects specified by the file path, with optional pagination parameters.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param listFilePath The Data Lake Store path (starting with '/') of the directory to list.
-     * @param listSize The number of items to return. Optional.
-     * @param listAfter The item or lexicographical index after which to begin returning results. For example, a file list of 'a','b','d' and listAfter='b' will return 'd', and a listAfter='c' will also return 'd'. Optional.
-     * @param listBefore The item or lexicographical index before which to begin returning results. For example, a file list of 'a','b','d' and listBefore='d' will return 'a','b', and a listBefore='c' will also return 'a','b'. Optional.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the FileStatusesResult object wrapped in {@link ServiceResponse} if successful.
-     */
-    public ServiceResponse<FileStatusesResult> listFileStatus(String accountName, String listFilePath, Integer listSize, String listAfter, String listBefore) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return listFileStatusAsync(accountName, listFilePath, listSize, listAfter, listBefore).toBlocking().single();
-    }
-
-    /**
-     * Get the list of file status objects specified by the file path, with optional pagination parameters.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param listFilePath The Data Lake Store path (starting with '/') of the directory to list.
-     * @param listSize The number of items to return. Optional.
-     * @param listAfter The item or lexicographical index after which to begin returning results. For example, a file list of 'a','b','d' and listAfter='b' will return 'd', and a listAfter='c' will also return 'd'. Optional.
-     * @param listBefore The item or lexicographical index before which to begin returning results. For example, a file list of 'a','b','d' and listBefore='d' will return 'a','b', and a listBefore='c' will also return 'a','b'. Optional.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    public ServiceCall<FileStatusesResult> listFileStatusAsync(String accountName, String listFilePath, Integer listSize, String listAfter, String listBefore, final ServiceCallback<FileStatusesResult> serviceCallback) {
-        return ServiceCall.create(listFileStatusAsync(accountName, listFilePath, listSize, listAfter, listBefore), serviceCallback);
-    }
-
-    /**
-     * Get the list of file status objects specified by the file path, with optional pagination parameters.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param listFilePath The Data Lake Store path (starting with '/') of the directory to list.
-     * @param listSize The number of items to return. Optional.
-     * @param listAfter The item or lexicographical index after which to begin returning results. For example, a file list of 'a','b','d' and listAfter='b' will return 'd', and a listAfter='c' will also return 'd'. Optional.
-     * @param listBefore The item or lexicographical index before which to begin returning results. For example, a file list of 'a','b','d' and listBefore='d' will return 'a','b', and a listBefore='c' will also return 'a','b'. Optional.
-     * @return the observable to the FileStatusesResult object
-     */
-    public Observable<ServiceResponse<FileStatusesResult>> listFileStatusAsync(String accountName, String listFilePath, Integer listSize, String listAfter, String listBefore) {
-        if (accountName == null) {
-            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
-        }
-        if (this.client.adlsFileSystemDnsSuffix() == null) {
-            throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null.");
-        }
-        if (listFilePath == null) {
-            throw new IllegalArgumentException("Parameter listFilePath is required and cannot be null.");
-        }
-        if (this.client.apiVersion() == null) {
-            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
-        }
-        final String op = "MSLISTSTATUS";
-        String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix());
-        return service.listFileStatus(listFilePath, listSize, listAfter, listBefore, op, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
-            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<FileStatusesResult>>>() {
-                @Override
-                public Observable<ServiceResponse<FileStatusesResult>> call(Response<ResponseBody> response) {
-                    try {
-                        ServiceResponse<FileStatusesResult> clientResponse = listFileStatusDelegate(response);
-                        return Observable.just(clientResponse);
-                    } catch (Throwable t) {
-                        return Observable.error(t);
-                    }
-                }
-            });
-    }
-
-    private ServiceResponse<FileStatusesResult> listFileStatusDelegate(Response<ResponseBody> response) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return new AzureServiceResponseBuilder<FileStatusesResult, AdlsErrorException>(this.client.mapperAdapter())
-                .register(200, new TypeToken<FileStatusesResult>() { }.getType())
-                .registerError(AdlsErrorException.class)
-                .build(response);
-    }
-
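[Reviewer note: a sketch of paging with the removed MSLISTSTATUS overload; the directory, page size, and marker values are placeholders, and `getBody()` is the as-remembered accessor.]

```java
import com.microsoft.azure.management.datalake.store.DataLakeStoreFileSystemManagementClient;
import com.microsoft.azure.management.datalake.store.models.FileStatusesResult;
import com.microsoft.rest.ServiceResponse;

class ListStatusSketch {
    // Return at most 100 entries, starting lexicographically after "part-0099".
    static FileStatusesResult page(DataLakeStoreFileSystemManagementClient client) throws Exception {
        ServiceResponse<FileStatusesResult> page =
                client.fileSystems().listFileStatus("myadlsaccount", "/staging", 100, "part-0099", null);
        return page.getBody();
    }
}
```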
-    /**
-     * Gets the file content summary object specified by the file path.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param getContentSummaryFilePath The Data Lake Store path (starting with '/') of the file for which to retrieve the summary.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the ContentSummaryResult object wrapped in {@link ServiceResponse} if successful.
-     */
-    public ServiceResponse<ContentSummaryResult> getContentSummary(String accountName, String getContentSummaryFilePath) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return getContentSummaryAsync(accountName, getContentSummaryFilePath).toBlocking().single();
-    }
-
-    /**
-     * Gets the file content summary object specified by the file path.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param getContentSummaryFilePath The Data Lake Store path (starting with '/') of the file for which to retrieve the summary.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    public ServiceCall<ContentSummaryResult> getContentSummaryAsync(String accountName, String getContentSummaryFilePath, final ServiceCallback<ContentSummaryResult> serviceCallback) {
-        return ServiceCall.create(getContentSummaryAsync(accountName, getContentSummaryFilePath), serviceCallback);
-    }
-
-    /**
-     * Gets the file content summary object specified by the file path.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param getContentSummaryFilePath The Data Lake Store path (starting with '/') of the file for which to retrieve the summary.
-     * @return the observable to the ContentSummaryResult object
-     */
-    public Observable<ServiceResponse<ContentSummaryResult>> getContentSummaryAsync(String accountName, String getContentSummaryFilePath) {
-        if (accountName == null) {
-            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
-        }
-        if (this.client.adlsFileSystemDnsSuffix() == null) {
-            throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null.");
-        }
-        if (getContentSummaryFilePath == null) {
-            throw new IllegalArgumentException("Parameter getContentSummaryFilePath is required and cannot be null.");
-        }
-        if (this.client.apiVersion() == null) {
-            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
-        }
-        final String op = "GETCONTENTSUMMARY";
-        String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix());
-        return service.getContentSummary(getContentSummaryFilePath, op, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
-            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<ContentSummaryResult>>>() {
-                @Override
-                public Observable<ServiceResponse<ContentSummaryResult>> call(Response<ResponseBody> response) {
-                    try {
-                        ServiceResponse<ContentSummaryResult> clientResponse = getContentSummaryDelegate(response);
-                        return Observable.just(clientResponse);
-                    } catch (Throwable t) {
-                        return Observable.error(t);
-                    }
-                }
-            });
-    }
-
-    private ServiceResponse<ContentSummaryResult> getContentSummaryDelegate(Response<ResponseBody> response) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return new AzureServiceResponseBuilder<ContentSummaryResult, AdlsErrorException>(this.client.mapperAdapter())
-                .register(200, new TypeToken<ContentSummaryResult>() { }.getType())
-                .registerError(AdlsErrorException.class)
-                .build(response);
-    }
-
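[Reviewer note: a one-liner sketch of the removed GETCONTENTSUMMARY call; path and account are placeholders.]

```java
import com.microsoft.azure.management.datalake.store.DataLakeStoreFileSystemManagementClient;
import com.microsoft.azure.management.datalake.store.models.ContentSummaryResult;

class ContentSummarySketch {
    // Fetch the aggregate summary (counts/sizes) rooted at a path.
    static ContentSummaryResult summarize(DataLakeStoreFileSystemManagementClient client) throws Exception {
        return client.fileSystems().getContentSummary("myadlsaccount", "/data").getBody();
    }
}
```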
-    /**
-     * Get the file status object specified by the file path.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param getFilePath The Data Lake Store path (starting with '/') of the file or directory for which to retrieve the status.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the FileStatusResult object wrapped in {@link ServiceResponse} if successful.
-     */
-    public ServiceResponse<FileStatusResult> getFileStatus(String accountName, String getFilePath) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return getFileStatusAsync(accountName, getFilePath).toBlocking().single();
-    }
-
-    /**
-     * Get the file status object specified by the file path.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param getFilePath The Data Lake Store path (starting with '/') of the file or directory for which to retrieve the status.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    public ServiceCall<FileStatusResult> getFileStatusAsync(String accountName, String getFilePath, final ServiceCallback<FileStatusResult> serviceCallback) {
-        return ServiceCall.create(getFileStatusAsync(accountName, getFilePath), serviceCallback);
-    }
-
-    /**
-     * Get the file status object specified by the file path.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param getFilePath The Data Lake Store path (starting with '/') of the file or directory for which to retrieve the status.
-     * @return the observable to the FileStatusResult object
-     */
-    public Observable<ServiceResponse<FileStatusResult>> getFileStatusAsync(String accountName, String getFilePath) {
-        if (accountName == null) {
-            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
-        }
-        if (this.client.adlsFileSystemDnsSuffix() == null) {
-            throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null.");
-        }
-        if (getFilePath == null) {
-            throw new IllegalArgumentException("Parameter getFilePath is required and cannot be null.");
-        }
-        if (this.client.apiVersion() == null) {
-            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
-        }
-        final String op = "GETFILESTATUS";
-        String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix());
-        return service.getFileStatus(getFilePath, op, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
-            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<FileStatusResult>>>() {
-                @Override
-                public Observable<ServiceResponse<FileStatusResult>> call(Response<ResponseBody> response) {
-                    try {
-                        ServiceResponse<FileStatusResult> clientResponse = getFileStatusDelegate(response);
-                        return Observable.just(clientResponse);
-                    } catch (Throwable t) {
-                        return Observable.error(t);
-                    }
-                }
-            });
-    }
-
-    private ServiceResponse<FileStatusResult> getFileStatusDelegate(Response<ResponseBody> response) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return new AzureServiceResponseBuilder<FileStatusResult, AdlsErrorException>(this.client.mapperAdapter())
-                .register(200, new TypeToken<FileStatusResult>() { }.getType())
-                .registerError(AdlsErrorException.class)
-                .build(response);
-    }
-
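[Reviewer note: a sketch of the callback-style overload of the removed getFileStatusAsync. The `failure`/`success` ServiceCallback shape is from the pre-1.0 client runtime as I recall it, so treat those names as assumptions.]

```java
import com.microsoft.azure.management.datalake.store.DataLakeStoreFileSystemManagementClient;
import com.microsoft.azure.management.datalake.store.models.FileStatusResult;
import com.microsoft.rest.ServiceCallback;
import com.microsoft.rest.ServiceResponse;

class FileStatusSketch {
    static void stat(DataLakeStoreFileSystemManagementClient client) {
        client.fileSystems().getFileStatusAsync("myadlsaccount", "/data/input/file.csv",
            new ServiceCallback<FileStatusResult>() {
                @Override
                public void failure(Throwable t) {
                    System.err.println("GETFILESTATUS failed: " + t);
                }

                @Override
                public void success(ServiceResponse<FileStatusResult> result) {
                    System.out.println("Got status: " + result.getBody());
                }
            });
    }
}
```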
-     * @param directFilePath The Data Lake Store path (starting with '/') of the file to which to append.
-     * @param streamContents The file contents to include when appending to the file.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    public ServiceCall<Void> appendAsync(String accountName, String directFilePath, byte[] streamContents, final ServiceCallback<Void> serviceCallback) {
-        return ServiceCall.create(appendAsync(accountName, directFilePath, streamContents), serviceCallback);
-    }
-
-    /**
-     * Appends to the specified file. This method does not support multiple concurrent appends to the file. NOTE: Concurrent append and normal (serial) append CANNOT be used interchangeably. Once a file has been appended to using either append option, it can only be appended to using that append option. Use the ConcurrentAppend option if you would like support for concurrent appends.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param directFilePath The Data Lake Store path (starting with '/') of the file to which to append.
-     * @param streamContents The file contents to include when appending to the file.
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public Observable<ServiceResponse<Void>> appendAsync(String accountName, String directFilePath, byte[] streamContents) {
-        if (accountName == null) {
-            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
-        }
-        if (this.client.adlsFileSystemDnsSuffix() == null) {
-            throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null.");
-        }
-        if (directFilePath == null) {
-            throw new IllegalArgumentException("Parameter directFilePath is required and cannot be null.");
-        }
-        if (streamContents == null) {
-            throw new IllegalArgumentException("Parameter streamContents is required and cannot be null.");
-        }
-        if (this.client.apiVersion() == null) {
-            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
-        }
-        final String op = "APPEND";
-        final String append = "true";
-        final String transferEncoding = "chunked";
-        final Long offset = null;
-        String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix());
-        RequestBody streamContentsConverted = RequestBody.create(MediaType.parse("application/octet-stream"), streamContents);
-        return service.append(directFilePath, streamContentsConverted, offset, op, append, transferEncoding, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
-            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Void>>>() {
-                @Override
-                public Observable<ServiceResponse<Void>> call(Response<ResponseBody> response) {
-                    try {
-                        ServiceResponse<Void> clientResponse = appendDelegate(response);
-                        return Observable.just(clientResponse);
-                    } catch (Throwable t) {
-                        return Observable.error(t);
-                    }
-                }
-            });
-    }
-
-    /**
-     * Appends to the specified file. This method does not support multiple concurrent appends to the file. NOTE: Concurrent append and normal (serial) append CANNOT be used interchangeably. Once a file has been appended to using either append option, it can only be appended to using that append option. Use the ConcurrentAppend option if you would like support for concurrent appends.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param directFilePath The Data Lake Store path (starting with '/') of the file to which to append.
-     * @param streamContents The file contents to include when appending to the file.
-     * @param offset The optional offset in the stream to begin the append operation. Default is to append at the end of the stream.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public ServiceResponse<Void> append(String accountName, String directFilePath, byte[] streamContents, Long offset) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return appendAsync(accountName, directFilePath, streamContents, offset).toBlocking().single();
-    }
-
-    /**
-     * Appends to the specified file. This method does not support multiple concurrent appends to the file. NOTE: Concurrent append and normal (serial) append CANNOT be used interchangeably. Once a file has been appended to using either append option, it can only be appended to using that append option. Use the ConcurrentAppend option if you would like support for concurrent appends.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param directFilePath The Data Lake Store path (starting with '/') of the file to which to append.
-     * @param streamContents The file contents to include when appending to the file.
-     * @param offset The optional offset in the stream to begin the append operation. Default is to append at the end of the stream.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    public ServiceCall<Void> appendAsync(String accountName, String directFilePath, byte[] streamContents, Long offset, final ServiceCallback<Void> serviceCallback) {
-        return ServiceCall.create(appendAsync(accountName, directFilePath, streamContents, offset), serviceCallback);
-    }
-
-    /**
-     * Appends to the specified file. This method does not support multiple concurrent appends to the file. NOTE: Concurrent append and normal (serial) append CANNOT be used interchangeably. Once a file has been appended to using either append option, it can only be appended to using that append option. Use the ConcurrentAppend option if you would like support for concurrent appends.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param directFilePath The Data Lake Store path (starting with '/') of the file to which to append.
-     * @param streamContents The file contents to include when appending to the file.
-     * @param offset The optional offset in the stream to begin the append operation. Default is to append at the end of the stream.
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public Observable<ServiceResponse<Void>> appendAsync(String accountName, String directFilePath, byte[] streamContents, Long offset) {
-        if (accountName == null) {
-            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
-        }
-        if (this.client.adlsFileSystemDnsSuffix() == null) {
-            throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null.");
-        }
-        if (directFilePath == null) {
-            throw new IllegalArgumentException("Parameter directFilePath is required and cannot be null.");
-        }
-        if (streamContents == null) {
-            throw new IllegalArgumentException("Parameter streamContents is required and cannot be null.");
-        }
-        if (this.client.apiVersion() == null) {
-            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
-        }
-        final String op = "APPEND";
-        final String append = "true";
-        final String transferEncoding = "chunked";
-        String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix());
-        RequestBody streamContentsConverted = RequestBody.create(MediaType.parse("application/octet-stream"), streamContents);
-        return service.append(directFilePath, streamContentsConverted, offset, op, append, transferEncoding, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
-            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Void>>>() {
-                @Override
-                public Observable<ServiceResponse<Void>> call(Response<ResponseBody> response) {
-                    try {
-                        ServiceResponse<Void> clientResponse = appendDelegate(response);
-                        return Observable.just(clientResponse);
-                    } catch (Throwable t) {
-                        return Observable.error(t);
-                    }
-                }
-            });
-    }
-
-    private ServiceResponse<Void> appendDelegate(Response<ResponseBody> response) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return new AzureServiceResponseBuilder<Void, AdlsErrorException>(this.client.mapperAdapter())
-                .register(200, new TypeToken<Void>() { }.getType())
-                .registerError(AdlsErrorException.class)
-                .build(response);
-    }
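[Editor's note: for reference, a minimal blocking use of the append operation being deleted here might have looked like the sketch below. It assumes the FileSystems interface mirrors the implementation above and that a configured FileSystems instance is already available; the account and path values are placeholders, not part of this patch.]

    import com.microsoft.azure.management.datalake.store.FileSystems;
    import com.microsoft.azure.management.datalake.store.models.AdlsErrorException;
    import java.io.IOException;

    class AppendSketch {
        // Hypothetical helper: serially appends UTF-8 text, blocking until the call completes.
        static void appendText(FileSystems fileSystems, String account, String path, String text)
                throws AdlsErrorException, IOException {
            // A null offset appends at the end of the stream (the documented default).
            fileSystems.append(account, path, text.getBytes("UTF-8"), null);
        }
    }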
-
-    /**
-     * Creates a file with optionally specified content.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param directFilePath The Data Lake Store path (starting with '/') of the file to create.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public ServiceResponse<Void> create(String accountName, String directFilePath) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return createAsync(accountName, directFilePath).toBlocking().single();
-    }
-
-    /**
-     * Creates a file with optionally specified content.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param directFilePath The Data Lake Store path (starting with '/') of the file to create.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    public ServiceCall<Void> createAsync(String accountName, String directFilePath, final ServiceCallback<Void> serviceCallback) {
-        return ServiceCall.create(createAsync(accountName, directFilePath), serviceCallback);
-    }
-
-    /**
-     * Creates a file with optionally specified content.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param directFilePath The Data Lake Store path (starting with '/') of the file to create.
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public Observable<ServiceResponse<Void>> createAsync(String accountName, String directFilePath) {
-        if (accountName == null) {
-            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
-        }
-        if (this.client.adlsFileSystemDnsSuffix() == null) {
-            throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null.");
-        }
-        if (directFilePath == null) {
-            throw new IllegalArgumentException("Parameter directFilePath is required and cannot be null.");
-        }
-        if (this.client.apiVersion() == null) {
-            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
-        }
-        final String op = "CREATE";
-        final String write = "true";
-        final String transferEncoding = "chunked";
-        final byte[] streamContents = new byte[0];
-        final Boolean overwrite = null;
-        String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix());
-        RequestBody streamContentsConverted = RequestBody.create(MediaType.parse("application/octet-stream"), new byte[0]);
-        if (streamContents != null) {
-            streamContentsConverted = RequestBody.create(MediaType.parse("application/octet-stream"), streamContents);
-        }
-        return service.create(directFilePath, streamContentsConverted, overwrite, op, write, transferEncoding, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
-            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Void>>>() {
-                @Override
-                public Observable<ServiceResponse<Void>> call(Response<ResponseBody> response) {
-                    try {
-                        ServiceResponse<Void> clientResponse = createDelegate(response);
-                        return Observable.just(clientResponse);
-                    } catch (Throwable t) {
-                        return Observable.error(t);
-                    }
-                }
-            });
-    }
-
-    /**
-     * Creates a file with optionally specified content.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param directFilePath The Data Lake Store path (starting with '/') of the file to create.
-     * @param streamContents The file contents to include when creating the file. This parameter is optional, resulting in an empty file if not specified.
-     * @param overwrite The indication of if the file should be overwritten.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public ServiceResponse<Void> create(String accountName, String directFilePath, byte[] streamContents, Boolean overwrite) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return createAsync(accountName, directFilePath, streamContents, overwrite).toBlocking().single();
-    }
-
-    /**
-     * Creates a file with optionally specified content.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param directFilePath The Data Lake Store path (starting with '/') of the file to create.
-     * @param streamContents The file contents to include when creating the file. This parameter is optional, resulting in an empty file if not specified.
-     * @param overwrite The indication of if the file should be overwritten.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    public ServiceCall<Void> createAsync(String accountName, String directFilePath, byte[] streamContents, Boolean overwrite, final ServiceCallback<Void> serviceCallback) {
-        return ServiceCall.create(createAsync(accountName, directFilePath, streamContents, overwrite), serviceCallback);
-    }
-
-    /**
-     * Creates a file with optionally specified content.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param directFilePath The Data Lake Store path (starting with '/') of the file to create.
-     * @param streamContents The file contents to include when creating the file. This parameter is optional, resulting in an empty file if not specified.
-     * @param overwrite The indication of if the file should be overwritten.
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public Observable<ServiceResponse<Void>> createAsync(String accountName, String directFilePath, byte[] streamContents, Boolean overwrite) {
-        if (accountName == null) {
-            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
-        }
-        if (this.client.adlsFileSystemDnsSuffix() == null) {
-            throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null.");
-        }
-        if (directFilePath == null) {
-            throw new IllegalArgumentException("Parameter directFilePath is required and cannot be null.");
-        }
-        if (this.client.apiVersion() == null) {
-            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
-        }
-        final String op = "CREATE";
-        final String write = "true";
-        final String transferEncoding = "chunked";
-        String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix());
-        RequestBody streamContentsConverted = RequestBody.create(MediaType.parse("application/octet-stream"), new byte[0]);
-        if (streamContents != null) {
-            streamContentsConverted = RequestBody.create(MediaType.parse("application/octet-stream"), streamContents);
-        }
-        return service.create(directFilePath, streamContentsConverted, overwrite, op, write, transferEncoding, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
-            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Void>>>() {
-                @Override
-                public Observable<ServiceResponse<Void>> call(Response<ResponseBody> response) {
-                    try {
-                        ServiceResponse<Void> clientResponse = createDelegate(response);
-                        return Observable.just(clientResponse);
-                    } catch (Throwable t) {
-                        return Observable.error(t);
-                    }
-                }
-            });
-    }
-
-    private ServiceResponse<Void> createDelegate(Response<ResponseBody> response) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return new AzureServiceResponseBuilder<Void, AdlsErrorException>(this.client.mapperAdapter())
-                .register(201, new TypeToken<Void>() { }.getType())
-                .registerError(AdlsErrorException.class)
-                .build(response);
-    }
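[Editor's note: a matching blocking create call, under the same assumptions as the append sketch above, might have looked like this. The overwrite flag and contents are placeholders.]

    import com.microsoft.azure.management.datalake.store.FileSystems;
    import com.microsoft.azure.management.datalake.store.models.AdlsErrorException;
    import java.io.IOException;

    class CreateSketch {
        // Hypothetical helper: creates (or, with overwrite = true, replaces) a small file.
        static void createFile(FileSystems fileSystems, String account, String path, byte[] contents)
                throws AdlsErrorException, IOException {
            // Per the Javadoc above, contents may be null, producing an empty file.
            fileSystems.create(account, path, contents, true);
        }
    }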
-
-    /**
-     * Opens and reads from the specified file.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param directFilePath The Data Lake Store path (starting with '/') of the file to open.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the InputStream object wrapped in {@link ServiceResponse} if successful.
-     */
-    public ServiceResponse<InputStream> open(String accountName, String directFilePath) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return openAsync(accountName, directFilePath).toBlocking().single();
-    }
-
-    /**
-     * Opens and reads from the specified file.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param directFilePath The Data Lake Store path (starting with '/') of the file to open.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    public ServiceCall<InputStream> openAsync(String accountName, String directFilePath, final ServiceCallback<InputStream> serviceCallback) {
-        return ServiceCall.create(openAsync(accountName, directFilePath), serviceCallback);
-    }
-
-    /**
-     * Opens and reads from the specified file.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param directFilePath The Data Lake Store path (starting with '/') of the file to open.
-     * @return the observable to the InputStream object
-     */
-    public Observable<ServiceResponse<InputStream>> openAsync(String accountName, String directFilePath) {
-        if (accountName == null) {
-            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
-        }
-        if (this.client.adlsFileSystemDnsSuffix() == null) {
-            throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null.");
-        }
-        if (directFilePath == null) {
-            throw new IllegalArgumentException("Parameter directFilePath is required and cannot be null.");
-        }
-        if (this.client.apiVersion() == null) {
-            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
-        }
-        final String op = "OPEN";
-        final String read = "true";
-        final Long length = null;
-        final Long offset = null;
-        String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix());
-        return service.open(directFilePath, length, offset, op, read, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
-            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<InputStream>>>() {
-                @Override
-                public Observable<ServiceResponse<InputStream>> call(Response<ResponseBody> response) {
-                    try {
-                        ServiceResponse<InputStream> clientResponse = openDelegate(response);
-                        return Observable.just(clientResponse);
-                    } catch (Throwable t) {
-                        return Observable.error(t);
-                    }
-                }
-            });
-    }
-
-    /**
-     * Opens and reads from the specified file.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param directFilePath The Data Lake Store path (starting with '/') of the file to open.
-     * @param length the Long value
-     * @param offset the Long value
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the InputStream object wrapped in {@link ServiceResponse} if successful.
-     */
-    public ServiceResponse<InputStream> open(String accountName, String directFilePath, Long length, Long offset) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return openAsync(accountName, directFilePath, length, offset).toBlocking().single();
-    }
-
-    /**
-     * Opens and reads from the specified file.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param directFilePath The Data Lake Store path (starting with '/') of the file to open.
-     * @param length the Long value
-     * @param offset the Long value
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    public ServiceCall<InputStream> openAsync(String accountName, String directFilePath, Long length, Long offset, final ServiceCallback<InputStream> serviceCallback) {
-        return ServiceCall.create(openAsync(accountName, directFilePath, length, offset), serviceCallback);
-    }
-
-    /**
-     * Opens and reads from the specified file.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param directFilePath The Data Lake Store path (starting with '/') of the file to open.
-     * @param length the Long value
-     * @param offset the Long value
-     * @return the observable to the InputStream object
-     */
-    public Observable<ServiceResponse<InputStream>> openAsync(String accountName, String directFilePath, Long length, Long offset) {
-        if (accountName == null) {
-            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
-        }
-        if (this.client.adlsFileSystemDnsSuffix() == null) {
-            throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null.");
-        }
-        if (directFilePath == null) {
-            throw new IllegalArgumentException("Parameter directFilePath is required and cannot be null.");
-        }
-        if (this.client.apiVersion() == null) {
-            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
-        }
-        final String op = "OPEN";
-        final String read = "true";
-        String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix());
-        return service.open(directFilePath, length, offset, op, read, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
-            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<InputStream>>>() {
-                @Override
-                public Observable<ServiceResponse<InputStream>> call(Response<ResponseBody> response) {
-                    try {
-                        ServiceResponse<InputStream> clientResponse = openDelegate(response);
-                        return Observable.just(clientResponse);
-                    } catch (Throwable t) {
-                        return Observable.error(t);
-                    }
-                }
-            });
-    }
-
-    private ServiceResponse<InputStream> openDelegate(Response<ResponseBody> response) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return new AzureServiceResponseBuilder<InputStream, AdlsErrorException>(this.client.mapperAdapter())
-                .register(200, new TypeToken<InputStream>() { }.getType())
-                .registerError(AdlsErrorException.class)
-                .build(response);
-    }
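[Editor's note: a blocking read of a byte range, under the same assumptions; the getBody() accessor on ServiceResponse is assumed for this client-runtime version.]

    import com.microsoft.azure.management.datalake.store.FileSystems;
    import com.microsoft.azure.management.datalake.store.models.AdlsErrorException;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.InputStream;

    class OpenSketch {
        // Hypothetical helper: reads a range of a file; null length/offset read the whole file.
        static byte[] readRange(FileSystems fileSystems, String account, String path, Long length, Long offset)
                throws AdlsErrorException, IOException {
            try (InputStream in = fileSystems.open(account, path, length, offset).getBody()) {
                ByteArrayOutputStream out = new ByteArrayOutputStream();
                byte[] buffer = new byte[8192];
                int n;
                while ((n = in.read(buffer)) != -1) {
                    out.write(buffer, 0, n);
                }
                return out.toByteArray();
            }
        }
    }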
-
-    /**
-     * Sets the Access Control List (ACL) for a file or folder.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param setAclFilePath The Data Lake Store path (starting with '/') of the file or directory on which to set the ACL.
-     * @param aclspec The ACL spec included in ACL creation operations in the format '[default:]user|group|other::r|-w|-x|-'
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public ServiceResponse<Void> setAcl(String accountName, String setAclFilePath, String aclspec) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return setAclAsync(accountName, setAclFilePath, aclspec).toBlocking().single();
-    }
-
-    /**
-     * Sets the Access Control List (ACL) for a file or folder.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param setAclFilePath The Data Lake Store path (starting with '/') of the file or directory on which to set the ACL.
-     * @param aclspec The ACL spec included in ACL creation operations in the format '[default:]user|group|other::r|-w|-x|-'
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    public ServiceCall<Void> setAclAsync(String accountName, String setAclFilePath, String aclspec, final ServiceCallback<Void> serviceCallback) {
-        return ServiceCall.create(setAclAsync(accountName, setAclFilePath, aclspec), serviceCallback);
-    }
-
-    /**
-     * Sets the Access Control List (ACL) for a file or folder.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param setAclFilePath The Data Lake Store path (starting with '/') of the file or directory on which to set the ACL.
-     * @param aclspec The ACL spec included in ACL creation operations in the format '[default:]user|group|other::r|-w|-x|-'
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public Observable<ServiceResponse<Void>> setAclAsync(String accountName, String setAclFilePath, String aclspec) {
-        if (accountName == null) {
-            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
-        }
-        if (this.client.adlsFileSystemDnsSuffix() == null) {
-            throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null.");
-        }
-        if (setAclFilePath == null) {
-            throw new IllegalArgumentException("Parameter setAclFilePath is required and cannot be null.");
-        }
-        if (aclspec == null) {
-            throw new IllegalArgumentException("Parameter aclspec is required and cannot be null.");
-        }
-        if (this.client.apiVersion() == null) {
-            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
-        }
-        final String op = "SETACL";
-        String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix());
-        return service.setAcl(setAclFilePath, aclspec, op, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
-            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Void>>>() {
-                @Override
-                public Observable<ServiceResponse<Void>> call(Response<ResponseBody> response) {
-                    try {
-                        ServiceResponse<Void> clientResponse = setAclDelegate(response);
-                        return Observable.just(clientResponse);
-                    } catch (Throwable t) {
-                        return Observable.error(t);
-                    }
-                }
-            });
-    }
-
-    private ServiceResponse<Void> setAclDelegate(Response<ResponseBody> response) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return new AzureServiceResponseBuilder<Void, AdlsErrorException>(this.client.mapperAdapter())
-                .register(200, new TypeToken<Void>() { }.getType())
-                .registerError(AdlsErrorException.class)
-                .build(response);
-    }
-
-    /**
-     * Modifies existing Access Control List (ACL) entries on a file or folder.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param modifyAclFilePath The Data Lake Store path (starting with '/') of the file or directory with the ACL being modified.
-     * @param aclspec The ACL specification included in ACL modification operations in the format '[default:]user|group|other::r|-w|-x|-'
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public ServiceResponse<Void> modifyAclEntries(String accountName, String modifyAclFilePath, String aclspec) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return modifyAclEntriesAsync(accountName, modifyAclFilePath, aclspec).toBlocking().single();
-    }
-
-    /**
-     * Modifies existing Access Control List (ACL) entries on a file or folder.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param modifyAclFilePath The Data Lake Store path (starting with '/') of the file or directory with the ACL being modified.
-     * @param aclspec The ACL specification included in ACL modification operations in the format '[default:]user|group|other::r|-w|-x|-'
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    public ServiceCall<Void> modifyAclEntriesAsync(String accountName, String modifyAclFilePath, String aclspec, final ServiceCallback<Void> serviceCallback) {
-        return ServiceCall.create(modifyAclEntriesAsync(accountName, modifyAclFilePath, aclspec), serviceCallback);
-    }
-
-    /**
-     * Modifies existing Access Control List (ACL) entries on a file or folder.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param modifyAclFilePath The Data Lake Store path (starting with '/') of the file or directory with the ACL being modified.
-     * @param aclspec The ACL specification included in ACL modification operations in the format '[default:]user|group|other::r|-w|-x|-'
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public Observable<ServiceResponse<Void>> modifyAclEntriesAsync(String accountName, String modifyAclFilePath, String aclspec) {
-        if (accountName == null) {
-            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
-        }
-        if (this.client.adlsFileSystemDnsSuffix() == null) {
-            throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null.");
-        }
-        if (modifyAclFilePath == null) {
-            throw new IllegalArgumentException("Parameter modifyAclFilePath is required and cannot be null.");
-        }
-        if (aclspec == null) {
-            throw new IllegalArgumentException("Parameter aclspec is required and cannot be null.");
-        }
-        if (this.client.apiVersion() == null) {
-            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
-        }
-        final String op = "MODIFYACLENTRIES";
-        String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix());
-        return service.modifyAclEntries(modifyAclFilePath, aclspec, op, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
-            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Void>>>() {
-                @Override
-                public Observable<ServiceResponse<Void>> call(Response<ResponseBody> response) {
-                    try {
-                        ServiceResponse<Void> clientResponse = modifyAclEntriesDelegate(response);
-                        return Observable.just(clientResponse);
-                    } catch (Throwable t) {
-                        return Observable.error(t);
-                    }
-                }
-            });
-    }
-
-    private ServiceResponse<Void> modifyAclEntriesDelegate(Response<ResponseBody> response) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return new AzureServiceResponseBuilder<Void, AdlsErrorException>(this.client.mapperAdapter())
-                .register(200, new TypeToken<Void>() { }.getType())
-                .registerError(AdlsErrorException.class)
-                .build(response);
-    }
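[Editor's note: a sketch of the two ACL-write operations together, same assumptions as above; the ACL spec strings follow the format documented in the Javadoc, and the user object ID is a placeholder.]

    import com.microsoft.azure.management.datalake.store.FileSystems;
    import com.microsoft.azure.management.datalake.store.models.AdlsErrorException;
    import java.io.IOException;

    class AclSketch {
        // Hypothetical helper: replaces the full ACL, then grants one extra user entry.
        static void applyAcls(FileSystems fileSystems, String account, String path, String userObjectId)
                throws AdlsErrorException, IOException {
            // SETACL replaces the entire ACL, so the spec covers user, group and other.
            fileSystems.setAcl(account, path, "user::rwx,group::r-x,other::---");
            // MODIFYACLENTRIES only touches the entries named in the spec.
            fileSystems.modifyAclEntries(account, path, "user:" + userObjectId + ":r-x");
        }
    }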
-
-    /**
-     * Removes existing Access Control List (ACL) entries for a file or folder.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param removeAclFilePath The Data Lake Store path (starting with '/') of the file or directory with the ACL being removed.
-     * @param aclspec The ACL spec included in ACL removal operations in the format '[default:]user|group|other'
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public ServiceResponse<Void> removeAclEntries(String accountName, String removeAclFilePath, String aclspec) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return removeAclEntriesAsync(accountName, removeAclFilePath, aclspec).toBlocking().single();
-    }
-
-    /**
-     * Removes existing Access Control List (ACL) entries for a file or folder.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param removeAclFilePath The Data Lake Store path (starting with '/') of the file or directory with the ACL being removed.
-     * @param aclspec The ACL spec included in ACL removal operations in the format '[default:]user|group|other'
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    public ServiceCall<Void> removeAclEntriesAsync(String accountName, String removeAclFilePath, String aclspec, final ServiceCallback<Void> serviceCallback) {
-        return ServiceCall.create(removeAclEntriesAsync(accountName, removeAclFilePath, aclspec), serviceCallback);
-    }
-
-    /**
-     * Removes existing Access Control List (ACL) entries for a file or folder.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param removeAclFilePath The Data Lake Store path (starting with '/') of the file or directory with the ACL being removed.
-     * @param aclspec The ACL spec included in ACL removal operations in the format '[default:]user|group|other'
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public Observable<ServiceResponse<Void>> removeAclEntriesAsync(String accountName, String removeAclFilePath, String aclspec) {
-        if (accountName == null) {
-            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
-        }
-        if (this.client.adlsFileSystemDnsSuffix() == null) {
-            throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null.");
-        }
-        if (removeAclFilePath == null) {
-            throw new IllegalArgumentException("Parameter removeAclFilePath is required and cannot be null.");
-        }
-        if (aclspec == null) {
-            throw new IllegalArgumentException("Parameter aclspec is required and cannot be null.");
-        }
-        if (this.client.apiVersion() == null) {
-            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
-        }
-        final String op = "REMOVEACLENTRIES";
-        String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix());
-        return service.removeAclEntries(removeAclFilePath, aclspec, op, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
-            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Void>>>() {
-                @Override
-                public Observable<ServiceResponse<Void>> call(Response<ResponseBody> response) {
-                    try {
-                        ServiceResponse<Void> clientResponse = removeAclEntriesDelegate(response);
-                        return Observable.just(clientResponse);
-                    } catch (Throwable t) {
-                        return Observable.error(t);
-                    }
-                }
-            });
-    }
-
-    private ServiceResponse<Void> removeAclEntriesDelegate(Response<ResponseBody> response) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return new AzureServiceResponseBuilder<Void, AdlsErrorException>(this.client.mapperAdapter())
-                .register(200, new TypeToken<Void>() { }.getType())
-                .registerError(AdlsErrorException.class)
-                .build(response);
-    }
-
-    /**
-     * Gets Access Control List (ACL) entries for the specified file or directory.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param aclFilePath The Data Lake Store path (starting with '/') of the file or directory for which to get the ACL.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the AclStatusResult object wrapped in {@link ServiceResponse} if successful.
-     */
-    public ServiceResponse<AclStatusResult> getAclStatus(String accountName, String aclFilePath) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return getAclStatusAsync(accountName, aclFilePath).toBlocking().single();
-    }
-
-    /**
-     * Gets Access Control List (ACL) entries for the specified file or directory.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param aclFilePath The Data Lake Store path (starting with '/') of the file or directory for which to get the ACL.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    public ServiceCall<AclStatusResult> getAclStatusAsync(String accountName, String aclFilePath, final ServiceCallback<AclStatusResult> serviceCallback) {
-        return ServiceCall.create(getAclStatusAsync(accountName, aclFilePath), serviceCallback);
-    }
-
-    /**
-     * Gets Access Control List (ACL) entries for the specified file or directory.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param aclFilePath The Data Lake Store path (starting with '/') of the file or directory for which to get the ACL.
-     * @return the observable to the AclStatusResult object
-     */
-    public Observable<ServiceResponse<AclStatusResult>> getAclStatusAsync(String accountName, String aclFilePath) {
-        if (accountName == null) {
-            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
-        }
-        if (this.client.adlsFileSystemDnsSuffix() == null) {
-            throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null.");
-        }
-        if (aclFilePath == null) {
-            throw new IllegalArgumentException("Parameter aclFilePath is required and cannot be null.");
-        }
-        if (this.client.apiVersion() == null) {
-            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
-        }
-        final String op = "GETACLSTATUS";
-        String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix());
-        return service.getAclStatus(aclFilePath, op, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
-            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<AclStatusResult>>>() {
-                @Override
-                public Observable<ServiceResponse<AclStatusResult>> call(Response<ResponseBody> response) {
-                    try {
-                        ServiceResponse<AclStatusResult> clientResponse = getAclStatusDelegate(response);
-                        return Observable.just(clientResponse);
-                    } catch (Throwable t) {
-                        return Observable.error(t);
-                    }
-                }
-            });
-    }
-
-    private ServiceResponse<AclStatusResult> getAclStatusDelegate(Response<ResponseBody> response) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return new AzureServiceResponseBuilder<AclStatusResult, AdlsErrorException>(this.client.mapperAdapter())
-                .register(200, new TypeToken<AclStatusResult>() { }.getType())
-                .registerError(AdlsErrorException.class)
-                .build(response);
-    }
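[Editor's note: a sketch of reading an ACL back via getAclStatus, using the AclStatus and AclStatusResult models deleted later in this patch; getBody() and a List<String> entries() accessor are assumed.]

    import com.microsoft.azure.management.datalake.store.FileSystems;
    import com.microsoft.azure.management.datalake.store.models.AclStatus;
    import com.microsoft.azure.management.datalake.store.models.AdlsErrorException;
    import java.io.IOException;

    class AclStatusSketch {
        // Hypothetical helper: prints owner, group and each ACL entry of a path.
        static void printAcl(FileSystems fileSystems, String account, String path)
                throws AdlsErrorException, IOException {
            AclStatus status = fileSystems.getAclStatus(account, path).getBody().aclStatus();
            System.out.println("owner=" + status.owner() + " group=" + status.group());
            for (String entry : status.entries()) {
                System.out.println(entry);
            }
        }
    }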
-
-    /**
-     * Deletes the requested file or directory, optionally recursively.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param filePath The Data Lake Store path (starting with '/') of the file or directory to delete.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the FileOperationResult object wrapped in {@link ServiceResponse} if successful.
-     */
-    public ServiceResponse<FileOperationResult> delete(String accountName, String filePath) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return deleteAsync(accountName, filePath).toBlocking().single();
-    }
-
-    /**
-     * Deletes the requested file or directory, optionally recursively.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param filePath The Data Lake Store path (starting with '/') of the file or directory to delete.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    public ServiceCall<FileOperationResult> deleteAsync(String accountName, String filePath, final ServiceCallback<FileOperationResult> serviceCallback) {
-        return ServiceCall.create(deleteAsync(accountName, filePath), serviceCallback);
-    }
-
-    /**
-     * Deletes the requested file or directory, optionally recursively.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param filePath The Data Lake Store path (starting with '/') of the file or directory to delete.
-     * @return the observable to the FileOperationResult object
-     */
-    public Observable<ServiceResponse<FileOperationResult>> deleteAsync(String accountName, String filePath) {
-        if (accountName == null) {
-            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
-        }
-        if (this.client.adlsFileSystemDnsSuffix() == null) {
-            throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null.");
-        }
-        if (filePath == null) {
-            throw new IllegalArgumentException("Parameter filePath is required and cannot be null.");
-        }
-        if (this.client.apiVersion() == null) {
-            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
-        }
-        final String op = "DELETE";
-        final Boolean recursive = null;
-        String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix());
-        return service.delete(filePath, recursive, op, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
-            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<FileOperationResult>>>() {
-                @Override
-                public Observable<ServiceResponse<FileOperationResult>> call(Response<ResponseBody> response) {
-                    try {
-                        ServiceResponse<FileOperationResult> clientResponse = deleteDelegate(response);
-                        return Observable.just(clientResponse);
-                    } catch (Throwable t) {
-                        return Observable.error(t);
-                    }
-                }
-            });
-    }
-
-    /**
-     * Deletes the requested file or directory, optionally recursively.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param filePath The Data Lake Store path (starting with '/') of the file or directory to delete.
-     * @param recursive The optional switch indicating if the delete should be recursive
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the FileOperationResult object wrapped in {@link ServiceResponse} if successful.
-     */
-    public ServiceResponse<FileOperationResult> delete(String accountName, String filePath, Boolean recursive) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return deleteAsync(accountName, filePath, recursive).toBlocking().single();
-    }
-
-    /**
-     * Deletes the requested file or directory, optionally recursively.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param filePath The Data Lake Store path (starting with '/') of the file or directory to delete.
-     * @param recursive The optional switch indicating if the delete should be recursive
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    public ServiceCall<FileOperationResult> deleteAsync(String accountName, String filePath, Boolean recursive, final ServiceCallback<FileOperationResult> serviceCallback) {
-        return ServiceCall.create(deleteAsync(accountName, filePath, recursive), serviceCallback);
-    }
-
-    /**
-     * Deletes the requested file or directory, optionally recursively.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param filePath The Data Lake Store path (starting with '/') of the file or directory to delete.
-     * @param recursive The optional switch indicating if the delete should be recursive
-     * @return the observable to the FileOperationResult object
-     */
-    public Observable<ServiceResponse<FileOperationResult>> deleteAsync(String accountName, String filePath, Boolean recursive) {
-        if (accountName == null) {
-            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
-        }
-        if (this.client.adlsFileSystemDnsSuffix() == null) {
-            throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null.");
-        }
-        if (filePath == null) {
-            throw new IllegalArgumentException("Parameter filePath is required and cannot be null.");
-        }
-        if (this.client.apiVersion() == null) {
-            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
-        }
-        final String op = "DELETE";
-        String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix());
-        return service.delete(filePath, recursive, op, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
-            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<FileOperationResult>>>() {
-                @Override
-                public Observable<ServiceResponse<FileOperationResult>> call(Response<ResponseBody> response) {
-                    try {
-                        ServiceResponse<FileOperationResult> clientResponse = deleteDelegate(response);
-                        return Observable.just(clientResponse);
-                    } catch (Throwable t) {
-                        return Observable.error(t);
-                    }
-                }
-            });
-    }
-
-    private ServiceResponse<FileOperationResult> deleteDelegate(Response<ResponseBody> response) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return new AzureServiceResponseBuilder<FileOperationResult, AdlsErrorException>(this.client.mapperAdapter())
-                .register(200, new TypeToken<FileOperationResult>() { }.getType())
-                .registerError(AdlsErrorException.class)
-                .build(response);
-    }
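[Editor's note: a recursive delete under the same assumptions; the FileOperationResult payload is returned as-is since its fields are not shown in this patch.]

    import com.microsoft.azure.management.datalake.store.FileSystems;
    import com.microsoft.azure.management.datalake.store.models.AdlsErrorException;
    import com.microsoft.azure.management.datalake.store.models.FileOperationResult;
    import java.io.IOException;

    class DeleteSketch {
        // Hypothetical helper: recursively deletes a directory tree, returning the service result.
        static FileOperationResult deleteTree(FileSystems fileSystems, String account, String path)
                throws AdlsErrorException, IOException {
            return fileSystems.delete(account, path, true).getBody();
        }
    }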
-
-    /**
-     * Rename a file or directory.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param renameFilePath The Data Lake Store path (starting with '/') of the file or directory to move/rename.
-     * @param destination The path to move/rename the file or folder to
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the FileOperationResult object wrapped in {@link ServiceResponse} if successful.
-     */
-    public ServiceResponse<FileOperationResult> rename(String accountName, String renameFilePath, String destination) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return renameAsync(accountName, renameFilePath, destination).toBlocking().single();
-    }
-
-    /**
-     * Rename a file or directory.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param renameFilePath The Data Lake Store path (starting with '/') of the file or directory to move/rename.
-     * @param destination The path to move/rename the file or folder to
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    public ServiceCall<FileOperationResult> renameAsync(String accountName, String renameFilePath, String destination, final ServiceCallback<FileOperationResult> serviceCallback) {
-        return ServiceCall.create(renameAsync(accountName, renameFilePath, destination), serviceCallback);
-    }
-
-    /**
-     * Rename a file or directory.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param renameFilePath The Data Lake Store path (starting with '/') of the file or directory to move/rename.
-     * @param destination The path to move/rename the file or folder to
-     * @return the observable to the FileOperationResult object
-     */
-    public Observable<ServiceResponse<FileOperationResult>> renameAsync(String accountName, String renameFilePath, String destination) {
-        if (accountName == null) {
-            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
-        }
-        if (this.client.adlsFileSystemDnsSuffix() == null) {
-            throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null.");
-        }
-        if (renameFilePath == null) {
-            throw new IllegalArgumentException("Parameter renameFilePath is required and cannot be null.");
-        }
-        if (destination == null) {
-            throw new IllegalArgumentException("Parameter destination is required and cannot be null.");
-        }
-        if (this.client.apiVersion() == null) {
-            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
-        }
-        final String op = "RENAME";
-        String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix());
-        return service.rename(renameFilePath, destination, op, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
-            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<FileOperationResult>>>() {
-                @Override
-                public Observable<ServiceResponse<FileOperationResult>> call(Response<ResponseBody> response) {
-                    try {
-                        ServiceResponse<FileOperationResult> clientResponse = renameDelegate(response);
-                        return Observable.just(clientResponse);
-                    } catch (Throwable t) {
-                        return Observable.error(t);
-                    }
-                }
-            });
-    }
-
-    private ServiceResponse<FileOperationResult> renameDelegate(Response<ResponseBody> response) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return new AzureServiceResponseBuilder<FileOperationResult, AdlsErrorException>(this.client.mapperAdapter())
-                .register(200, new TypeToken<FileOperationResult>() { }.getType())
-                .registerError(AdlsErrorException.class)
-                .build(response);
-    }
-
-    /**
-     * Sets the owner of a file or directory.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param setOwnerFilePath The Data Lake Store path (starting with '/') of the file or directory for which to set the owner.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public ServiceResponse<Void> setOwner(String accountName, String setOwnerFilePath) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return setOwnerAsync(accountName, setOwnerFilePath).toBlocking().single();
-    }
-
-    /**
-     * Sets the owner of a file or directory.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param setOwnerFilePath The Data Lake Store path (starting with '/') of the file or directory for which to set the owner.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    public ServiceCall<Void> setOwnerAsync(String accountName, String setOwnerFilePath, final ServiceCallback<Void> serviceCallback) {
-        return ServiceCall.create(setOwnerAsync(accountName, setOwnerFilePath), serviceCallback);
-    }
-
-    /**
-     * Sets the owner of a file or directory.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param setOwnerFilePath The Data Lake Store path (starting with '/') of the file or directory for which to set the owner.
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public Observable<ServiceResponse<Void>> setOwnerAsync(String accountName, String setOwnerFilePath) {
-        if (accountName == null) {
-            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
-        }
-        if (this.client.adlsFileSystemDnsSuffix() == null) {
-            throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null.");
-        }
-        if (setOwnerFilePath == null) {
-            throw new IllegalArgumentException("Parameter setOwnerFilePath is required and cannot be null.");
-        }
-        if (this.client.apiVersion() == null) {
-            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
-        }
-        final String op = "SETOWNER";
-        final String owner = null;
-        final String group = null;
-        String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix());
-        return service.setOwner(setOwnerFilePath, owner, group, op, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
-            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Void>>>() {
-                @Override
-                public Observable<ServiceResponse<Void>> call(Response<ResponseBody> response) {
-                    try {
-                        ServiceResponse<Void> clientResponse = setOwnerDelegate(response);
-                        return Observable.just(clientResponse);
-                    } catch (Throwable t) {
-                        return Observable.error(t);
-                    }
-                }
-            });
-    }
-
-    /**
-     * Sets the owner of a file or directory.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param setOwnerFilePath The Data Lake Store path (starting with '/') of the file or directory for which to set the owner.
-     * @param owner The AAD Object ID of the user owner of the file or directory. If empty, the property will remain unchanged.
-     * @param group The AAD Object ID of the group owner of the file or directory. If empty, the property will remain unchanged.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public ServiceResponse<Void> setOwner(String accountName, String setOwnerFilePath, String owner, String group) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return setOwnerAsync(accountName, setOwnerFilePath, owner, group).toBlocking().single();
-    }
-
-    /**
-     * Sets the owner of a file or directory.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param setOwnerFilePath The Data Lake Store path (starting with '/') of the file or directory for which to set the owner.
-     * @param owner The AAD Object ID of the user owner of the file or directory. If empty, the property will remain unchanged.
-     * @param group The AAD Object ID of the group owner of the file or directory. If empty, the property will remain unchanged.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    public ServiceCall<Void> setOwnerAsync(String accountName, String setOwnerFilePath, String owner, String group, final ServiceCallback<Void> serviceCallback) {
-        return ServiceCall.create(setOwnerAsync(accountName, setOwnerFilePath, owner, group), serviceCallback);
-    }
-
-    /**
-     * Sets the owner of a file or directory.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param setOwnerFilePath The Data Lake Store path (starting with '/') of the file or directory for which to set the owner.
-     * @param owner The AAD Object ID of the user owner of the file or directory. If empty, the property will remain unchanged.
-     * @param group The AAD Object ID of the group owner of the file or directory. If empty, the property will remain unchanged.
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public Observable<ServiceResponse<Void>> setOwnerAsync(String accountName, String setOwnerFilePath, String owner, String group) {
-        if (accountName == null) {
-            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
-        }
-        if (this.client.adlsFileSystemDnsSuffix() == null) {
-            throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null.");
-        }
-        if (setOwnerFilePath == null) {
-            throw new IllegalArgumentException("Parameter setOwnerFilePath is required and cannot be null.");
-        }
-        if (this.client.apiVersion() == null) {
-            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
-        }
-        final String op = "SETOWNER";
-        String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix());
-        return service.setOwner(setOwnerFilePath, owner, group, op, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
-            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Void>>>() {
-                @Override
-                public Observable<ServiceResponse<Void>> call(Response<ResponseBody> response) {
-                    try {
-                        ServiceResponse<Void> clientResponse = setOwnerDelegate(response);
-                        return Observable.just(clientResponse);
-                    } catch (Throwable t) {
-                        return Observable.error(t);
-                    }
-                }
-            });
-    }
-
-    private ServiceResponse<Void> setOwnerDelegate(Response<ResponseBody> response) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return new AzureServiceResponseBuilder<Void, AdlsErrorException>(this.client.mapperAdapter())
-                .register(200, new TypeToken<Void>() { }.getType())
-                .registerError(AdlsErrorException.class)
-                .build(response);
-    }
-
-    /**
-     * Sets the permission of the file or folder.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param setPermissionFilePath The Data Lake Store path (starting with '/') of the file or directory for which to set the permission.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public ServiceResponse<Void> setPermission(String accountName, String setPermissionFilePath) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return setPermissionAsync(accountName, setPermissionFilePath).toBlocking().single();
-    }
-
-    /**
-     * Sets the permission of the file or folder.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param setPermissionFilePath The Data Lake Store path (starting with '/') of the file or directory for which to set the permission.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    public ServiceCall<Void> setPermissionAsync(String accountName, String setPermissionFilePath, final ServiceCallback<Void> serviceCallback) {
-        return ServiceCall.create(setPermissionAsync(accountName, setPermissionFilePath), serviceCallback);
-    }
-
-    /**
-     * Sets the permission of the file or folder.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param setPermissionFilePath The Data Lake Store path (starting with '/') of the file or directory for which to set the permission.
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public Observable<ServiceResponse<Void>> setPermissionAsync(String accountName, String setPermissionFilePath) {
-        if (accountName == null) {
-            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
-        }
-        if (this.client.adlsFileSystemDnsSuffix() == null) {
-            throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null.");
-        }
-        if (setPermissionFilePath == null) {
-            throw new IllegalArgumentException("Parameter setPermissionFilePath is required and cannot be null.");
-        }
-        if (this.client.apiVersion() == null) {
-            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
-        }
-        final String op = "SETPERMISSION";
-        final String permission = null;
-        String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix());
-        return service.setPermission(setPermissionFilePath, permission, op, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
-            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Void>>>() {
-                @Override
-                public Observable<ServiceResponse<Void>> call(Response<ResponseBody> response) {
-                    try {
-                        ServiceResponse<Void> clientResponse = setPermissionDelegate(response);
-                        return Observable.just(clientResponse);
-                    } catch (Throwable t) {
-                        return Observable.error(t);
-                    }
-                }
-            });
-    }
-
-    /**
-     * Sets the permission of the file or folder.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param setPermissionFilePath The Data Lake Store path (starting with '/') of the file or directory for which to set the permission.
-     * @param permission A string representation of the permission (i.e 'rwx'). If empty, this property remains unchanged.
-     * @throws AdlsErrorException exception thrown from REST call
-     * @throws IOException exception thrown from serialization/deserialization
-     * @throws IllegalArgumentException exception thrown from invalid parameters
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public ServiceResponse<Void> setPermission(String accountName, String setPermissionFilePath, String permission) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return setPermissionAsync(accountName, setPermissionFilePath, permission).toBlocking().single();
-    }
-
-    /**
-     * Sets the permission of the file or folder.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param setPermissionFilePath The Data Lake Store path (starting with '/') of the file or directory for which to set the permission.
-     * @param permission A string representation of the permission (i.e 'rwx'). If empty, this property remains unchanged.
-     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
-     * @return the {@link ServiceCall} object
-     */
-    public ServiceCall<Void> setPermissionAsync(String accountName, String setPermissionFilePath, String permission, final ServiceCallback<Void> serviceCallback) {
-        return ServiceCall.create(setPermissionAsync(accountName, setPermissionFilePath, permission), serviceCallback);
-    }
-
-    /**
-     * Sets the permission of the file or folder.
-     *
-     * @param accountName The Azure Data Lake Store account to execute filesystem operations on.
-     * @param setPermissionFilePath The Data Lake Store path (starting with '/') of the file or directory for which to set the permission.
-     * @param permission A string representation of the permission (i.e 'rwx'). If empty, this property remains unchanged.
-     * @return the {@link ServiceResponse} object if successful.
-     */
-    public Observable<ServiceResponse<Void>> setPermissionAsync(String accountName, String setPermissionFilePath, String permission) {
-        if (accountName == null) {
-            throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
-        }
-        if (this.client.adlsFileSystemDnsSuffix() == null) {
-            throw new IllegalArgumentException("Parameter this.client.adlsFileSystemDnsSuffix() is required and cannot be null.");
-        }
-        if (setPermissionFilePath == null) {
-            throw new IllegalArgumentException("Parameter setPermissionFilePath is required and cannot be null.");
-        }
-        if (this.client.apiVersion() == null) {
-            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
-        }
-        final String op = "SETPERMISSION";
-        String parameterizedHost = Joiner.on(", ").join("{accountName}", accountName, "{adlsFileSystemDnsSuffix}", this.client.adlsFileSystemDnsSuffix());
-        return service.setPermission(setPermissionFilePath, permission, op, this.client.apiVersion(), this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
-            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Void>>>() {
-                @Override
-                public Observable<ServiceResponse<Void>> call(Response<ResponseBody> response) {
-                    try {
-                        ServiceResponse<Void> clientResponse = setPermissionDelegate(response);
-                        return Observable.just(clientResponse);
-                    } catch (Throwable t) {
-                        return Observable.error(t);
-                    }
-                }
-            });
-    }
-
-    private ServiceResponse<Void> setPermissionDelegate(Response<ResponseBody> response) throws AdlsErrorException, IOException, IllegalArgumentException {
-        return new AzureServiceResponseBuilder<Void, AdlsErrorException>(this.client.mapperAdapter())
-                .register(200, new TypeToken<Void>() { }.getType())
-                .registerError(AdlsErrorException.class)
-                .build(response);
-    }
-
-}
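[Editor's note: a final sketch combining the remaining maintenance operations (rename, setOwner, setPermission), same assumptions as above. The WebHDFS-style octal mode string and the AAD object ID are placeholders.]

    import com.microsoft.azure.management.datalake.store.FileSystems;
    import com.microsoft.azure.management.datalake.store.models.AdlsErrorException;
    import java.io.IOException;

    class MaintenanceSketch {
        // Hypothetical helper: moves a file, hands it to a new AAD owner, and tightens its mode.
        static void moveAndSecure(FileSystems fileSystems, String account, String source,
                String destination, String ownerObjectId)
                throws AdlsErrorException, IOException {
            fileSystems.rename(account, source, destination);
            // Passing null for group leaves the group owner unchanged, per the Javadoc above.
            fileSystems.setOwner(account, destination, ownerObjectId, null);
            // An octal permission string is assumed here.
            fileSystems.setPermission(account, destination, "0750");
        }
    }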
f841c3b32617..61e54f8803eb 100644 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/implementation/package-info.java +++ b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/implementation/package-info.java @@ -5,7 +5,7 @@ // Code generated by Microsoft (R) AutoRest Code Generator. /** - * This package contains the implementation classes for DataLakeStoreFileSystemManagementClient. - * Creates an Azure Data Lake Store filesystem client. + * This package contains the implementation classes for DataLakeStoreAccountManagementClient. + * Creates an Azure Data Lake Store account management client. */ package com.microsoft.azure.management.datalake.store.implementation; diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AclStatus.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AclStatus.java deleted file mode 100644 index 5a995667054b..000000000000 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AclStatus.java +++ /dev/null @@ -1,117 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - */ - -package com.microsoft.azure.management.datalake.store.models; - -import java.util.List; - -/** - * Data Lake Store file or directory Access Control List information. - */ -public class AclStatus { - /** - * the list of ACLSpec entries on a file or directory. - */ - private List entries; - - /** - * the group owner, an AAD Object ID. - */ - private String group; - - /** - * the user owner, an AAD Object ID. - */ - private String owner; - - /** - * the indicator of whether the sticky bit is on or off. - */ - private Boolean stickyBit; - - /** - * Get the entries value. - * - * @return the entries value - */ - public List entries() { - return this.entries; - } - - /** - * Set the entries value. - * - * @param entries the entries value to set - * @return the AclStatus object itself. - */ - public AclStatus withEntries(List entries) { - this.entries = entries; - return this; - } - - /** - * Get the group value. - * - * @return the group value - */ - public String group() { - return this.group; - } - - /** - * Set the group value. - * - * @param group the group value to set - * @return the AclStatus object itself. - */ - public AclStatus withGroup(String group) { - this.group = group; - return this; - } - - /** - * Get the owner value. - * - * @return the owner value - */ - public String owner() { - return this.owner; - } - - /** - * Set the owner value. - * - * @param owner the owner value to set - * @return the AclStatus object itself. - */ - public AclStatus withOwner(String owner) { - this.owner = owner; - return this; - } - - /** - * Get the stickyBit value. - * - * @return the stickyBit value - */ - public Boolean stickyBit() { - return this.stickyBit; - } - - /** - * Set the stickyBit value. - * - * @param stickyBit the stickyBit value to set - * @return the AclStatus object itself. 
- */ - public AclStatus withStickyBit(Boolean stickyBit) { - this.stickyBit = stickyBit; - return this; - } - -} diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AclStatusResult.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AclStatusResult.java deleted file mode 100644 index 9a760a46f156..000000000000 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AclStatusResult.java +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - */ - -package com.microsoft.azure.management.datalake.store.models; - -import com.fasterxml.jackson.annotation.JsonProperty; - -/** - * Data Lake Store file or directory Access Control List information. - */ -public class AclStatusResult { - /** - * the AclStatus object for a given file or directory. - */ - @JsonProperty(value = "AclStatus") - private AclStatus aclStatus; - - /** - * Get the aclStatus value. - * - * @return the aclStatus value - */ - public AclStatus aclStatus() { - return this.aclStatus; - } - - /** - * Set the aclStatus value. - * - * @param aclStatus the aclStatus value to set - * @return the AclStatusResult object itself. - */ - public AclStatusResult withAclStatus(AclStatus aclStatus) { - this.aclStatus = aclStatus; - return this; - } - -} diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsAccessControlException.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsAccessControlException.java deleted file mode 100644 index c003a68f5d70..000000000000 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsAccessControlException.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - */ - -package com.microsoft.azure.management.datalake.store.models; - -import com.fasterxml.jackson.annotation.JsonTypeInfo; -import com.fasterxml.jackson.annotation.JsonTypeName; - -/** - * A WebHDFS exception thrown indicating that access is denied due to - * insufficient permissions. Thrown when a 403 error response code is - * returned (forbidden). - */ -@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "exception") -@JsonTypeName("AccessControlException") -public class AdlsAccessControlException extends AdlsRemoteException { -} diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsBadOffsetException.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsBadOffsetException.java deleted file mode 100644 index fd0ec0893481..000000000000 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsBadOffsetException.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. 
- * - * Code generated by Microsoft (R) AutoRest Code Generator. - */ - -package com.microsoft.azure.management.datalake.store.models; - -import com.fasterxml.jackson.annotation.JsonTypeInfo; -import com.fasterxml.jackson.annotation.JsonTypeName; - -/** - * A WebHDFS exception thrown indicating the append or read is from a bad - * offset. Thrown when a 400 error response code is returned for append and - * open operations (Bad request). - */ -@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "exception") -@JsonTypeName("BadOffsetException") -public class AdlsBadOffsetException extends AdlsRemoteException { -} diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsError.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsError.java deleted file mode 100644 index fcaa8b2e6a06..000000000000 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsError.java +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - */ - -package com.microsoft.azure.management.datalake.store.models; - -import com.fasterxml.jackson.annotation.JsonProperty; - -/** - * Data Lake Store filesystem error containing a specific WebHDFS exception. - */ -public class AdlsError { - /** - * the object representing the actual WebHDFS exception being returned. - */ - @JsonProperty(value = "RemoteException", access = JsonProperty.Access.WRITE_ONLY) - private AdlsRemoteException remoteException; - - /** - * Get the remoteException value. - * - * @return the remoteException value - */ - public AdlsRemoteException remoteException() { - return this.remoteException; - } - -} diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsErrorException.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsErrorException.java deleted file mode 100644 index 4c70a5fde718..000000000000 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsErrorException.java +++ /dev/null @@ -1,87 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - */ - -package com.microsoft.azure.management.datalake.store.models; - -import com.microsoft.rest.RestException; -import retrofit2.Response; - -/** - * Exception thrown for an invalid response with AdlsError information. - */ -public class AdlsErrorException extends RestException { - /** - * Information about the associated HTTP response. - */ - private Response response; - /** - * The actual response body. - */ - private AdlsError body; - /** - * Initializes a new instance of the AdlsErrorException class. - */ - public AdlsErrorException() { } - /** - * Initializes a new instance of the AdlsErrorException class. - * - * @param message The exception message. - */ - public AdlsErrorException(final String message) { - super(message); - } - /** - * Initializes a new instance of the AdlsErrorException class. 
- * - * @param message the exception message - * @param cause exception that caused this exception to occur - */ - public AdlsErrorException(final String message, final Throwable cause) { - super(message, cause); - } - /** - * Initializes a new instance of the AdlsErrorException class. - * - * @param cause exception that caused this exception to occur - */ - public AdlsErrorException(final Throwable cause) { - super(cause); - } - /** - * Gets information about the associated HTTP response. - * - * @return the HTTP response - */ - public Response getResponse() { - return response; - } - /** - * Gets the HTTP response body. - * - * @return the response body - */ - public AdlsError getBody() { - return body; - } - /** - * Sets the HTTP response. - * - * @param response the HTTP response - */ - public void setResponse(Response response) { - this.response = response; - } - /** - * Sets the HTTP response body. - * - * @param body the response body - */ - public void setBody(AdlsError body) { - this.body = body; - } -} diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsFileAlreadyExistsException.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsFileAlreadyExistsException.java deleted file mode 100644 index c6c6dbdc85a2..000000000000 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsFileAlreadyExistsException.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - */ - -package com.microsoft.azure.management.datalake.store.models; - -import com.fasterxml.jackson.annotation.JsonTypeInfo; -import com.fasterxml.jackson.annotation.JsonTypeName; - -/** - * A WebHDFS exception thrown indicating the file or folder already exists. - * Thrown when a 403 error response code is returned (forbidden). - */ -@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "exception") -@JsonTypeName("FileAlreadyExistsException") -public class AdlsFileAlreadyExistsException extends AdlsRemoteException { -} diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsFileNotFoundException.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsFileNotFoundException.java deleted file mode 100644 index fc8683215f2a..000000000000 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsFileNotFoundException.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - */ - -package com.microsoft.azure.management.datalake.store.models; - -import com.fasterxml.jackson.annotation.JsonTypeInfo; -import com.fasterxml.jackson.annotation.JsonTypeName; - -/** - * A WebHDFS exception thrown indicating the file or folder could not be - * found. Thrown when a 404 error response code is returned (not found). 
- */ -@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "exception") -@JsonTypeName("FileNotFoundException") -public class AdlsFileNotFoundException extends AdlsRemoteException { -} diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsIOException.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsIOException.java deleted file mode 100644 index df0ea68ab12c..000000000000 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsIOException.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - */ - -package com.microsoft.azure.management.datalake.store.models; - -import com.fasterxml.jackson.annotation.JsonTypeInfo; -import com.fasterxml.jackson.annotation.JsonTypeName; - -/** - * A WebHDFS exception thrown indicating there was an IO (read or write) - * error. Thrown when a 403 error response code is returned (forbidden). - */ -@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "exception") -@JsonTypeName("IOException") -public class AdlsIOException extends AdlsRemoteException { -} diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsIllegalArgumentException.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsIllegalArgumentException.java deleted file mode 100644 index 52701e72b091..000000000000 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsIllegalArgumentException.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - */ - -package com.microsoft.azure.management.datalake.store.models; - -import com.fasterxml.jackson.annotation.JsonTypeInfo; -import com.fasterxml.jackson.annotation.JsonTypeName; - -/** - * A WebHDFS exception thrown indicating that one or more arguments is incorrect. - * Thrown when a 400 error response code is returned (bad request). - */ -@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "exception") -@JsonTypeName("IllegalArgumentException") -public class AdlsIllegalArgumentException extends AdlsRemoteException { -} diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsRemoteException.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsRemoteException.java deleted file mode 100644 index 1df0e22dcf02..000000000000 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsRemoteException.java +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - * - * Code generated by Microsoft (R) AutoRest Code Generator.
- */ - -package com.microsoft.azure.management.datalake.store.models; - -import com.fasterxml.jackson.annotation.JsonProperty; -import com.fasterxml.jackson.annotation.JsonTypeInfo; -import com.fasterxml.jackson.annotation.JsonTypeName; -import com.fasterxml.jackson.annotation.JsonSubTypes; - -/** - * Data Lake Store filesystem exception based on the WebHDFS definition for - * RemoteExceptions. - */ -@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "exception") -@JsonTypeName("AdlsRemoteException") -@JsonSubTypes({ - @JsonSubTypes.Type(name = "IllegalArgumentException", value = AdlsIllegalArgumentException.class), - @JsonSubTypes.Type(name = "UnsupportedOperationException", value = AdlsUnsupportedOperationException.class), - @JsonSubTypes.Type(name = "SecurityException", value = AdlsSecurityException.class), - @JsonSubTypes.Type(name = "IOException", value = AdlsIOException.class), - @JsonSubTypes.Type(name = "FileNotFoundException", value = AdlsFileNotFoundException.class), - @JsonSubTypes.Type(name = "FileAlreadyExistsException", value = AdlsFileAlreadyExistsException.class), - @JsonSubTypes.Type(name = "BadOffsetException", value = AdlsBadOffsetException.class), - @JsonSubTypes.Type(name = "RuntimeException", value = AdlsRuntimeException.class), - @JsonSubTypes.Type(name = "AccessControlException", value = AdlsAccessControlException.class) -}) -public class AdlsRemoteException { - /** - * the full class package name for the exception thrown, such as - * 'java.lang.IllegalArgumentException'. - */ - @JsonProperty(access = JsonProperty.Access.WRITE_ONLY) - private String javaClassName; - - /** - * the message associated with the exception that was thrown, such as - * 'Invalid value for webhdfs parameter "permission":...'. - */ - @JsonProperty(access = JsonProperty.Access.WRITE_ONLY) - private String message; - - /** - * Get the javaClassName value. - * - * @return the javaClassName value - */ - public String javaClassName() { - return this.javaClassName; - } - - /** - * Get the message value. - * - * @return the message value - */ - public String message() { - return this.message; - } - -} diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsRuntimeException.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsRuntimeException.java deleted file mode 100644 index 5beb92e5ad54..000000000000 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsRuntimeException.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - */ - -package com.microsoft.azure.management.datalake.store.models; - -import com.fasterxml.jackson.annotation.JsonTypeInfo; -import com.fasterxml.jackson.annotation.JsonTypeName; - -/** - * A WebHDFS exception thrown when an unexpected error occurs during an - * operation. Thrown when a 500 error response code is returned (Internal - * server error). 
- */ -@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "exception") -@JsonTypeName("RuntimeException") -public class AdlsRuntimeException extends AdlsRemoteException { -} diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsSecurityException.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsSecurityException.java deleted file mode 100644 index 7b6bed7049df..000000000000 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsSecurityException.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - */ - -package com.microsoft.azure.management.datalake.store.models; - -import com.fasterxml.jackson.annotation.JsonTypeInfo; -import com.fasterxml.jackson.annotation.JsonTypeName; - -/** - * A WebHDFS exception thrown indicating that access is denied. Thrown when a - * 401 error response code is returned (Unauthorized). - */ -@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "exception") -@JsonTypeName("SecurityException") -public class AdlsSecurityException extends AdlsRemoteException { -} diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsUnsupportedOperationException.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsUnsupportedOperationException.java deleted file mode 100644 index 18864c0bc01c..000000000000 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AdlsUnsupportedOperationException.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - */ - -package com.microsoft.azure.management.datalake.store.models; - -import com.fasterxml.jackson.annotation.JsonTypeInfo; -import com.fasterxml.jackson.annotation.JsonTypeName; - -/** - * A WebHDFS exception thrown indicating that the requested operation is not - * supported. Thrown when a 400 error response code is returned (bad request). - */ -@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "exception") -@JsonTypeName("UnsupportedOperationException") -public class AdlsUnsupportedOperationException extends AdlsRemoteException { -} diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/ContentSummary.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/ContentSummary.java deleted file mode 100644 index a56bf8606a8d..000000000000 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/ContentSummary.java +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - * - * Code generated by Microsoft (R) AutoRest Code Generator. 
- */ - -package com.microsoft.azure.management.datalake.store.models; - -import com.fasterxml.jackson.annotation.JsonProperty; - -/** - * Data Lake Store content summary information. - */ -public class ContentSummary { - /** - * the number of directories. - */ - @JsonProperty(access = JsonProperty.Access.WRITE_ONLY) - private Long directoryCount; - - /** - * the number of files. - */ - @JsonProperty(access = JsonProperty.Access.WRITE_ONLY) - private Long fileCount; - - /** - * the number of bytes used by the content. - */ - @JsonProperty(access = JsonProperty.Access.WRITE_ONLY) - private Long length; - - /** - * the disk space consumed by the content. - */ - @JsonProperty(access = JsonProperty.Access.WRITE_ONLY) - private Long spaceConsumed; - - /** - * Get the directoryCount value. - * - * @return the directoryCount value - */ - public Long directoryCount() { - return this.directoryCount; - } - - /** - * Get the fileCount value. - * - * @return the fileCount value - */ - public Long fileCount() { - return this.fileCount; - } - - /** - * Get the length value. - * - * @return the length value - */ - public Long length() { - return this.length; - } - - /** - * Get the spaceConsumed value. - * - * @return the spaceConsumed value - */ - public Long spaceConsumed() { - return this.spaceConsumed; - } - -} diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/ContentSummaryResult.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/ContentSummaryResult.java deleted file mode 100644 index 25d65e4438f4..000000000000 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/ContentSummaryResult.java +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - */ - -package com.microsoft.azure.management.datalake.store.models; - -import com.fasterxml.jackson.annotation.JsonProperty; - -/** - * Data Lake Store filesystem content summary information response. - */ -public class ContentSummaryResult { - /** - * the content summary for the specified path. - */ - @JsonProperty(value = "ContentSummary", access = JsonProperty.Access.WRITE_ONLY) - private ContentSummary contentSummary; - - /** - * Get the contentSummary value. - * - * @return the contentSummary value - */ - public ContentSummary contentSummary() { - return this.contentSummary; - } - -} diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/DataLakeStoreAccount.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/DataLakeStoreAccount.java index 78594383187c..dbac8f76dc35 100644 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/DataLakeStoreAccount.java +++ b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/DataLakeStoreAccount.java @@ -37,6 +37,11 @@ public class DataLakeStoreAccount { @JsonProperty(access = JsonProperty.Access.WRITE_ONLY) private String id; + /** + * The Key vault encryption identity, if any. + */ + private EncryptionIdentity identity; + /** * the value of custom properties. */ @@ -105,6 +110,26 @@ public String id() { return this.id; } + /** + * Get the identity value.
+ * + * @return the identity value + */ + public EncryptionIdentity identity() { + return this.identity; + } + + /** + * Set the identity value. + * + * @param identity the identity value to set + * @return the DataLakeStoreAccount object itself. + */ + public DataLakeStoreAccount withIdentity(EncryptionIdentity identity) { + this.identity = identity; + return this; + } + /** * Get the tags value. * diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/DataLakeStoreAccountProperties.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/DataLakeStoreAccountProperties.java index 26fcb9c7a2f7..85875098389c 100644 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/DataLakeStoreAccountProperties.java +++ b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/DataLakeStoreAccountProperties.java @@ -36,6 +36,24 @@ public class DataLakeStoreAccountProperties { @JsonProperty(access = JsonProperty.Access.WRITE_ONLY) private DateTime creationTime; + /** + * The current state of encryption for this Data Lake store account. + * Possible values include: 'Enabled', 'Disabled'. + */ + private EncryptionState encryptionState; + + /** + * The current state of encryption provisioning for this Data Lake store + * account. Possible values include: 'Creating', 'Succeeded'. + */ + @JsonProperty(access = JsonProperty.Access.WRITE_ONLY) + private EncryptionProvisioningState encryptionProvisioningState; + + /** + * The Key vault encryption configuration. + */ + private EncryptionConfig encryptionConfig; + /** * the account last modified time. */ @@ -80,6 +98,55 @@ public DateTime creationTime() { return this.creationTime; } + /** + * Get the encryptionState value. + * + * @return the encryptionState value + */ + public EncryptionState encryptionState() { + return this.encryptionState; + } + + /** + * Set the encryptionState value. + * + * @param encryptionState the encryptionState value to set + * @return the DataLakeStoreAccountProperties object itself. + */ + public DataLakeStoreAccountProperties withEncryptionState(EncryptionState encryptionState) { + this.encryptionState = encryptionState; + return this; + } + + /** + * Get the encryptionProvisioningState value. + * + * @return the encryptionProvisioningState value + */ + public EncryptionProvisioningState encryptionProvisioningState() { + return this.encryptionProvisioningState; + } + + /** + * Get the encryptionConfig value. + * + * @return the encryptionConfig value + */ + public EncryptionConfig encryptionConfig() { + return this.encryptionConfig; + } + + /** + * Set the encryptionConfig value. + * + * @param encryptionConfig the encryptionConfig value to set + * @return the DataLakeStoreAccountProperties object itself. + */ + public DataLakeStoreAccountProperties withEncryptionConfig(EncryptionConfig encryptionConfig) { + this.encryptionConfig = encryptionConfig; + return this; + } + /** * Get the lastModifiedTime value. 
* diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/EncryptionConfig.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/EncryptionConfig.java new file mode 100644 index 000000000000..4947b0f8e2f4 --- /dev/null +++ b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/EncryptionConfig.java @@ -0,0 +1,69 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.management.datalake.store.models; + + +/** + * The EncryptionConfig model. + */ +public class EncryptionConfig { + /** + * The type of encryption configuration being used. Currently the only + * supported types are 'UserManaged' and 'ServiceManaged'. Possible + * values include: 'UserManaged', 'ServiceManaged'. + */ + private EncryptionConfigType type; + + /** + * The Key vault information for connecting to user managed encryption + * keys. + */ + private KeyVaultMetaInfo keyVaultMetaInfo; + + /** + * Get the type value. + * + * @return the type value + */ + public EncryptionConfigType type() { + return this.type; + } + + /** + * Set the type value. + * + * @param type the type value to set + * @return the EncryptionConfig object itself. + */ + public EncryptionConfig withType(EncryptionConfigType type) { + this.type = type; + return this; + } + + /** + * Get the keyVaultMetaInfo value. + * + * @return the keyVaultMetaInfo value + */ + public KeyVaultMetaInfo keyVaultMetaInfo() { + return this.keyVaultMetaInfo; + } + + /** + * Set the keyVaultMetaInfo value. + * + * @param keyVaultMetaInfo the keyVaultMetaInfo value to set + * @return the EncryptionConfig object itself. + */ + public EncryptionConfig withKeyVaultMetaInfo(KeyVaultMetaInfo keyVaultMetaInfo) { + this.keyVaultMetaInfo = keyVaultMetaInfo; + return this; + } + +} diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/EncryptionConfigType.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/EncryptionConfigType.java new file mode 100644 index 000000000000..7c4933b150c5 --- /dev/null +++ b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/EncryptionConfigType.java @@ -0,0 +1,53 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.management.datalake.store.models; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; + +/** + * Defines values for EncryptionConfigType. + */ +public enum EncryptionConfigType { + /** Enum value UserManaged. */ + USER_MANAGED("UserManaged"), + + /** Enum value ServiceManaged. */ + SERVICE_MANAGED("ServiceManaged"); + + /** The actual serialized value for a EncryptionConfigType instance. */ + private String value; + + EncryptionConfigType(String value) { + this.value = value; + } + + /** + * Parses a serialized value to a EncryptionConfigType instance. + * + * @param value the serialized value to parse. + * @return the parsed EncryptionConfigType object, or null if unable to parse. 
+ */ + @JsonCreator + public static EncryptionConfigType fromString(String value) { + EncryptionConfigType[] items = EncryptionConfigType.values(); + for (EncryptionConfigType item : items) { + if (item.toString().equalsIgnoreCase(value)) { + return item; + } + } + return null; + } + + @JsonValue + @Override + public String toString() { + return this.value; + } +} diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/EncryptionIdentity.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/EncryptionIdentity.java new file mode 100644 index 000000000000..1d0b8a6a2c1d --- /dev/null +++ b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/EncryptionIdentity.java @@ -0,0 +1,74 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.management.datalake.store.models; + +import java.util.UUID; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * The EncryptionIdentity model. + */ +public class EncryptionIdentity { + /** + * The type of encryption being used. Currently the only supported type is + * 'SystemAssigned'. Possible values include: 'SystemAssigned'. + */ + private EncryptionIdentityType type; + + /** + * The principal identifier associated with the encryption. + */ + @JsonProperty(access = JsonProperty.Access.WRITE_ONLY) + private UUID principalId; + + /** + * The tenant identifier associated with the encryption. + */ + @JsonProperty(access = JsonProperty.Access.WRITE_ONLY) + private UUID tenantId; + + /** + * Get the type value. + * + * @return the type value + */ + public EncryptionIdentityType type() { + return this.type; + } + + /** + * Set the type value. + * + * @param type the type value to set + * @return the EncryptionIdentity object itself. + */ + public EncryptionIdentity withType(EncryptionIdentityType type) { + this.type = type; + return this; + } + + /** + * Get the principalId value. + * + * @return the principalId value + */ + public UUID principalId() { + return this.principalId; + } + + /** + * Get the tenantId value. + * + * @return the tenantId value + */ + public UUID tenantId() { + return this.tenantId; + } + +} diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AppendModeType.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/EncryptionIdentityType.java similarity index 55% rename from azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AppendModeType.java rename to azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/EncryptionIdentityType.java index f0ebfbdb1c2d..874a7028b8c9 100644 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/AppendModeType.java +++ b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/EncryptionIdentityType.java @@ -12,29 +12,29 @@ import com.fasterxml.jackson.annotation.JsonValue; /** - * Defines values for AppendModeType. + * Defines values for EncryptionIdentityType. */ -public enum AppendModeType { - /** Enum value autocreate. 
*/ - AUTOCREATE("autocreate"); +public enum EncryptionIdentityType { + /** Enum value SystemAssigned. */ + SYSTEM_ASSIGNED("SystemAssigned"); - /** The actual serialized value for a AppendModeType instance. */ + /** The actual serialized value for a EncryptionIdentityType instance. */ private String value; - AppendModeType(String value) { + EncryptionIdentityType(String value) { this.value = value; } /** - * Parses a serialized value to a AppendModeType instance. + * Parses a serialized value to a EncryptionIdentityType instance. * * @param value the serialized value to parse. - * @return the parsed AppendModeType object, or null if unable to parse. + * @return the parsed EncryptionIdentityType object, or null if unable to parse. */ @JsonCreator - public static AppendModeType fromString(String value) { - AppendModeType[] items = AppendModeType.values(); - for (AppendModeType item : items) { + public static EncryptionIdentityType fromString(String value) { + EncryptionIdentityType[] items = EncryptionIdentityType.values(); + for (EncryptionIdentityType item : items) { if (item.toString().equalsIgnoreCase(value)) { return item; } diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/EncryptionProvisioningState.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/EncryptionProvisioningState.java new file mode 100644 index 000000000000..b7c4468fa82f --- /dev/null +++ b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/EncryptionProvisioningState.java @@ -0,0 +1,53 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.management.datalake.store.models; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; + +/** + * Defines values for EncryptionProvisioningState. + */ +public enum EncryptionProvisioningState { + /** Enum value Creating. */ + CREATING("Creating"), + + /** Enum value Succeeded. */ + SUCCEEDED("Succeeded"); + + /** The actual serialized value for a EncryptionProvisioningState instance. */ + private String value; + + EncryptionProvisioningState(String value) { + this.value = value; + } + + /** + * Parses a serialized value to a EncryptionProvisioningState instance. + * + * @param value the serialized value to parse. + * @return the parsed EncryptionProvisioningState object, or null if unable to parse. 
+ */ + @JsonCreator + public static EncryptionProvisioningState fromString(String value) { + EncryptionProvisioningState[] items = EncryptionProvisioningState.values(); + for (EncryptionProvisioningState item : items) { + if (item.toString().equalsIgnoreCase(value)) { + return item; + } + } + return null; + } + + @JsonValue + @Override + public String toString() { + return this.value; + } +} diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/FileType.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/EncryptionState.java similarity index 57% rename from azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/FileType.java rename to azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/EncryptionState.java index d897942c82f1..510e1d06b096 100644 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/FileType.java +++ b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/EncryptionState.java @@ -12,32 +12,32 @@ import com.fasterxml.jackson.annotation.JsonValue; /** - * Defines values for FileType. + * Defines values for EncryptionState. */ -public enum FileType { - /** Enum value FILE. */ - FILE("FILE"), +public enum EncryptionState { + /** Enum value Enabled. */ + ENABLED("Enabled"), - /** Enum value DIRECTORY. */ - DIRECTORY("DIRECTORY"); + /** Enum value Disabled. */ + DISABLED("Disabled"); - /** The actual serialized value for a FileType instance. */ + /** The actual serialized value for a EncryptionState instance. */ private String value; - FileType(String value) { + EncryptionState(String value) { this.value = value; } /** - * Parses a serialized value to a FileType instance. + * Parses a serialized value to a EncryptionState instance. * * @param value the serialized value to parse. - * @return the parsed FileType object, or null if unable to parse. + * @return the parsed EncryptionState object, or null if unable to parse. */ @JsonCreator - public static FileType fromString(String value) { - FileType[] items = FileType.values(); - for (FileType item : items) { + public static EncryptionState fromString(String value) { + EncryptionState[] items = EncryptionState.values(); + for (EncryptionState item : items) { if (item.toString().equalsIgnoreCase(value)) { return item; } diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/FileOperationResult.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/FileOperationResult.java deleted file mode 100644 index 4224d6fa34af..000000000000 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/FileOperationResult.java +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - */ - -package com.microsoft.azure.management.datalake.store.models; - -import com.fasterxml.jackson.annotation.JsonProperty; - -/** - * The result of the request or operation. - */ -public class FileOperationResult { - /** - * the result of the operation or request. 
- */ - @JsonProperty(value = "boolean", access = JsonProperty.Access.WRITE_ONLY) - private Boolean operationResult; - - /** - * Get the operationResult value. - * - * @return the operationResult value - */ - public Boolean operationResult() { - return this.operationResult; - } - -} diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/FileStatusProperties.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/FileStatusProperties.java deleted file mode 100644 index f05030cf0b7d..000000000000 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/FileStatusProperties.java +++ /dev/null @@ -1,168 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - */ - -package com.microsoft.azure.management.datalake.store.models; - -import com.fasterxml.jackson.annotation.JsonProperty; - -/** - * Data Lake Store file or directory information. - */ -public class FileStatusProperties { - /** - * the last access time as ticks since the epoch. - */ - @JsonProperty(access = JsonProperty.Access.WRITE_ONLY) - private Long accessTime; - - /** - * the block size for the file. - */ - @JsonProperty(access = JsonProperty.Access.WRITE_ONLY) - private Long blockSize; - - /** - * the number of children in the directory. - */ - @JsonProperty(access = JsonProperty.Access.WRITE_ONLY) - private Long childrenNum; - - /** - * the group owner. - */ - @JsonProperty(access = JsonProperty.Access.WRITE_ONLY) - private String group; - - /** - * the number of bytes in a file. - */ - @JsonProperty(access = JsonProperty.Access.WRITE_ONLY) - private Long length; - - /** - * the modification time as ticks since the epoch. - */ - @JsonProperty(access = JsonProperty.Access.WRITE_ONLY) - private Long modificationTime; - - /** - * the user who is the owner. - */ - @JsonProperty(access = JsonProperty.Access.WRITE_ONLY) - private String owner; - - /** - * the path suffix. - */ - @JsonProperty(access = JsonProperty.Access.WRITE_ONLY) - private String pathSuffix; - - /** - * the permission represented as an string. - */ - @JsonProperty(access = JsonProperty.Access.WRITE_ONLY) - private String permission; - - /** - * the type of the path object. Possible values include: 'FILE', - * 'DIRECTORY'. - */ - @JsonProperty(access = JsonProperty.Access.WRITE_ONLY) - private FileType type; - - /** - * Get the accessTime value. - * - * @return the accessTime value - */ - public Long accessTime() { - return this.accessTime; - } - - /** - * Get the blockSize value. - * - * @return the blockSize value - */ - public Long blockSize() { - return this.blockSize; - } - - /** - * Get the childrenNum value. - * - * @return the childrenNum value - */ - public Long childrenNum() { - return this.childrenNum; - } - - /** - * Get the group value. - * - * @return the group value - */ - public String group() { - return this.group; - } - - /** - * Get the length value. - * - * @return the length value - */ - public Long length() { - return this.length; - } - - /** - * Get the modificationTime value. - * - * @return the modificationTime value - */ - public Long modificationTime() { - return this.modificationTime; - } - - /** - * Get the owner value. 
- * - * @return the owner value - */ - public String owner() { - return this.owner; - } - - /** - * Get the pathSuffix value. - * - * @return the pathSuffix value - */ - public String pathSuffix() { - return this.pathSuffix; - } - - /** - * Get the permission value. - * - * @return the permission value - */ - public String permission() { - return this.permission; - } - - /** - * Get the type value. - * - * @return the type value - */ - public FileType type() { - return this.type; - } - -} diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/FileStatusResult.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/FileStatusResult.java deleted file mode 100644 index 1289ba9fd357..000000000000 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/FileStatusResult.java +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - */ - -package com.microsoft.azure.management.datalake.store.models; - -import com.fasterxml.jackson.annotation.JsonProperty; - -/** - * Data Lake Store filesystem file status information response. - */ -public class FileStatusResult { - /** - * the file status object associated with the specified path. - */ - @JsonProperty(value = "FileStatus", access = JsonProperty.Access.WRITE_ONLY) - private FileStatusProperties fileStatus; - - /** - * Get the fileStatus value. - * - * @return the fileStatus value - */ - public FileStatusProperties fileStatus() { - return this.fileStatus; - } - -} diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/FileStatuses.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/FileStatuses.java deleted file mode 100644 index ecfb3cba2e86..000000000000 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/FileStatuses.java +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See License.txt in the project root for - * license information. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - */ - -package com.microsoft.azure.management.datalake.store.models; - -import java.util.List; -import com.fasterxml.jackson.annotation.JsonProperty; - -/** - * Data Lake Store file status list information. - */ -public class FileStatuses { - /** - * the object containing the list of properties of the files. - */ - @JsonProperty(value = "FileStatus", access = JsonProperty.Access.WRITE_ONLY) - private List fileStatus; - - /** - * Get the fileStatus value. - * - * @return the fileStatus value - */ - public List fileStatus() { - return this.fileStatus; - } - -} diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/FileStatusesResult.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/FileStatusesResult.java deleted file mode 100644 index 8e8271a6fc0d..000000000000 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/FileStatusesResult.java +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright (c) Microsoft Corporation. All rights reserved. 
- * Licensed under the MIT License. See License.txt in the project root for - * license information. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - */ - -package com.microsoft.azure.management.datalake.store.models; - -import com.fasterxml.jackson.annotation.JsonProperty; - -/** - * Data Lake Store filesystem file status list information response. - */ -public class FileStatusesResult { - /** - * the object representing the list of file statuses. - */ - @JsonProperty(value = "FileStatuses", access = JsonProperty.Access.WRITE_ONLY) - private FileStatuses fileStatuses; - - /** - * Get the fileStatuses value. - * - * @return the fileStatuses value - */ - public FileStatuses fileStatuses() { - return this.fileStatuses; - } - -} diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/KeyVaultMetaInfo.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/KeyVaultMetaInfo.java new file mode 100644 index 000000000000..4d843dfa206d --- /dev/null +++ b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/KeyVaultMetaInfo.java @@ -0,0 +1,92 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.management.datalake.store.models; + + +/** + * The KeyVaultMetaInfo model. + */ +public class KeyVaultMetaInfo { + /** + * The resource identifier for the user managed Key Vault being used to + * encrypt. + */ + private String keyVaultResourceId; + + /** + * The name of the user managed encryption key. + */ + private String encryptionKeyName; + + /** + * The version of the user managed encryption key. + */ + private String encryptionKeyVersion; + + /** + * Get the keyVaultResourceId value. + * + * @return the keyVaultResourceId value + */ + public String keyVaultResourceId() { + return this.keyVaultResourceId; + } + + /** + * Set the keyVaultResourceId value. + * + * @param keyVaultResourceId the keyVaultResourceId value to set + * @return the KeyVaultMetaInfo object itself. + */ + public KeyVaultMetaInfo withKeyVaultResourceId(String keyVaultResourceId) { + this.keyVaultResourceId = keyVaultResourceId; + return this; + } + + /** + * Get the encryptionKeyName value. + * + * @return the encryptionKeyName value + */ + public String encryptionKeyName() { + return this.encryptionKeyName; + } + + /** + * Set the encryptionKeyName value. + * + * @param encryptionKeyName the encryptionKeyName value to set + * @return the KeyVaultMetaInfo object itself. + */ + public KeyVaultMetaInfo withEncryptionKeyName(String encryptionKeyName) { + this.encryptionKeyName = encryptionKeyName; + return this; + } + + /** + * Get the encryptionKeyVersion value. + * + * @return the encryptionKeyVersion value + */ + public String encryptionKeyVersion() { + return this.encryptionKeyVersion; + } + + /** + * Set the encryptionKeyVersion value. + * + * @param encryptionKeyVersion the encryptionKeyVersion value to set + * @return the KeyVaultMetaInfo object itself. 
+ */ + public KeyVaultMetaInfo withEncryptionKeyVersion(String encryptionKeyVersion) { + this.encryptionKeyVersion = encryptionKeyVersion; + return this; + } + +} diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/package-info.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/package-info.java index 409ffccbef6c..2a6ac57ad553 100644 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/package-info.java +++ b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/models/package-info.java @@ -5,7 +5,7 @@ // Code generated by Microsoft (R) AutoRest Code Generator. /** - * This package contains the models classes for DataLakeStoreFileSystemManagementClient. - * Creates an Azure Data Lake Store filesystem client. + * This package contains the models classes for DataLakeStoreAccountManagementClient. + * Creates an Azure Data Lake Store account management client. */ package com.microsoft.azure.management.datalake.store.models; diff --git a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/package-info.java b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/package-info.java index 316b5e475819..93d350292df8 100644 --- a/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/package-info.java +++ b/azure-mgmt-datalake-store/src/main/java/com/microsoft/azure/management/datalake/store/package-info.java @@ -5,7 +5,7 @@ // Code generated by Microsoft (R) AutoRest Code Generator. /** - * This package contains the classes for DataLakeStoreFileSystemManagementClient. - * Creates an Azure Data Lake Store filesystem client. + * This package contains the classes for DataLakeStoreAccountManagementClient. + * Creates an Azure Data Lake Store account management client. */ package com.microsoft.azure.management.datalake.store; diff --git a/gulpfile.js b/gulpfile.js index 2ecf5685ad9a..053161b5cec8 100644 --- a/gulpfile.js +++ b/gulpfile.js @@ -74,12 +74,6 @@ var mappings = { 'package': 'com.microsoft.azure.management.search', 'args': '-FT 1' }, - 'datalake.store.filesystem': { - 'dir': 'azure-mgmt-datalake-store', - 'source': 'arm-datalake-store/filesystem/2015-10-01-preview/swagger/filesystem.json', - 'package': 'com.microsoft.azure.management.datalake.store', - 'fluent': false - }, 'datalake.store.account': { 'dir': 'azure-mgmt-datalake-store', 'source': 'arm-datalake-store/account/2015-10-01-preview/swagger/account.json', @@ -145,7 +139,7 @@ var isMac = (process.platform.lastIndexOf('darwin') === 0); var specRoot = args['spec-root'] || "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/master"; var projects = args['projects']; -var autoRestVersion = '0.17.0-Nightly20160830'; // default +var autoRestVersion = '0.17.0-Nightly20161010'; // default if (args['autorest'] !== undefined) { autoRestVersion = args['autorest']; } From 7f80815fddfa4a3e485733487e7f3892c89793c1 Mon Sep 17 00:00:00 2001 From: begoldsm Date: Tue, 11 Oct 2016 16:29:45 -0700 Subject: [PATCH 2/3] Minimal changes due to regen of ADLS. 
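For illustration, a minimal sketch (not part of the patch itself) of how the encryption models added above compose when building account-creation parameters. It uses only the setters shown in the diffs; the location, vault resource ID, key name, and key version are hypothetical placeholders.

    import com.microsoft.azure.management.datalake.store.models.DataLakeStoreAccount;
    import com.microsoft.azure.management.datalake.store.models.DataLakeStoreAccountProperties;
    import com.microsoft.azure.management.datalake.store.models.EncryptionConfig;
    import com.microsoft.azure.management.datalake.store.models.EncryptionConfigType;
    import com.microsoft.azure.management.datalake.store.models.EncryptionIdentity;
    import com.microsoft.azure.management.datalake.store.models.EncryptionIdentityType;
    import com.microsoft.azure.management.datalake.store.models.EncryptionState;
    import com.microsoft.azure.management.datalake.store.models.KeyVaultMetaInfo;

    public final class EncryptionModelsSketch {
        /** Builds create parameters for an account encrypted with a user-managed Key Vault key. */
        public static DataLakeStoreAccount userManagedEncryptionParams() {
            DataLakeStoreAccount createParams = new DataLakeStoreAccount();
            createParams.withLocation("eastus2"); // placeholder region
            // Identity used for encryption; per the model docs above, the only
            // supported type is currently 'SystemAssigned'.
            createParams.withIdentity(new EncryptionIdentity()
                .withType(EncryptionIdentityType.SYSTEM_ASSIGNED));
            DataLakeStoreAccountProperties props = new DataLakeStoreAccountProperties()
                .withEncryptionState(EncryptionState.ENABLED)
                .withEncryptionConfig(new EncryptionConfig()
                    .withType(EncryptionConfigType.USER_MANAGED)
                    .withKeyVaultMetaInfo(new KeyVaultMetaInfo()
                        // All three values below are placeholders.
                        .withKeyVaultResourceId("/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.KeyVault/vaults/<vault>")
                        .withEncryptionKeyName("<key-name>")
                        .withEncryptionKeyVersion("<key-version>")));
            createParams.withProperties(props);
            return createParams;
        }
    }

After a create call, progress can be read back from the read-only encryptionProvisioningState on the returned account's properties, which reports 'Creating' or 'Succeeded'.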
--- .../DataLakeStoreAccountOperationsTests.java | 10 +- ...ataLakeStoreFilesystemOperationsTests.java | 767 ------------------ .../DataLakeStoreManagementTestBase.java | 4 - 3 files changed, 5 insertions(+), 776 deletions(-) delete mode 100644 azure-mgmt-datalake-store/src/test/java/com/microsoft/azure/management/datalake/store/DataLakeStoreFilesystemOperationsTests.java diff --git a/azure-mgmt-datalake-store/src/test/java/com/microsoft/azure/management/datalake/store/DataLakeStoreAccountOperationsTests.java b/azure-mgmt-datalake-store/src/test/java/com/microsoft/azure/management/datalake/store/DataLakeStoreAccountOperationsTests.java index 966b52417596..a053dfef3ebc 100644 --- a/azure-mgmt-datalake-store/src/test/java/com/microsoft/azure/management/datalake/store/DataLakeStoreAccountOperationsTests.java +++ b/azure-mgmt-datalake-store/src/test/java/com/microsoft/azure/management/datalake/store/DataLakeStoreAccountOperationsTests.java @@ -45,7 +45,7 @@ public void canCreateGetUpdateDeleteAdlsAccount() throws Exception { createParams.withTags(new HashMap()); createParams.tags().put("testkey", "testvalue"); - DataLakeStoreAccount createResponse = dataLakeStoreAccountManagementClient.accounts().create(rgName, adlsAcct, createParams).getBody(); + DataLakeStoreAccount createResponse = dataLakeStoreAccountManagementClient.accounts().create(rgName, adlsAcct, createParams); Assert.assertEquals(location, createResponse.location()); Assert.assertEquals("Microsoft.DataLakeStore/accounts", createResponse.type()); Assert.assertNotNull(createResponse.id()); @@ -55,7 +55,7 @@ public void canCreateGetUpdateDeleteAdlsAccount() throws Exception { // update the tags createParams.tags().put("testkey2", "testvalue2"); createParams.withProperties(null); - DataLakeStoreAccount updateResponse = dataLakeStoreAccountManagementClient.accounts().update(rgName, adlsAcct, createParams).getBody(); + DataLakeStoreAccount updateResponse = dataLakeStoreAccountManagementClient.accounts().update(rgName, adlsAcct, createParams); Assert.assertEquals(location, updateResponse.location()); Assert.assertEquals("Microsoft.DataLakeStore/accounts", updateResponse.type()); Assert.assertNotNull(updateResponse.id()); @@ -63,7 +63,7 @@ public void canCreateGetUpdateDeleteAdlsAccount() throws Exception { Assert.assertEquals(2, updateResponse.tags().size()); // get the account - DataLakeStoreAccount getResponse = dataLakeStoreAccountManagementClient.accounts().get(rgName, adlsAcct).getBody(); + DataLakeStoreAccount getResponse = dataLakeStoreAccountManagementClient.accounts().get(rgName, adlsAcct); Assert.assertEquals(location, getResponse.location()); Assert.assertEquals("Microsoft.DataLakeStore/accounts", getResponse.type()); Assert.assertNotNull(getResponse.id()); @@ -71,7 +71,7 @@ public void canCreateGetUpdateDeleteAdlsAccount() throws Exception { Assert.assertEquals(2, getResponse.tags().size()); // list all accounts and make sure there is one. 
- List listResult = dataLakeStoreAccountManagementClient.accounts().list().getBody(); + List listResult = dataLakeStoreAccountManagementClient.accounts().list(); DataLakeStoreAccount discoveredAcct = null; for (DataLakeStoreAccount acct : listResult) { if (acct.name().equals(adlsAcct)) { @@ -91,7 +91,7 @@ public void canCreateGetUpdateDeleteAdlsAccount() throws Exception { Assert.assertNull(discoveredAcct.properties().defaultGroup()); // list within a resource group - listResult = dataLakeStoreAccountManagementClient.accounts().listByResourceGroup(rgName).getBody(); + listResult = dataLakeStoreAccountManagementClient.accounts().listByResourceGroup(rgName); discoveredAcct = null; for (DataLakeStoreAccount acct : listResult) { if (acct.name().equals(adlsAcct)) { diff --git a/azure-mgmt-datalake-store/src/test/java/com/microsoft/azure/management/datalake/store/DataLakeStoreFilesystemOperationsTests.java b/azure-mgmt-datalake-store/src/test/java/com/microsoft/azure/management/datalake/store/DataLakeStoreFilesystemOperationsTests.java deleted file mode 100644 index c81e80b2d77d..000000000000 --- a/azure-mgmt-datalake-store/src/test/java/com/microsoft/azure/management/datalake/store/DataLakeStoreFilesystemOperationsTests.java +++ /dev/null @@ -1,767 +0,0 @@ -package com.microsoft.azure.management.datalake.store; - -import com.microsoft.azure.CloudException; -import com.microsoft.azure.management.datalake.store.models.AclStatusResult; -import com.microsoft.azure.management.datalake.store.models.DataLakeStoreAccount; -import com.microsoft.azure.management.datalake.store.models.FileOperationResult; -import com.microsoft.azure.management.datalake.store.models.FileStatusProperties; -import com.microsoft.azure.management.datalake.store.models.FileStatusResult; -import com.microsoft.azure.management.datalake.store.models.FileStatusesResult; -import com.microsoft.azure.management.datalake.store.models.FileType; -import com.microsoft.azure.management.resources.implementation.ResourceGroupInner; -import org.apache.commons.lang3.StringUtils; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.BufferedReader; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.Reader; -import java.io.StringWriter; -import java.io.Writer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.UUID; - -public class DataLakeStoreFilesystemOperationsTests extends DataLakeStoreManagementTestBase { - // constants - private static String folderToCreate = "SDKTestFolder01"; - private static String fileToCreateWithContents = "SDKTestFile02.txt"; - private static String fileToCopy = "SDKTestCopyFile01.txt"; - private static String fileToConcatTo = "SDKTestConcatFile01.txt"; - - private static String fileContentsToAdd = "These are some random test contents 1234!@"; - - private static String rgName = generateName("javaadlsrg"); - private static String adlsAcct = generateName("javaadlsacct"); - - - @BeforeClass - public static void setup() throws Exception { - createClients(); - ResourceGroupInner group = new ResourceGroupInner(); - String location = "eastus2"; - group.withLocation(location); - resourceManagementClient.resourceGroups().createOrUpdate(rgName, group); - - // create storage and ADLS accounts, setting the accessKey - DataLakeStoreAccount adlsAccount = new DataLakeStoreAccount(); - adlsAccount.withLocation(location); - adlsAccount.withName(adlsAcct); - 
dataLakeStoreAccountManagementClient.accounts().create(rgName, adlsAcct, adlsAccount); - } - - @AfterClass - public static void cleanup() throws Exception { - try { - resourceManagementClient.resourceGroups().delete(rgName); - } - catch (Exception e) { - // ignore failures during cleanup, as it is best effort - } - } - - // tests - @Test - public void DataLakeStoreFileSystemFolderCreate() throws Exception - { - String folderPath = CreateFolder(adlsAcct, true); - GetAndCompareFileOrFolder(adlsAcct, folderPath, - FileType.DIRECTORY, 0); - } - - /* - TODO: Re-enable code when Expiry is live on the server again - @Test - public void DataLakeStoreFileSystemSetAndRemoveExpiry() - { - const long maxTimeInMilliseconds = 253402300800000; - var filePath = CreateFile(adlsAcct, false, true); - GetAndCompareFileOrFolder(adlsAcct, filePath, FileType.FILE, 0); - - // verify it does not have an expiration - var fileInfo = dataLakeStoreFileSystemManagementClient.fileSystems().GetFileInfo(adlsAcct, filePath); - Assert.assertTrue(fileInfo.FileInfo.ExpirationTime <= 0 || fileInfo.FileInfo.ExpirationTime == maxTimeInMilliseconds, "Expiration time was not equal to 0 or DateTime.MaxValue.Ticks! Actual value reported: " + fileInfo.FileInfo.ExpirationTime); - - // set the expiration time as an absolute value - - var toSetAbsolute = ToUnixTimeStampMs(HttpMockServer.GetVariable("absoluteTime", DateTime.Now.AddSeconds(120).ToString())); - dataLakeStoreFileSystemManagementClient.fileSystems().SetFileExpiry(adlsAcct, filePath, ExpiryOptionType.Absolute, toSetAbsolute); - fileInfo = dataLakeStoreFileSystemManagementClient.fileSystems().GetFileInfo(adlsAcct, filePath); - VerifyTimeInAcceptableRange(toSetAbsolute, fileInfo.FileInfo.ExpirationTime.Value); - - // set the expiration time relative to now - var toSetRelativeToNow = ToUnixTimeStampMs(HttpMockServer.GetVariable("relativeTime", DateTime.Now.AddSeconds(120).ToString())); - dataLakeStoreFileSystemManagementClient.fileSystems().SetFileExpiry(adlsAcct, filePath, ExpiryOptionType.RelativeToNow, 120 * 1000); - fileInfo = dataLakeStoreFileSystemManagementClient.fileSystems().GetFileInfo(adlsAcct, filePath); - VerifyTimeInAcceptableRange(toSetRelativeToNow, fileInfo.FileInfo.ExpirationTime.Value); - - // set expiration time relative to the creation time - var toSetRelativeCreationTime = fileInfo.FileInfo.CreationTime.Value + (120 * 1000); - dataLakeStoreFileSystemManagementClient.fileSystems().SetFileExpiry(adlsAcct, filePath, ExpiryOptionType.RelativeToCreationDate, 120 * 1000); - fileInfo = dataLakeStoreFileSystemManagementClient.fileSystems().GetFileInfo(adlsAcct, filePath); - VerifyTimeInAcceptableRange(toSetRelativeCreationTime, fileInfo.FileInfo.ExpirationTime.Value); - - // reset expiration time to never - dataLakeStoreFileSystemManagementClient.fileSystems().SetFileExpiry(adlsAcct, filePath, ExpiryOptionType.NeverExpire); - fileInfo = dataLakeStoreFileSystemManagementClient.fileSystems().GetFileInfo(adlsAcct, filePath); - Assert.assertTrue(fileInfo.FileInfo.ExpirationTime <= 0 || fileInfo.FileInfo.ExpirationTime == maxTimeInMilliseconds, "Expiration time was not equal to 0 or DateTime.MaxValue.Ticks! 
Actual value reported: " + fileInfo.FileInfo.ExpirationTime); - } - - @Test - public void DataLakeStoreFileSystemNegativeExpiry() - { - const long maxTimeInMilliseconds = 253402300800000; - var filePath = CreateFile(adlsAcct, false, true); - GetAndCompareFileOrFolder(adlsAcct, filePath, FileType.FILE, 0); - - // verify it does not have an expiration - var fileInfo = dataLakeStoreFileSystemManagementClient.fileSystems().GetFileInfo(adlsAcct, filePath); - Assert.assertTrue(fileInfo.FileInfo.ExpirationTime <= 0 || fileInfo.FileInfo.ExpirationTime == maxTimeInMilliseconds, "Expiration time was not equal to 0 or DateTime.MaxValue.Ticks! Actual value reported: " + fileInfo.FileInfo.ExpirationTime); - - // set the expiration time as an absolute value that is less than the creation time - var toSetAbsolute = ToUnixTimeStampMs(HttpMockServer.GetVariable("absoluteNegativeTime", DateTime.Now.AddSeconds(-120).ToString())); - Assert.assertThrows(() => dataLakeStoreFileSystemManagementClient.fileSystems().SetFileExpiry(adlsAcct, filePath, ExpiryOptionType.Absolute, toSetAbsolute)); - - // set the expiration time as an absolute value that is greater than max allowed time - toSetAbsolute = ToUnixTimeStampMs(DateTime.MaxValue.ToString()) + 1000; - Assert.assertThrows(() => dataLakeStoreFileSystemManagementClient.fileSystems().SetFileExpiry(adlsAcct, filePath, ExpiryOptionType.Absolute, toSetAbsolute)); - - // reset expiration time to never with a value and confirm the value is not honored - dataLakeStoreFileSystemManagementClient.fileSystems().SetFileExpiry(adlsAcct, filePath, ExpiryOptionType.NeverExpire, 400); - fileInfo = dataLakeStoreFileSystemManagementClient.fileSystems().GetFileInfo(adlsAcct, filePath); - Assert.assertTrue(fileInfo.FileInfo.ExpirationTime <= 0 || fileInfo.FileInfo.ExpirationTime == maxTimeInMilliseconds, "Expiration time was not equal to 0 or DateTime.MaxValue.Ticks! Actual value reported: " + fileInfo.FileInfo.ExpirationTime); - } - */ - - @Test - public void DataLakeStoreFileSystemListFolderContents() throws Exception - { - String folderPath = CreateFolder(adlsAcct, true); - GetAndCompareFileOrFolder(adlsAcct, folderPath, - FileType.DIRECTORY, 0); - - String filePath = CreateFile(adlsAcct, false, true, folderPath); - GetAndCompareFileOrFolder(adlsAcct, filePath, FileType.FILE, 0); - - // List all the contents in the folder - FileStatusesResult listFolderResponse = dataLakeStoreFileSystemManagementClient.fileSystems().listFileStatus(adlsAcct, folderPath).getBody(); - - // We know that this directory is brand new, so the contents should only be the one file. 
- Assert.assertEquals(1, listFolderResponse.fileStatuses().fileStatus().size()); - Assert.assertEquals(FileType.FILE, listFolderResponse.fileStatuses().fileStatus().get(0).type()); - } - - @Test - public void DataLakeStoreFileSystemEmptyFileCreate() throws Exception - { - String filePath = CreateFile(adlsAcct, false, true, folderToCreate); - GetAndCompareFileOrFolder(adlsAcct, filePath, FileType.FILE, 0); - } - - @Test - public void DataLakeStoreFileSystemFileCreateWithContents() throws Exception - { - String filePath = CreateFile(adlsAcct, true, true, folderToCreate); - GetAndCompareFileOrFolder(adlsAcct, filePath, FileType.FILE, - fileContentsToAdd.length()); - CompareFileContents(adlsAcct, filePath, - fileContentsToAdd); - } - - @Test - public void DataLakeStoreFileSystemAppendToFile() throws Exception - { - String filePath = CreateFile(adlsAcct, false, true, folderToCreate); - GetAndCompareFileOrFolder(adlsAcct, filePath, FileType.FILE, 0); - - // Append to the file that we created - String fileContentsToAppend = "More test contents, that were appended!"; - dataLakeStoreFileSystemManagementClient.fileSystems().append(adlsAcct, filePath, fileContentsToAppend.getBytes()); - - GetAndCompareFileOrFolder(adlsAcct, filePath, FileType.FILE, - fileContentsToAppend.length()); - } - - @Test - public void DataLakeStoreFileSystemConcatenateFiles() throws Exception - { - String filePath1 = CreateFile(adlsAcct, true, true, folderToCreate); - GetAndCompareFileOrFolder(adlsAcct, filePath1, FileType.FILE, - fileContentsToAdd.length()); - - String filePath2 = CreateFile(adlsAcct, true, true, folderToCreate); - GetAndCompareFileOrFolder(adlsAcct, filePath2, FileType.FILE, - fileContentsToAdd.length()); - - String targetFolder = CreateFolder(adlsAcct, true); - - dataLakeStoreFileSystemManagementClient.fileSystems().concat( - adlsAcct, - String.format("%s/%s", targetFolder, fileToConcatTo), - Arrays.asList(new String[]{filePath1, filePath2}) - ); - - GetAndCompareFileOrFolder(adlsAcct, - String.format("%s/%s", targetFolder, fileToConcatTo), - FileType.FILE, - fileContentsToAdd.length() * 2); - - // Attempt to get the files that were concatted together, which should fail and throw - try { - dataLakeStoreFileSystemManagementClient.fileSystems().getFileStatus(adlsAcct, filePath1); - Assert.assertTrue("Able to get the old file after concat", false); - } - catch (Exception e) { - Assert.assertTrue(e instanceof CloudException); - } - - try { - dataLakeStoreFileSystemManagementClient.fileSystems().getFileStatus(adlsAcct, filePath2); - Assert.assertTrue("Able to get the old file after concat", false); - } - catch (Exception e) { - Assert.assertTrue(e instanceof CloudException); - } - } - - @Test - public void DataLakeStoreFileSystemMsConcatenateFiles() throws Exception - { - String filePath1 = CreateFile(adlsAcct, true, true, folderToCreate); - GetAndCompareFileOrFolder(adlsAcct, filePath1, FileType.FILE, - fileContentsToAdd.length()); - - String filePath2 = CreateFile(adlsAcct, true, true, folderToCreate); - GetAndCompareFileOrFolder(adlsAcct, filePath2, FileType.FILE, - fileContentsToAdd.length()); - - String targetFolder = CreateFolder(adlsAcct, true); - - dataLakeStoreFileSystemManagementClient.fileSystems().msConcat( - adlsAcct, - String.format("%s/%s", targetFolder, fileToConcatTo), - String.format("sources=%s,%s", filePath1, filePath2).getBytes(), - false); - - GetAndCompareFileOrFolder(adlsAcct, - String.format("%s/%s", targetFolder, fileToConcatTo), - FileType.FILE, - fileContentsToAdd.length() * 2); - - 
// Attempt to get the files that were concatted together, which should fail and throw - try { - dataLakeStoreFileSystemManagementClient.fileSystems().getFileStatus(adlsAcct, filePath1); - Assert.assertTrue("Able to get the old file after concat", false); - } - catch (Exception e) { - Assert.assertTrue(e instanceof CloudException); - } - - try { - dataLakeStoreFileSystemManagementClient.fileSystems().getFileStatus(adlsAcct, filePath2); - Assert.assertTrue("Able to get the old file after concat", false); - } - catch (Exception e) { - Assert.assertTrue(e instanceof CloudException); - } - } - - @Test - public void DataLakeStoreFileSystemMsConcatDeleteDir() throws Exception - { - String concatFolderPath = String.format("%s/%s", folderToCreate, - "msconcatFolder"); - String filePath1 = CreateFile(adlsAcct, true, true, - concatFolderPath); - GetAndCompareFileOrFolder(adlsAcct, filePath1, FileType.FILE, - fileContentsToAdd.length()); - - String filePath2 = CreateFile(adlsAcct, true, true, - concatFolderPath); - GetAndCompareFileOrFolder(adlsAcct, filePath2, FileType.FILE, - fileContentsToAdd.length()); - - String targetFolder = CreateFolder(adlsAcct, true); - - String destination = String.format("%s/%s", targetFolder, fileToConcatTo); - - dataLakeStoreFileSystemManagementClient.fileSystems().msConcat( - adlsAcct, - destination, - String.format("sources=%s,%s", filePath1, filePath2).getBytes(), - true); - - GetAndCompareFileOrFolder(adlsAcct, - String.format("%s/%s", targetFolder, fileToConcatTo), - FileType.FILE, - fileContentsToAdd.length()*2); - - // Attempt to get the files that were concatted together, which should fail and throw - try { - dataLakeStoreFileSystemManagementClient.fileSystems().getFileStatus(adlsAcct, filePath1); - Assert.assertTrue("Able to get the old file after concat", false); - } - catch (Exception e) { - Assert.assertTrue(e instanceof CloudException); - } - - try { - dataLakeStoreFileSystemManagementClient.fileSystems().getFileStatus(adlsAcct, filePath2); - Assert.assertTrue("Able to get the old file after concat", false); - } - catch (Exception e) { - Assert.assertTrue(e instanceof CloudException); - } - - // Attempt to get the folder that was created for concat, which should fail and be deleted. 
- try { - dataLakeStoreFileSystemManagementClient.fileSystems().getFileStatus(adlsAcct, concatFolderPath); - Assert.assertTrue("Able to get the old folder after concat", false); - } - catch (Exception e) { - Assert.assertTrue(e instanceof CloudException); - } - } - - @Test - public void DataLakeStoreFileSystemMoveFileAndFolder() throws Exception - { - String filePath = CreateFile(adlsAcct, true, true, folderToCreate); - GetAndCompareFileOrFolder(adlsAcct, filePath, FileType.FILE, - fileContentsToAdd.length()); - - String targetFolder1 = CreateFolder(adlsAcct, true); - String folderToMove = "SDKTestMoveFolder01"; - String targetFolder2 = generateName(folderToMove); - - // Move file first - String fileToMove = "SDKTestMoveFile01.txt"; - FileOperationResult moveFileResponse = dataLakeStoreFileSystemManagementClient.fileSystems().rename( - adlsAcct, - filePath, - String.format("%s/%s", targetFolder1, fileToMove)).getBody(); - Assert.assertTrue(moveFileResponse.operationResult()); - GetAndCompareFileOrFolder(adlsAcct, - String.format("%s/%s", targetFolder1, fileToMove), - FileType.FILE, - fileContentsToAdd.length()); - - // Ensure the old file is gone - try { - dataLakeStoreFileSystemManagementClient.fileSystems().getFileStatus(adlsAcct, filePath); - Assert.assertTrue("Able to get the old file after rename", false); - } - catch (Exception e) { - Assert.assertTrue(e instanceof CloudException); - } - - // Now move folder completely. - FileOperationResult moveFolderResponse = dataLakeStoreFileSystemManagementClient.fileSystems().rename( - adlsAcct, - targetFolder1, - targetFolder2).getBody(); - Assert.assertTrue(moveFolderResponse.operationResult()); - - GetAndCompareFileOrFolder(adlsAcct, targetFolder2, - FileType.DIRECTORY, 0); - - // ensure all the contents of the folder moved - // List all the contents in the folder - FileStatusesResult listFolderResponse = dataLakeStoreFileSystemManagementClient.fileSystems().listFileStatus( - adlsAcct, - targetFolder2).getBody(); - - // We know that this directory is brand new, so the contents should only be the one file. - Assert.assertEquals(1, listFolderResponse.fileStatuses().fileStatus().size()); - Assert.assertEquals(FileType.FILE, listFolderResponse.fileStatuses().fileStatus().get(0).type()); - - try { - dataLakeStoreFileSystemManagementClient.fileSystems().getFileStatus(adlsAcct, targetFolder1); - Assert.assertTrue("Able to get the old folder after rename", false); - } - catch (Exception e) { - Assert.assertTrue(e instanceof CloudException); - } - } - - @Test - public void DataLakeStoreFileSystemDeleteFolder() throws Exception - { - String folderPath = CreateFolder(adlsAcct, true); - GetAndCompareFileOrFolder(adlsAcct, folderPath, - FileType.DIRECTORY, 0); - DeleteFolder(adlsAcct, folderPath, true, false); - //WORK AROUND: Bug 4717659 makes it so even empty folders have contents. - - // delete again expecting failure. - DeleteFolder(adlsAcct, folderPath, false, true); - - // delete a folder with contents - String filePath = CreateFile(adlsAcct, true, true, folderPath); - GetAndCompareFileOrFolder(adlsAcct, filePath, FileType.FILE, - fileContentsToAdd.length()); - - // should fail if recurse is not set - DeleteFolder(adlsAcct, folderPath, false, true); - - // Now actually delete - DeleteFolder(adlsAcct, folderPath, true, false); - - // delete again expecting failure. 
- DeleteFolder(adlsAcct, folderPath, true, true); - } - - @Test - public void DataLakeStoreFileSystemDeleteFile() throws Exception - { - String filePath = CreateFile(adlsAcct, true, true, folderToCreate); - GetAndCompareFileOrFolder(adlsAcct, filePath, FileType.FILE, - fileContentsToAdd.length()); - DeleteFile(adlsAcct, filePath, false); - - // try to delete it again, which should fail - DeleteFile(adlsAcct, filePath, true); - } - - @Test - public void DataLakeStoreFileSystemGetAndSetAcl() throws Exception - { - AclStatusResult currentAcl = dataLakeStoreFileSystemManagementClient.fileSystems().getAclStatus(adlsAcct, "/").getBody(); - - List aclToReplaceWith = new ArrayList(currentAcl.aclStatus().entries()); - String originalOther = ""; - String toReplace = "other::rwx"; - for (int i = 0; i < aclToReplaceWith.size(); i++) - { - if (aclToReplaceWith.get(i).startsWith("other")) - { - originalOther = aclToReplaceWith.get(i); - aclToReplaceWith.set(i, toReplace); - break; - } - } - - Assert.assertFalse(originalOther == null || StringUtils.isEmpty(originalOther)); - - // Set the other acl to RWX - dataLakeStoreFileSystemManagementClient.fileSystems().setAcl(adlsAcct, "/", - StringUtils.join(aclToReplaceWith, ",")); - - AclStatusResult newAcl = dataLakeStoreFileSystemManagementClient.fileSystems().getAclStatus(adlsAcct, "/").getBody(); - // verify the ACL actually changed - - // Check the access first and assert that it returns OK (note: this is currently only for the user making the request, so it is not testing "other") - dataLakeStoreFileSystemManagementClient.fileSystems().checkAccess( - adlsAcct, - "/", - "rwx"); - - boolean foundIt = false; - for (String entry: newAcl.aclStatus().entries()) - { - if(entry.startsWith("other")) { - Assert.assertEquals(toReplace, entry); - foundIt = true; - break; - } - } - - Assert.assertTrue(foundIt); - - // Set it back using specific entry - dataLakeStoreFileSystemManagementClient.fileSystems().modifyAclEntries( - adlsAcct, - "/", - originalOther); - - // Now confirm that it equals the original ACL - List finalEntries = dataLakeStoreFileSystemManagementClient.fileSystems().getAclStatus(adlsAcct, "/").getBody() - .aclStatus().entries(); - for (String entry: finalEntries) - { - boolean found = false; - for(String curEntry: currentAcl.aclStatus().entries()) { - if(curEntry.toUpperCase().equals(entry.toUpperCase())) { - found = true; - break; - } - } - - Assert.assertTrue(found); - } - - Assert.assertEquals(finalEntries.size(), currentAcl.aclStatus().entries().size()); - } - - @Test - public void DataLakeStoreFileSystemSetFileProperties() throws Exception - { - // This test simply tests that all bool/empty return actions return successfully - - String filePath = CreateFile(adlsAcct, true, true, folderToCreate); - FileStatusProperties originalFileStatus = - dataLakeStoreFileSystemManagementClient.fileSystems().getFileStatus(adlsAcct, filePath).getBody().fileStatus(); - // TODO: Set replication on file, this has been removed until it is confirmed as a supported API. - /* - var replicationResponse = dataLakeStoreFileSystemManagementClient.fileSystems().SetReplication(adlsAcct, filePath, 3); - Assert.assertTrue(replicationResponse.Boolean); - */ - - /* - * This API is available but all values put into it are ignored. Commenting this out until this API is fully functional. 
- Assert.assertEquals(3, - dataLakeFileSystemClient.FileSystem.getFileStatus(filePath) - .FileStatus.Replication); - */ - - // set the time on the file - // We use a static date for now since we aren't interested in whether the value is set properly, only that the method returns a 200. - /* TODO: Re enable once supported. - var timeToSet = new DateTime(2015, 10, 26, 14, 30, 0).Ticks; - dataLakeStoreFileSystemManagementClient.fileSystems().SetTimes(adlsAcct, filePath, timeToSet, timeToSet); - - var fileStatusAfterTime = - dataLakeStoreFileSystemManagementClient.fileSystems().getFileStatus(adlsAcct, filePath).FileStatus; - */ - - /* - * This API is available but all values put into it are ignored. Commenting this out until this API is fully functional. - Assert.assertTrue( - fileStatusAfterTime.ModificationTime == timeToSet && fileStatusAfterTime.AccessTime == timeToSet); - */ - - // TODO: Symlink creation is explicitly not supported, but when it is this should be enabled. - /* - var symLinkName = generateName("testPath/symlinktest1"); - Assert.assertThrows(() => dataLakeStoreFileSystemManagementClient.fileSystems().CreateSymLink(adlsAcct, filePath, symLinkName, true)); - */ - - // Once symlinks are available, remove the throws test and uncomment out this code. - // Assert.assertTrue(createSymLinkResponse.StatusCode == HttpStatusCode.OK); - // Assert.assertDoesNotThrow(() => dataLakeFileSystemClient.FileSystem.getFileStatus(symLinkName)); - } - - @Test - public void DataLakeStoreFileSystemGetAcl() throws Exception - { - AclStatusResult aclGetResponse = dataLakeStoreFileSystemManagementClient.fileSystems().getAclStatus(adlsAcct, "/").getBody(); - - Assert.assertNotNull(aclGetResponse.aclStatus()); - Assert.assertTrue(aclGetResponse.aclStatus().entries().size() > 0); - Assert.assertTrue(aclGetResponse.aclStatus().owner() != null && StringUtils.isNotEmpty(aclGetResponse.aclStatus().owner())); - Assert.assertTrue(aclGetResponse.aclStatus().group() != null && StringUtils.isNotEmpty(aclGetResponse.aclStatus().group())); - } - - @Test - public void DataLakeStoreFileSystemSetAcl() throws Exception - { - AclStatusResult aclGetResponse = dataLakeStoreFileSystemManagementClient.fileSystems().getAclStatus(adlsAcct, "/").getBody(); - - Assert.assertNotNull(aclGetResponse.aclStatus()); - Assert.assertTrue(aclGetResponse.aclStatus().entries().size() > 0); - - int currentCount = aclGetResponse.aclStatus().entries().size(); - - // add an entry to the ACL Entries - String newAcls = StringUtils.join(aclGetResponse.aclStatus().entries(), ","); - String aclUserId = UUID.randomUUID().toString(); - newAcls += String.format(",user:%s:rwx", aclUserId); - - dataLakeStoreFileSystemManagementClient.fileSystems().setAcl(adlsAcct, - "/", - newAcls); - - // retrieve the ACL again and confirm the new entry is present - aclGetResponse = dataLakeStoreFileSystemManagementClient.fileSystems().getAclStatus(adlsAcct, "/").getBody(); - - Assert.assertNotNull(aclGetResponse.aclStatus()); - Assert.assertTrue(aclGetResponse.aclStatus().entries().size() > 0); - Assert.assertEquals(currentCount + 1, aclGetResponse.aclStatus().entries().size()); - - boolean found = false; - for (String entry: aclGetResponse.aclStatus().entries()) { - if(entry.contains(aclUserId)) { - found = true; - break; - } - } - - Assert.assertTrue(found); - } - - @Test - public void DataLakeStoreFileSystemSetDeleteAclEntry() throws Exception - { - AclStatusResult aclGetResponse = dataLakeStoreFileSystemManagementClient.fileSystems().getAclStatus(adlsAcct, 
"/").getBody(); - - Assert.assertNotNull(aclGetResponse.aclStatus()); - Assert.assertTrue(aclGetResponse.aclStatus().entries().size() > 0); - - int currentCount = aclGetResponse.aclStatus().entries().size(); - // add an entry to the ACL Entries - String aclUserId = UUID.randomUUID().toString(); - String newAce = String.format("user:%s:rwx", aclUserId); - - dataLakeStoreFileSystemManagementClient.fileSystems().modifyAclEntries(adlsAcct, "", - newAce); - - // retrieve the ACL again and confirm the new entry is present - aclGetResponse = dataLakeStoreFileSystemManagementClient.fileSystems().getAclStatus(adlsAcct, "/").getBody(); - - Assert.assertNotNull(aclGetResponse.aclStatus()); - Assert.assertTrue(aclGetResponse.aclStatus().entries().size() > 0); - Assert.assertEquals(currentCount + 1, aclGetResponse.aclStatus().entries().size()); - - boolean found = false; - for (String entry: aclGetResponse.aclStatus().entries()) { - if(entry.contains(aclUserId)) { - found = true; - break; - } - } - - Assert.assertTrue(found); - - // now remove the entry - String aceToRemove = String.format(",user:%s", aclUserId); - dataLakeStoreFileSystemManagementClient.fileSystems().removeAclEntries( - adlsAcct, - "/", - aceToRemove); - - // retrieve the ACL again and confirm the new entry is present - aclGetResponse = dataLakeStoreFileSystemManagementClient.fileSystems().getAclStatus(adlsAcct, "/").getBody(); - - Assert.assertNotNull(aclGetResponse.aclStatus()); - Assert.assertTrue(aclGetResponse.aclStatus().entries().size() > 0); - Assert.assertEquals(currentCount, aclGetResponse.aclStatus().entries().size()); - - found = false; - for (String entry: aclGetResponse.aclStatus().entries()) { - if(entry.contains(aclUserId)) { - found = true; - break; - } - } - - Assert.assertFalse(found); - } - - // helper methods - private String CreateFolder(String caboAccountName, boolean randomName) throws Exception - { - // Create a folder - String folderPath = randomName - ? generateName(folderToCreate) - : folderToCreate; - - FileOperationResult response = dataLakeStoreFileSystemManagementClient.fileSystems().mkdirs(caboAccountName, folderPath).getBody(); - Assert.assertTrue(response.operationResult()); - - return folderPath; - } - - private String CreateFile(String caboAccountName, boolean withContents, boolean randomName, String folderName) throws Exception - { - String fileToCreate = "SDKTestFile01.txt"; - String filePath = randomName ? 
generateName(String.format("%s/%s", folderName, fileToCreate)) : String.format("%s/%s", folderName, fileToCreate); - - if (!withContents) - { - dataLakeStoreFileSystemManagementClient.fileSystems().create( - caboAccountName, - filePath); - } - else - { - dataLakeStoreFileSystemManagementClient.fileSystems().create( - caboAccountName, - filePath, - fileContentsToAdd.getBytes(), - true); - } - - return filePath; - } - - private FileStatusResult GetAndCompareFileOrFolder(String caboAccountName, String fileOrFolderPath, FileType expectedType, long expectedLength) throws Exception - { - FileStatusResult getResponse = dataLakeStoreFileSystemManagementClient.fileSystems().getFileStatus(caboAccountName, fileOrFolderPath).getBody(); - Assert.assertEquals(expectedLength, (long) getResponse.fileStatus().length()); - Assert.assertEquals(expectedType, getResponse.fileStatus().type()); - - return getResponse; - } - - private void CompareFileContents(String caboAccountName, String filePath, String expectedContents) throws Exception - { - // download a file and ensure they are equal - InputStream openResponse = dataLakeStoreFileSystemManagementClient.fileSystems().open(caboAccountName, filePath).getBody(); - Assert.assertNotNull(openResponse); - Writer writer = new StringWriter(); - char[] buffer = new char[1024]; - try { - Reader reader = new BufferedReader( - new InputStreamReader(openResponse, "UTF-8")); - int n; - while ((n = reader.read(buffer)) != -1) { - writer.write(buffer, 0, n); - } - } - finally { - openResponse.close(); - } - String fileContents = writer.toString(); - Assert.assertEquals(expectedContents, fileContents); - } - - private void DeleteFolder(String caboAccountName, String folderPath, boolean recursive, boolean failureExpected) throws Exception - { - if (failureExpected) - { - // try to delete a folder that doesn't exist or should fail - try - { - FileOperationResult deleteFolderResponse = dataLakeStoreFileSystemManagementClient.fileSystems().delete(caboAccountName, folderPath, recursive).getBody(); - Assert.assertTrue(!deleteFolderResponse.operationResult()); - } - catch (Exception e) - { - Assert.assertTrue(e instanceof CloudException); - } - } - else - { - // Delete a folder - FileOperationResult deleteFolderResponse = dataLakeStoreFileSystemManagementClient.fileSystems().delete(caboAccountName, folderPath, recursive).getBody(); - Assert.assertTrue(deleteFolderResponse.operationResult()); - } - } - - private void DeleteFile(String caboAccountName, String filePath, boolean failureExpected) throws Exception - { - if (failureExpected) - { - // try to delete a file that doesn't exist - try - { - FileOperationResult deleteFileResponse = dataLakeStoreFileSystemManagementClient.fileSystems().delete(caboAccountName, filePath, false).getBody(); - Assert.assertTrue(!deleteFileResponse.operationResult()); - } - catch (Exception e) - { - Assert.assertTrue(e instanceof CloudException); - } - } - else - { - // Delete a file - FileOperationResult deleteFileResponse = dataLakeStoreFileSystemManagementClient.fileSystems().delete(caboAccountName, filePath, false).getBody(); - Assert.assertTrue(deleteFileResponse.operationResult()); - } - } -} diff --git a/azure-mgmt-datalake-store/src/test/java/com/microsoft/azure/management/datalake/store/DataLakeStoreManagementTestBase.java b/azure-mgmt-datalake-store/src/test/java/com/microsoft/azure/management/datalake/store/DataLakeStoreManagementTestBase.java index 0fca86a3de9b..ab5f99b139d2 100644 --- 
a/azure-mgmt-datalake-store/src/test/java/com/microsoft/azure/management/datalake/store/DataLakeStoreManagementTestBase.java +++ b/azure-mgmt-datalake-store/src/test/java/com/microsoft/azure/management/datalake/store/DataLakeStoreManagementTestBase.java @@ -3,7 +3,6 @@ import com.microsoft.azure.AzureEnvironment; import com.microsoft.azure.credentials.UserTokenCredentials; import com.microsoft.azure.management.datalake.store.implementation.DataLakeStoreAccountManagementClientImpl; -import com.microsoft.azure.management.datalake.store.implementation.DataLakeStoreFileSystemManagementClientImpl; import com.microsoft.azure.management.resources.implementation.ResourceManagementClientImpl; import com.microsoft.azure.RestClient; import okhttp3.logging.HttpLoggingInterceptor; @@ -11,7 +10,6 @@ public abstract class DataLakeStoreManagementTestBase { protected static ResourceManagementClientImpl resourceManagementClient; protected static DataLakeStoreAccountManagementClientImpl dataLakeStoreAccountManagementClient; - protected static DataLakeStoreFileSystemManagementClientImpl dataLakeStoreFileSystemManagementClient; public static void createClients() { UserTokenCredentials credentials = new UserTokenCredentials( @@ -32,8 +30,6 @@ public static void createClients() { dataLakeStoreAccountManagementClient = new DataLakeStoreAccountManagementClientImpl(restClient); dataLakeStoreAccountManagementClient.withSubscriptionId(System.getenv("arm.subscriptionid")); - - dataLakeStoreFileSystemManagementClient = new DataLakeStoreFileSystemManagementClientImpl(restClient); } public static String generateName(String prefix) { From aae876e168f26d08ee9efac9e24d873b7e24a735 Mon Sep 17 00:00:00 2001 From: begoldsm Date: Tue, 11 Oct 2016 16:44:29 -0700 Subject: [PATCH 3/3] missed uploader in pom. --- pom.xml | 1 - 1 file changed, 1 deletion(-) diff --git a/pom.xml b/pom.xml index 5480c588bccd..701dfaa71b65 100644 --- a/pom.xml +++ b/pom.xml @@ -312,7 +312,6 @@ ./azure-mgmt-website ./azure-mgmt-datalake-analytics ./azure-mgmt-datalake-store - ./azure-mgmt-datalake-store-uploader ./azure-samples ./azure-mgmt-redis ./azure-mgmt-search
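Note for the series: with the filesystem client and uploader removed, only
the account management client remains in this repo. A minimal construction
sketch, mirroring DataLakeStoreManagementTestBase.createClients() above
(the RestClient and credentials setup is elided here; see the test base for
how restClient is built):

    // Sketch only; restClient is assumed to be built as in the test base above.
    DataLakeStoreAccountManagementClientImpl client =
        new DataLakeStoreAccountManagementClientImpl(restClient);
    client.withSubscriptionId(System.getenv("arm.subscriptionid"));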