From 6e02c1e6ad0c63bb656148ed060fa0e19b347dfb Mon Sep 17 00:00:00 2001
From: begoldsm
Date: Wed, 27 Apr 2016 14:23:05 -0700
Subject: [PATCH 1/2] Committing all changes based on tests

---
 .../DataLakeAnalyticsManagementTestBase.java  |   2 +-
 .../DataLakeStoreFrontEndAdapterImpl.java     |  20 +-
 .../store/uploader/DataLakeStoreUploader.java |  17 +-
 .../uploader/DataLakeUploaderTestBase.java    |  48 +++++
 .../uploader/PerformanceUploadTests.java      | 168 ++++++++++++++++++
 5 files changed, 246 insertions(+), 9 deletions(-)
 create mode 100644 azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeUploaderTestBase.java
 create mode 100644 azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/PerformanceUploadTests.java

diff --git a/azure-mgmt-datalake-analytics/src/test/java/com/microsoft/azure/management/datalake/analytics/DataLakeAnalyticsManagementTestBase.java b/azure-mgmt-datalake-analytics/src/test/java/com/microsoft/azure/management/datalake/analytics/DataLakeAnalyticsManagementTestBase.java
index 75abb087d5633..33d84ca2e16db 100644
--- a/azure-mgmt-datalake-analytics/src/test/java/com/microsoft/azure/management/datalake/analytics/DataLakeAnalyticsManagementTestBase.java
+++ b/azure-mgmt-datalake-analytics/src/test/java/com/microsoft/azure/management/datalake/analytics/DataLakeAnalyticsManagementTestBase.java
@@ -110,7 +110,7 @@ public static void runJobToCompletion(DataLakeAnalyticsJobManagementClient jobCl
         JobInformation getJobResponse = jobClient.getJobOperations().get(adlaAcct, jobCreateResponse.getJobId()).getBody();
         Assert.assertNotNull(getJobResponse);
 
-        int maxWaitInSeconds = 180; // 3 minutes should be long enough
+        int maxWaitInSeconds = 2700; // giving it 45 minutes for now.
         int curWaitInSeconds = 0;
 
         while (getJobResponse.getState() != JobState.ENDED && curWaitInSeconds < maxWaitInSeconds)
diff --git a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeStoreFrontEndAdapterImpl.java b/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeStoreFrontEndAdapterImpl.java
index a9e2a5eafc7e7..ce550279ab733 100644
--- a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeStoreFrontEndAdapterImpl.java
+++ b/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeStoreFrontEndAdapterImpl.java
@@ -9,7 +9,11 @@
 import com.microsoft.azure.management.datalake.store.DataLakeStoreFileSystemManagementClient;
 import com.microsoft.azure.management.datalake.store.models.FileStatusResult;
 
+import org.apache.commons.lang3.StringUtils;
+
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.text.MessageFormat;
 
 /**
  * A front end adapter that communicates with the DataLake Store.
@@ -52,7 +56,7 @@ public void CreateStream(String streamPath, boolean overwrite, byte[] data, int
             toCreate = new byte[byteCount];
             System.arraycopy(data, 0, toCreate, 0, byteCount);
         }
-        _client.getFileSystemOperations().create(streamPath, _accountName, data, overwrite);
+        _client.getFileSystemOperations().create(_accountName, streamPath, toCreate, overwrite);
     }
 
     /**
@@ -64,7 +68,7 @@ public void CreateStream(String streamPath, boolean overwrite, byte[] data, int
      * @throws CloudException
      */
     public void DeleteStream(String streamPath, boolean recurse) throws IOException, CloudException {
-        _client.getFileSystemOperations().delete(streamPath, _accountName, recurse);
+        _client.getFileSystemOperations().delete(_accountName, streamPath, recurse);
     }
 
     /**
@@ -80,7 +84,7 @@ public void DeleteStream(String streamPath, boolean recurse) throws IOException,
     public void AppendToStream(String streamPath, byte[] data, long offset, int byteCount) throws IOException, CloudException {
         byte[] toAppend = new byte[byteCount];
         System.arraycopy(data, 0, toAppend, 0, byteCount);
-        _client.getFileSystemOperations().append(streamPath, _accountName, toAppend);
+        _client.getFileSystemOperations().append(_accountName, streamPath, toAppend);
     }
 
     /**
@@ -93,7 +97,7 @@ public void AppendToStream(String streamPath, byte[] data, long offset, int byte
      */
     public boolean StreamExists(String streamPath) throws CloudException, IOException {
         try {
-            _client.getFileSystemOperations().getFileStatus(streamPath, _accountName);
+            _client.getFileSystemOperations().getFileStatus(_accountName, streamPath);
         } catch (CloudException cloudEx) {
             if (cloudEx.getResponse().code() == 404) {
                 return false;
@@ -114,7 +118,7 @@ public boolean StreamExists(String streamPath) throws CloudException, IOExceptio
      * @throws CloudException
      */
    public long GetStreamLength(String streamPath) throws IOException, CloudException {
-        FileStatusResult fileInfoResponse = _client.getFileSystemOperations().getFileStatus(streamPath, _accountName).getBody();
+        FileStatusResult fileInfoResponse = _client.getFileSystemOperations().getFileStatus(_accountName, streamPath).getBody();
         return fileInfoResponse.getFileStatus().getLength();
     }
 
@@ -131,9 +135,11 @@ public void Concatenate(String targetStreamPath, String[] inputStreamPaths) thro
         // this is required for the current version of the microsoft concatenate
         // TODO: Improve WebHDFS concatenate to take in the list of paths to concatenate
         // in the request body.
-        String paths = "sources=" + String.join(",", inputStreamPaths);
+        String paths = MessageFormat.format("sources={0}", StringUtils.join(inputStreamPaths, ','));
 
         // For the current implementation, we require UTF8 encoding.
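+        // The request body sent to msConcat is the literal string "sources=<path1>,<path2>,..."
+        // built above, passed to the service as UTF-8 encoded bytes.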
-        _client.getFileSystemOperations().msConcat(targetStreamPath, _accountName, paths.getBytes(), true);
+        _client.getFileSystemOperations().msConcat(_accountName, targetStreamPath, paths.getBytes(StandardCharsets.UTF_8), true);
     }
 }
diff --git a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeStoreUploader.java b/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeStoreUploader.java
index ade77766ffd8d..9362ea3256513 100644
--- a/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeStoreUploader.java
+++ b/azure-mgmt-datalake-store-uploader/src/main/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeStoreUploader.java
@@ -17,6 +17,7 @@
 import java.util.List;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
 
 /**
  * Represents a general purpose file uploader into DataLake. Supports the efficient upload of large files.
@@ -342,14 +343,28 @@ public void run() {
                     }
 
                     inputPaths[finalI] = remoteStreamPath;
+
                 } catch (Exception ex) {
                     //collect any exceptions, whether we just generated them above or whether they come from the Front End,
-                    exceptions.add(ex);
+                    synchronized (exceptions) {
+                        exceptions.add(ex);
+                    }
                 }
             }
         });
     }
 
+    exec.shutdown();
+
+    try {
+        exec.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); // effectively waits indefinitely (Long.MAX_VALUE nanoseconds is roughly 292 years) for completion or interruption.
+    }
+    catch (InterruptedException e) {
+        // restore the interrupt status and record the exception, which signals that the wait was cancelled.
+        Thread.currentThread().interrupt();
+        exceptions.add(e);
+    }
+
     if (exceptions.size() > 0) {
         throw new AggregateUploadException("At least one concatenate test failed", exceptions.remove(0), exceptions);
     }
diff --git a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeUploaderTestBase.java b/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeUploaderTestBase.java
new file mode 100644
index 0000000000000..0af14d58d5d00
--- /dev/null
+++ b/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/DataLakeUploaderTestBase.java
@@ -0,0 +1,48 @@
+/**
+ * Copyright (c) Microsoft Corporation. All rights reserved.
+ * Licensed under the MIT License. See License.txt in the project root for
+ * license information.
+ */ +package com.microsoft.azure.management.datalake.store.uploader; + +import com.microsoft.azure.credentials.AzureEnvironment; +import com.microsoft.azure.credentials.UserTokenCredentials; +import com.microsoft.azure.management.datalake.store.DataLakeStoreAccountManagementClient; +import com.microsoft.azure.management.datalake.store.DataLakeStoreAccountManagementClientImpl; +import com.microsoft.azure.management.datalake.store.DataLakeStoreFileSystemManagementClient; +import com.microsoft.azure.management.datalake.store.DataLakeStoreFileSystemManagementClientImpl; +import com.microsoft.azure.management.resources.ResourceManagementClient; +import com.microsoft.azure.management.resources.ResourceManagementClientImpl; +import okhttp3.logging.HttpLoggingInterceptor; + +public abstract class DataLakeUploaderTestBase { + protected static ResourceManagementClient resourceManagementClient; + protected static DataLakeStoreAccountManagementClient dataLakeStoreAccountManagementClient; + protected static DataLakeStoreFileSystemManagementClient dataLakeStoreFileSystemManagementClient; + + public static void createClients() { + UserTokenCredentials credentials = new UserTokenCredentials( + System.getenv("arm.clientid"), + System.getenv("arm.domain"), + System.getenv("arm.username"), + System.getenv("arm.password"), + null, + AzureEnvironment.AZURE); + + resourceManagementClient = new ResourceManagementClientImpl(credentials); + resourceManagementClient.setSubscriptionId(System.getenv("arm.subscriptionid")); + resourceManagementClient.setLogLevel(HttpLoggingInterceptor.Level.BODY); + + dataLakeStoreAccountManagementClient = new DataLakeStoreAccountManagementClientImpl(credentials); + dataLakeStoreAccountManagementClient.setLogLevel(HttpLoggingInterceptor.Level.BODY); + dataLakeStoreAccountManagementClient.setSubscriptionId(System.getenv("arm.subscriptionid")); + + dataLakeStoreFileSystemManagementClient = new DataLakeStoreFileSystemManagementClientImpl(credentials); + dataLakeStoreFileSystemManagementClient.setLogLevel(HttpLoggingInterceptor.Level.NONE); + } + + public static String generateName(String prefix) { + int randomSuffix = (int)(Math.random() * 1000); + return prefix + randomSuffix; + } +} diff --git a/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/PerformanceUploadTests.java b/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/PerformanceUploadTests.java new file mode 100644 index 0000000000000..185576a8f061f --- /dev/null +++ b/azure-mgmt-datalake-store-uploader/src/test/java/com/microsoft/azure/management/datalake/store/uploader/PerformanceUploadTests.java @@ -0,0 +1,170 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. 
+ */ +package com.microsoft.azure.management.datalake.store.uploader; + +import com.google.common.base.Stopwatch; +import com.microsoft.azure.management.datalake.store.DataLakeStoreFileSystemManagementClient; +import com.microsoft.azure.management.datalake.store.models.DataLakeStoreAccount; +import com.microsoft.azure.management.resources.models.ResourceGroup; +import com.sun.jndi.toolkit.url.Uri; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.RandomAccessFile; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +public class PerformanceUploadTests extends DataLakeUploaderTestBase { + + private static String rgName = generateName("javaadlsrg"); + private static String adlsAcct = generateName("javaadlsacct"); + + private static final String location = "East US 2"; + private static String destFolder = generateName("performanceTest"); + + private static final String Local10GbFileName = "C:\\data\\FixedBlockPerfData.txt"; // 10GB perf test binary file. + private static final String localLargeFileName = "C:\\data\\MicrosoftTelemetry.tsv"; // 2.5GB perf test binary file + private static final String localFileName = "C:\\data\\smallFile.txt"; // 4mb perf test binary file + + @BeforeClass + public static void Setup() throws Exception { + createClients(); + + ResourceGroup group = new ResourceGroup(); + String location = "eastus2"; + group.setLocation(location); + resourceManagementClient.getResourceGroupsOperations().createOrUpdate(rgName, group); + + // create storage and ADLS accounts, setting the accessKey + DataLakeStoreAccount adlsAccount = new DataLakeStoreAccount(); + adlsAccount.setLocation(location); + adlsAccount.setName(adlsAcct); + dataLakeStoreAccountManagementClient.getAccountOperations().create(rgName, adlsAcct, adlsAccount); + + File smallFile = new File(localFileName); + if (!smallFile.exists()) { + smallFile.createNewFile(); + try (FileOutputStream stream = new FileOutputStream(smallFile)) { + byte[] contents = new byte[4 * 1024 * 1024]; + Arrays.fill(contents, (byte) 'a'); + stream.write(contents); + } + } + + File largeFile = new File(localLargeFileName); + if (!largeFile.exists()) { + try (RandomAccessFile stream = new RandomAccessFile(largeFile, "rw")) { + stream.setLength((long)(2.5* 1024 * 1024 * 1024) - 10); // 2.5GB minus 10 bytes + byte[] content = new byte[10]; + Arrays.fill(content, (byte)'a'); + stream.write(content); + } + } + + File tenGBFile = new File(Local10GbFileName); + if (!tenGBFile.exists()) { + try (RandomAccessFile stream = new RandomAccessFile(tenGBFile, "rw")) { + stream.setLength((long)(10* 1024 * 1024 * 1024) - 10); // 10GB minus 10 bytes + byte[] content = new byte[10]; + Arrays.fill(content, (byte)'a'); + stream.write(content); + } + } + } + + @AfterClass + public static void cleanup() throws Exception { + try { + resourceManagementClient.getResourceGroupsOperations().delete(rgName); + } + catch (Exception e) { + // ignore failures during cleanup, as it is best effort + } + } + + @Test + public void Test4mbFileUpload() throws Exception { + String folder = "begoldsm"; + ArrayList perfMetrics = new ArrayList(); + + // Upload Rentrak data. 
+        boolean force = true; // Set this to true if you want to overwrite existing data
+        System.out.println("Uploading 4mb data...");
+        for (int i = 0; i < 10; ++i) {
+            String destLocation = destFolder + "/" + folder + "4mbFile.txt";
+            Stopwatch watch = Stopwatch.createStarted();
+            UploadFile(dataLakeStoreFileSystemManagementClient, adlsAcct, localFileName, destLocation, force);
+            watch.stop();
+            long elapsedMs = watch.elapsed(TimeUnit.MILLISECONDS);
+            System.out.println("File Uploaded : " + i);
+            perfMetrics.add(elapsedMs);
+        }
+
+        for (long perf : perfMetrics) {
+            System.out.println(perf);
+        }
+    }
+
+    @Test
+    public void Test2_5gbFileUpload() throws Exception {
+        String folder = "begoldsm";
+        ArrayList<Long> perfMetrics = new ArrayList<Long>();
+
+        // Upload the 2.5GB test file five times and record each run's elapsed time.
+        boolean force = true; // Set this to true if you want to overwrite existing data
+        System.out.println("Uploading 2.5GB data...");
+        for (int i = 0; i < 5; ++i) {
+            String destLocation = destFolder + "/" + folder + "2_5gbFile.txt";
+            Stopwatch watch = Stopwatch.createStarted();
+            UploadFile(dataLakeStoreFileSystemManagementClient, adlsAcct, localLargeFileName, destLocation, force);
+            watch.stop();
+            long elapsedMs = watch.elapsed(TimeUnit.MILLISECONDS);
+            System.out.println("File Uploaded : " + i);
+            perfMetrics.add(elapsedMs);
+        }
+
+        for (long perf : perfMetrics) {
+            System.out.println(perf);
+        }
+    }
+
+    @Test
+    public void Test10gbFileUpload() throws Exception {
+        String folder = "begoldsm";
+        ArrayList<Long> perfMetrics = new ArrayList<Long>();
+
+        // Upload the 10GB test file three times and record each run's elapsed time.
+        boolean force = true; // Set this to true if you want to overwrite existing data
+        System.out.println("Uploading 10GB data...");
+        for (int i = 0; i < 3; ++i) {
+            String destLocation = destFolder + "/" + folder + "10gbFile.txt";
+            Stopwatch watch = Stopwatch.createStarted();
+            UploadFile(dataLakeStoreFileSystemManagementClient, adlsAcct, Local10GbFileName, destLocation, force);
+            watch.stop();
+            long elapsedMs = watch.elapsed(TimeUnit.MILLISECONDS);
+            System.out.println("File Uploaded : " + i);
+            perfMetrics.add(elapsedMs);
+        }
+
+        for (long perf : perfMetrics) {
+            System.out.println(perf);
+        }
+    }
+
+    public static boolean UploadFile(DataLakeStoreFileSystemManagementClient dataLakeStoreFileSystemClient, String dlAccountName, String srcPath, String destPath, boolean force) throws Exception {
+        UploadParameters parameters = new UploadParameters(srcPath, destPath, dlAccountName, 40, force, false);
+        FrontEndAdapter frontend = new DataLakeStoreFrontEndAdapterImpl(dlAccountName, dataLakeStoreFileSystemClient);
+        DataLakeStoreUploader uploader = new DataLakeStoreUploader(parameters, frontend);
+        uploader.Execute();
+        return true;
+    }
+}

From c82391a99f981d1dba984620fd0d6b3ea327d994 Mon Sep 17 00:00:00 2001
From: begoldsm
Date: Wed, 27 Apr 2016 14:23:28 -0700
Subject: [PATCH 2/2] Missed a re-format of the catalog script test.
--- ...taLakeAnalyticsCatalogOperationsTests.java | 186 +++++++++--------- 1 file changed, 91 insertions(+), 95 deletions(-) diff --git a/azure-mgmt-datalake-analytics/src/test/java/com/microsoft/azure/management/datalake/analytics/DataLakeAnalyticsCatalogOperationsTests.java b/azure-mgmt-datalake-analytics/src/test/java/com/microsoft/azure/management/datalake/analytics/DataLakeAnalyticsCatalogOperationsTests.java index 4340e798062f2..b0bb755d9ae96 100644 --- a/azure-mgmt-datalake-analytics/src/test/java/com/microsoft/azure/management/datalake/analytics/DataLakeAnalyticsCatalogOperationsTests.java +++ b/azure-mgmt-datalake-analytics/src/test/java/com/microsoft/azure/management/datalake/analytics/DataLakeAnalyticsCatalogOperationsTests.java @@ -40,95 +40,97 @@ public class DataLakeAnalyticsCatalogOperationsTests extends DataLakeAnalyticsMa private static String credentialName = generateName("testcred1"); private static String secretName = generateName("testsecret1"); private static String secretPwd = generateName("testsecretpwd1"); - - private static String catalogCreationScript = MessageFormat.format("DROP DATABASE IF EXISTS {0}; CREATE DATABASE {0}; " + - "//Create Table" + - "CREATE TABLE {0}.dbo.{1}" + - "(" + - " //Define schema of table" + - " UserId int, " + - " Start DateTime, " + - " Region string, " + - " Query string, " + - " Duration int, " + - " Urls string, " + - " ClickedUrls string," + - " INDEX idx1 //Name of index" + - " CLUSTERED (Region ASC) //Column to cluster by" + - " PARTITIONED BY HASH (Region) //Column to partition by" + - ");" + - "DROP FUNCTION IF EXISTS {0}.dbo.{2};" + - "" + - "//create table weblogs on space-delimited website log data" + - "CREATE FUNCTION {0}.dbo.{2}()" + - "RETURNS @result TABLE" + - "(" + - " s_date DateTime," + - " s_time string," + - " s_sitename string," + - " cs_method string, " + - " cs_uristem string," + - " cs_uriquery string," + - " s_port int," + - " cs_username string, " + - " c_ip string," + - " cs_useragent string," + - " cs_cookie string," + - " cs_referer string, " + - " cs_host string," + - " sc_status int," + - " sc_substatus int," + - " sc_win32status int, " + - " sc_bytes int," + - " cs_bytes int," + - " s_timetaken int" + - ")" + - "AS" + - "BEGIN" + - "" + - " @result = EXTRACT" + - " s_date DateTime," + - " s_time string," + - " s_sitename string," + - " cs_method string," + - " cs_uristem string," + - " cs_uriquery string," + - " s_port int," + - " cs_username string," + - " c_ip string," + - " cs_useragent string," + - " cs_cookie string," + - " cs_referer string," + - " cs_host string," + - " sc_status int," + - " sc_substatus int," + - " sc_win32status int," + - " sc_bytes int," + - " cs_bytes int," + - " s_timetaken int" + - " FROM @\"/Samples/Data/WebLog.log\"" + - " USING Extractors.Text(delimiter:' ');" + - "" + - "RETURN;" + - "END;" + - "CREATE VIEW {0}.dbo.{3} " + - "AS " + - " SELECT * FROM " + - " (" + - " VALUES(1,2),(2,4)" + - " ) " + - "AS " + - "T(a, b);" + - "CREATE PROCEDURE {0}.dbo.{4}()" + - "AS BEGIN" + - " CREATE VIEW {0}.dbo.{3} " + - " AS " + - " SELECT * FROM " + - " (" + - " VALUES(1,2),(2,4)" + - " ) " + - " AS " + - " T(a, b);" + + private static String catalogCreationScript = MessageFormat.format("DROP DATABASE IF EXISTS {0}; CREATE DATABASE {0}; \r\n" + + "//Create Table\r\n" + + "CREATE TABLE {0}.dbo.{1}\r\n" + + "(\r\n" + + " //Define schema of table\r\n" + + " UserId int, \r\n" + + " Start DateTime, \r\n" + + " Region string, \r\n" + + " Query string, \r\n" + + " Duration int, \r\n" + + 
" Urls string, \r\n" + + " ClickedUrls string,\r\n" + + " INDEX idx1 //Name of index\r\n" + + " CLUSTERED (Region ASC) //Column to cluster by\r\n" + + " PARTITIONED BY BUCKETS (UserId) HASH (Region) //Column to partition by\r\n" + + ");\r\n" + + "\r\n" + + "ALTER TABLE {0}.dbo.{1} ADD IF NOT EXISTS PARTITION (1);\r\n" + + "\r\n" + + "DROP FUNCTION IF EXISTS {0}.dbo.{2};\r\n" + + "\r\n" + + "//create table weblogs on space-delimited website log data\r\n" + + "CREATE FUNCTION {0}.dbo.{2}()\r\n" + + "RETURNS @result TABLE\r\n" + + "(\r\n" + + " s_date DateTime,\r\n" + + " s_time string,\r\n" + + " s_sitename string,\r\n" + + " cs_method string, \r\n" + + " cs_uristem string,\r\n" + + " cs_uriquery string,\r\n" + + " s_port int,\r\n" + + " cs_username string, \r\n" + + " c_ip string,\r\n" + + " cs_useragent string,\r\n" + + " cs_cookie string,\r\n" + + " cs_referer string, \r\n" + + " cs_host string,\r\n" + + " sc_status int,\r\n" + + " sc_substatus int,\r\n" + + " sc_win32status int, \r\n" + + " sc_bytes int,\r\n" + + " cs_bytes int,\r\n" + + " s_timetaken int\r\n" + + ")\r\n" + + "AS\r\n" + + "BEGIN\r\n" + + "\r\n" + + " @result = EXTRACT\r\n" + + " s_date DateTime,\r\n" + + " s_time string,\r\n" + + " s_sitename string,\r\n" + + " cs_method string,\r\n" + + " cs_uristem string,\r\n" + + " cs_uriquery string,\r\n" + + " s_port int,\r\n" + + " cs_username string,\r\n" + + " c_ip string,\r\n" + + " cs_useragent string,\r\n" + + " cs_cookie string,\r\n" + + " cs_referer string,\r\n" + + " cs_host string,\r\n" + + " sc_status int,\r\n" + + " sc_substatus int,\r\n" + + " sc_win32status int,\r\n" + + " sc_bytes int,\r\n" + + " cs_bytes int,\r\n" + + " s_timetaken int\r\n" + + " FROM @\"/Samples/Data/WebLog.log\"\r\n" + + " USING Extractors.Text(delimiter:''' ''');\r\n" + + "\r\n" + + "RETURN;\r\n" + + "END;\r\n" + + "CREATE VIEW {0}.dbo.{3} \r\n" + + "AS \r\n" + + " SELECT * FROM \r\n" + + " (\r\n" + + " VALUES(1,2),(2,4)\r\n" + + " ) \r\n" + + "AS \r\n" + + "T(a, b);\r\n" + + "CREATE PROCEDURE {0}.dbo.{4}()\r\n" + + "AS BEGIN\r\n" + + " CREATE VIEW {0}.dbo.{3} \r\n" + + " AS \r\n" + + " SELECT * FROM \r\n" + + " (\r\n" + + " VALUES(1,2),(2,4)\r\n" + + " ) \r\n" + + " AS \r\n" + + " T(a, b);\r\n" + "END;", dbName, tableName, tvfName, viewName, procName); @BeforeClass @@ -318,12 +320,6 @@ public void canCreateUpdateDeleteSecretsAndCredentials() throws Exception { USqlSecret secretCreateResponse = dataLakeAnalyticsCatalogManagementClient.getCatalogOperations().createSecret( adlaAcct, dbName, secretName, createParams).getBody(); - try { - - } - catch(Exception e) { - - } // Attempt to create the secret again, which should throw try {