diff --git a/.azure-pipelines/client.yml b/.azure-pipelines/client.yml index 38ec73c6980be..7b19493c55b41 100644 --- a/.azure-pipelines/client.yml +++ b/.azure-pipelines/client.yml @@ -95,10 +95,28 @@ jobs: publishJUnitResults: false goals: 'site:site site:stage' + - script: | + git clone https://github.com/JonathanGiles/DependencyChecker.git + mkdir input && cp dependencies.json input/ + displayName: 'Download dependency checker' + + - task: Maven@3 + displayName: 'Analyze dependencies' + inputs: + mavenPomFile: 'DependencyChecker/pom.xml' + options: '-Dexec.args="-showall"' + mavenOptions: '$(LoggingOptions)' + javaHomeOption: 'JDKVersion' + jdkVersionOption: '1.11' + jdkArchitectureOption: 'x64' + publishJUnitResults: false + goals: 'clean package exec:java' + - powershell: | copy -r target/staging $(Build.ArtifactStagingDirectory) copy eng/code-quality-reports/src/main/resources/index.html $(Build.ArtifactStagingDirectory) copy eng/spotbugs-aggregate-report/target/spotbugs/spotbugsXml.html (Join-Path $(Build.ArtifactStagingDirectory) "staging") + copy output/dependencies.html (Join-Path $(Build.ArtifactStagingDirectory) "staging") displayName: 'Copy reports to artifact staging' - task: PublishBuildArtifacts@1 diff --git a/README.md b/README.md index 77843d2dfe5db..27b268ef33dac 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ | Component | Build Status | | --------- | ------------ | | Management Libraries | [![Build Status](https://travis-ci.org/Azure/azure-sdk-for-java.svg?branch=master)](https://travis-ci.org/Azure/azure-sdk-for-java) | -| Client Libraries | [![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/17?branchName=master)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=17) | +| Client Libraries | [![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/17?branchName=master)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=17)
[![Build Documentation](https://img.shields.io/badge/documentation-published-blue.svg)](https://azuresdkartifacts.blob.core.windows.net/azure-sdk-for-java/index.html)| :+1: [Try Azure for FREE](http://go.microsoft.com/fwlink/?LinkId=330212) diff --git a/authorization/msi-auth-token-provider-jar/src/main/java/com/microsoft/azure/msiAuthTokenProvider/MSICredentials.java b/authorization/msi-auth-token-provider-jar/src/main/java/com/microsoft/azure/msiAuthTokenProvider/MSICredentials.java index eb32fe913e0ea..0ce890e3a531b 100644 --- a/authorization/msi-auth-token-provider-jar/src/main/java/com/microsoft/azure/msiAuthTokenProvider/MSICredentials.java +++ b/authorization/msi-auth-token-provider-jar/src/main/java/com/microsoft/azure/msiAuthTokenProvider/MSICredentials.java @@ -15,6 +15,9 @@ import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.*; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; /** * Managed Service Identity token based credentials for use with a REST Service Client. @@ -24,6 +27,9 @@ public final class MSICredentials{ // private final List retrySlots = new ArrayList<>(); // + private final Lock lock = new ReentrantLock(); + + private final ConcurrentHashMap cache = new ConcurrentHashMap<>(); private final MSIConfigurationForVirtualMachine configForVM; private final MSIConfigurationForAppService configForAppService; private final HostType hostType; @@ -157,7 +163,7 @@ public void updateObjectId(String objectId) { public MSIToken getToken(String tokenAudience) throws IOException, AzureMSICredentialException{ switch (hostType) { case VIRTUAL_MACHINE: - return this.retrieveTokenFromIDMSWithRetry(tokenAudience == null ? this.configForVM.resource() : tokenAudience); + return this.getTokenForVirtualMachineFromIMDSEndpoint(tokenAudience == null ? 
this.configForVM.resource() : tokenAudience); case APP_SERVICE: return this.getTokenForAppService(tokenAudience); default: @@ -217,6 +223,51 @@ private MSIToken getTokenForAppService(String tokenAudience) throws IOException, } } + private MSIToken getTokenForVirtualMachineFromIMDSEndpoint(String tokenAudience) throws AzureMSICredentialException { + String tokenIdentifier = tokenAudience; + + String extraIdentifier = null; + if (this.configForVM.objectId() != null) + { + extraIdentifier = configForVM.objectId(); + } else if (this.configForVM.clientId() != null) { + extraIdentifier = configForVM.clientId(); + } else if (this.configForVM.identityId() != null) { + extraIdentifier = configForVM.identityId(); + } + + if (extraIdentifier != null) { + tokenIdentifier = tokenIdentifier + "#" + extraIdentifier; + } + + MSIToken token = cache.get(tokenIdentifier); + if (token != null && !token.isExpired()) { + return token; + } + + lock.lock(); + + try { + token = cache.get(tokenIdentifier); + if (token != null && !token.isExpired()) { + return token; + } + + try { + token = retrieveTokenFromIDMSWithRetry(tokenAudience); + if (token != null) { + cache.put(tokenIdentifier, token); + } + } catch (IOException exception) { + throw new AzureMSICredentialException(exception); + } + + return token; + } finally { + lock.unlock(); + } + } + private MSIToken retrieveTokenFromIDMSWithRetry(String tokenAudience) throws AzureMSICredentialException, IOException { StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; diff --git a/dependencies.json b/dependencies.json new file mode 100644 index 0000000000000..69e4ee727541c --- /dev/null +++ b/dependencies.json @@ -0,0 +1,6 @@ +[ + { + "projectName": "azure-sdk-for-java", + "pomUrls": ["file:./pom.client.xml"] + } +] diff --git a/eng/code-quality-reports/src/main/resources/index.html b/eng/code-quality-reports/src/main/resources/index.html index e5c073f6edc9f..31670352ad7cc 100644 --- 
a/eng/code-quality-reports/src/main/resources/index.html +++ b/eng/code-quality-reports/src/main/resources/index.html @@ -499,6 +499,10 @@

Azure Java SDK

Maven Site +
  • + + Dependencies +
  • SpotBugs (Aggregate) diff --git a/eventhubs/data-plane/.gitattributes b/eventhubs/data-plane/.gitattributes new file mode 100644 index 0000000000000..cdec8a81a3c24 --- /dev/null +++ b/eventhubs/data-plane/.gitattributes @@ -0,0 +1,26 @@ +# Default behavior: if Git thinks a file is text (as opposed to binary), it +# will normalize line endings to LF in the repository, but convert to your +# platform's native line endings on checkout (e.g., CRLF for Windows). +* text=auto + +# Explicitly declare text files you want to always be normalized and converted +# to native line endings on checkout. E.g., +#*.c text + +# Declare files that will always have CRLF line endings on checkout. E.g., +#*.sln text eol=crlf + +# Declare files that will always have LF line endings on checkout. E.g., +*.sh text eol=lf + +# Denote all files that should not have line endings normalized, should not be +# merged, and should not show in a textual diff. +*.docm binary +*.docx binary +*.ico binary +*.lib binary +*.png binary +*.pptx binary +*.snk binary +*.vsdx binary +*.xps binary diff --git a/eventhubs/data-plane/.github/CONTRIBUTING.md b/eventhubs/data-plane/.github/CONTRIBUTING.md new file mode 100644 index 0000000000000..a2d93ae0da710 --- /dev/null +++ b/eventhubs/data-plane/.github/CONTRIBUTING.md @@ -0,0 +1,50 @@ +# Contribute or Provide Feedback for Azure Event Hubs Java library + +## Table of Contents + +- [Code of Conduct](#code-of-conduct) +- [Filing Issues](#filing-issues) +- [Pull Requests](#pull-requests) + - [General guidelines](#general-guidelines) + - [Testing guidelines](#testing-guidelines) + +## Code of Conduct + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 
+ +## Filing Issues + +You can find all of the issues that have been filed in the [Issues](https://github.com/Azure/azure-event-hubs-java/issues) section of the repository. + +If you encounter any bugs, please file an issue [here](https://github.com/Azure/azure-event-hubs-java/issues/new) and make sure to fill out the provided template with the requested information. + +To suggest a new feature or changes that could be made, file an issue the same way you would for a bug, but remove the provided template and replace it with information about your suggestion. + +### Pull Requests + +If you are thinking about making a large change to this library, **break up the change into small, logical, testable chunks, and organize your pull requests accordingly**. + +You can find all of the pull requests that have been opened in the [Pull Request](https://github.com/Azure/azure-event-hubs-java/pulls) section of the repository. + +To open your own pull request, click [here](https://github.com/Azure/azure-event-hubs-java/compare). When creating a pull request, keep the following in mind: +- Make sure you are pointing to the fork and branch that your changes were made in +- The pull request template that is provided **should be filled out**; this is not something that should just be deleted or ignored when the pull request is created + - Deleting or ignoring this template will elongate the time it takes for your pull request to be reviewed + +#### General guidelines + +The following guidelines must be followed in **EVERY** pull request that is opened. 
+ +- Title of the pull request is clear and informative +- There are a small number of commits that each have an informative message +- A description of the changes the pull request makes is included, and a reference to the bug/issue the pull request fixes is included, if applicable +- All files have the Microsoft copyright header + +#### Testing guidelines + +The following guidelines must be followed in **EVERY** pull request that is opened. + +- Pull request includes test coverage for the included changes +- Tests must use xunit +- Test code should not contain hard coded values for resource names or similar values +- Test should not use App.config files for settings diff --git a/eventhubs/data-plane/.github/ISSUE_TEMPLATE.md b/eventhubs/data-plane/.github/ISSUE_TEMPLATE.md new file mode 100644 index 0000000000000..c2d384f1a663d --- /dev/null +++ b/eventhubs/data-plane/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,11 @@ +## Actual Behavior +1. +2. + +## Expected Behavior +1. +2. + +## Versions +- OS platform and version: +- Maven package version or commit ID: \ No newline at end of file diff --git a/eventhubs/data-plane/.github/PULL_REQUEST_TEMPLATE.md b/eventhubs/data-plane/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000000000..205cf43406a2b --- /dev/null +++ b/eventhubs/data-plane/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,16 @@ +## Description + + +This checklist is used to make sure that common guidelines for a pull request are followed. + +- [ ] **I have read the [contribution guidelines](./CONTRIBUTING.md).** +- [ ] Title of the pull request is clear and informative. +- [ ] There are a small number of commits, each of which have an informative message. This means that previously merged commits do not appear in the history of the PR. +- [ ] The pull request does not introduce breaking changes (unless a major version change occurs in the assembly and module). +- [ ] If applicable, the public code is properly documented. 
+- [ ] Pull request includes test coverage for the included changes. +- [ ] The code builds without any errors. \ No newline at end of file diff --git a/eventhubs/data-plane/.gitignore b/eventhubs/data-plane/.gitignore new file mode 100755 index 0000000000000..eb1e8bddfe2f0 --- /dev/null +++ b/eventhubs/data-plane/.gitignore @@ -0,0 +1,207 @@ +# Compiled object files +*.o + +# Compiled static libraries +*.a + +## Ignore Visual Studio temporary files, build results, and +## files generated by popular Visual Studio add-ons. + +# Cache/options +.vs/* + +# User-specific files +*.suo +*.user +*.sln.docstates + +# Build results +[Dd]ebug/ +[Rr]elease/ +x64/ +[Bb]in/ +[Oo]bj/ +!/build/release/ +target/ +/c/cmake + +# Jenkins build files +/jenkins/jenkins-cli.jar + +# Enable "build/" folder in the NuGet Packages folder since NuGet packages use it for MSBuild targets +!packages/*/build/ + +# MSTest test Results +[Tt]est[Rr]esult*/ +[Bb]uild[Ll]og.* + +*_i.c +*_p.c +*.ilk +*.meta +*.obj +*.pch +*.pdb +*.pgc +*.pgd +*.rsp +*.sbr +*.tlb +*.tli +*.tlh +*.tmp +*.tmp_proj +*.log +*.vspscc +*.vssscc +.builds +*.pidb +*.log +*.scc + +# Visual C++ cache files +ipch/ +*.aps +*.ncb +*.opensdf +*.sdf +*.cachefile + +# Visual Studio profiler +*.psess +*.vsp +*.vspx + +# eclipse java +.classpath +.project +*.prefs +/.metadata/ + +# intellij java +.idea/ +*.iml + +# Guidance Automation Toolkit +*.gpState + +# ReSharper is a .NET coding add-in +_ReSharper*/ +*.[Rr]e[Ss]harper + +# TeamCity is a build add-in +_TeamCity* + +# DotCover is a Code Coverage Tool +*.dotCover + +# NCrunch +*.ncrunch* +.*crunch*.local.xml + +# Installshield output folder +[Ee]xpress/ + +# DocProject is a documentation generator add-in +DocProject/buildhelp/ +DocProject/Help/*.HxT +DocProject/Help/*.HxC +DocProject/Help/*.hhc +DocProject/Help/*.hhk +DocProject/Help/*.hhp +DocProject/Help/Html2 +DocProject/Help/html + +# Click-Once directory +publish/ + +# Publish Web Output +*.Publish.xml + +# NuGet Packages 
Directory +packages/ + +# Windows Azure Build Output +csx +*.build.csdef + +# Windows Store app package directory +AppPackages/ + +# Others +sql/ +*.Cache +ClientBin/ +[Ss]tyle[Cc]op.* +~$* +*~ +*.dbmdl +*.[Pp]ublish.xml +*.pfx +*.publishsettings + +# RIA/Silverlight projects +Generated_Code/ + +# Backup & report files from converting an old project file to a newer +# Visual Studio version. Backup files are not needed, because we have git ;-) +_UpgradeReport_Files/ +Backup*/ +UpgradeLog*.XML +UpgradeLog*.htm + +# SQL Server files +App_Data/*.mdf +App_Data/*.ldf + + +#LightSwitch generated files +GeneratedArtifacts/ +_Pvt_Extensions/ +ModelManifest.xml + +# ========================= +# Windows detritus +# ========================= + +# Windows image file caches +Thumbs.db +ehthumbs.db + +# Folder config file +Desktop.ini + +# Recycle Bin used on file shares +$RECYCLE.BIN/ + +# Mac desktop service store files +.DS_Store + +# Visual studio build artifacts +*.tlog +*.lastbuildstate +*.idb +*.exp +*.lib +*.dll +*.lock.json + +#Windows CE build artifacts +Build.err +Build.wrn +Buildx86retail.dat +*.dat + +#Linux build arfifacts +*.opp + +#Tools EXE that doesn't end up in a typical build directory +**/common/tools/macro_utils_h_generator/macro_utils_h_generator.exe + +#version file +**/build/**/version.txt +**/node_modules/ +**/.vscode/ + +# Typescript installed definition files for modules +**/typings/ \ No newline at end of file diff --git a/eventhubs/data-plane/ConsumingEvents.md b/eventhubs/data-plane/ConsumingEvents.md new file mode 100644 index 0000000000000..cff94be57c9eb --- /dev/null +++ b/eventhubs/data-plane/ConsumingEvents.md @@ -0,0 +1,243 @@ +# Consuming Events with the Java client for Azure Event Hubs + +Consuming events from Event Hubs is different from typical messaging infrastuctures like queues or topic +subscriptions where a consumer simply fetches the "next" message. 
Event Hubs puts the consumer in control of the offset from which the log shall be read, and the consumer
+ +For a simple event consumer, you'll need to import the *com.microsoft.azure.eventhubs* package for the Event Hub client classes. + +```Java + import com.microsoft.azure.eventhubs.*; +``` + +Event Hubs client library uses qpid proton reactor framework which exposes AMQP connection and message delivery related +state transitions as reactive events. In the process, +the library will need to run many asynchronous tasks while sending and receiving messages to Event Hubs. +So, `EventHubClient` requires an instance of `ScheduledExecutorService`, where all these tasks are run. + + +```Java + ScheduledExecutorService executor = Executors.newScheduledThreadPool(8) +``` + +The receiver code creates an *EventHubClient* from a given connecting string + +```Java + ConnectionStringBuilder connStr = new ConnectionStringBuilder() + .setNamespaceName("----ServiceBusNamespaceName-----") + .setEventHubName("----EventHubName-----") + .setSasKeyName("-----SharedAccessSignatureKeyName-----") + .setSasKey("---SharedAccessSignatureKey----"); + + EventHubClient ehClient = EventHubClient.createSync(connStr.toString(), executor); +``` + +The receiver code then creates (at least) one *PartitionReceiver* that will receive the data. The receiver is seeded with an offset, in the snippet below it's simply the start of the log. + +```Java + String partitionId = "0"; + PartitionReceiver receiver = ehClient.createReceiverSync( + EventHubClient.DEFAULT_CONSUMER_GROUP_NAME, + partitionId, + EventPosition.fromStartOfStream()); + + receiver.setReceiveTimeout(Duration.ofSeconds(20)); +``` + +Once the receiver is initialized, getting events is just a matter of calling the *receive()* method in a loop. Each call +to *receive()* will fetch an iterable batch of events to process. + +```Java + Iterable receivedEvents = receiver.receiveSync(maxEventCount); +``` + +## Consumer Groups + +Event Hub receivers always receive via Consumer Groups. 
A consumer group is a named entity on an Event Hub that is +conceptually similar to a Messaging Topic subscription, even though it provides no server-side filtering capabilities. + +Each Event Hub has a "default consumer group" that is created with the Event Hub, which is also the one used in +the samples. + +The primary function of consumers groups is to provide a shared coordination context for multiple concurrent consumers +processing the same event stream in parallel. There can be at most 5 concurrent readers on a partition per consumer group; +it is however *recommended* that there is only one active receiver on a partition per consumer group. The [Ownership, Failover, +and Epochs](#ownership-failover-and-epochs) section below explains how to ensure this. + +You can create up to 20 such consumer groups on an Event Hub via the Azure portal or the HTTP API. + +## Using Offsets + +Each Event Hub has a configurable event retention period, which defaults to one day and can be extended to seven days. +By contacting Microsoft product support you can ask for further extend the retention period to up to 30 days. + +There are several options for a consumer to pick at which point into the retained event stream it wants to +begin receiving events: + +1. **Start of stream** Receive from the start of the retained stream, as shown in the example above. This option will start + with the oldest available retained event in the partition and then continuously deliver events until all available events + have been read. + +2. **Time offset**. This option will start with the oldest event in the partition that has been received into the Event Hub + after the given instant. + + ``` Java + PartitionReceiver receiver = ehClient.createReceiverSync( + EventHubClient.DEFAULT_CONSUMER_GROUP_NAME, + partitionId, + EventPosition.fromEnqueuedTime(instant)); + ``` + +3. 
**Absolute offset** This option is commonly used to resume receiving events after a previous receiver on the partition + has been aborted or suspended for any reason. The offset is a system-supplied string that should not be interpreted by + the application. The next section will discuss scenarios for using this option. + + ``` Java + PartitionReceiver receiver = ehClient.createReceiverSync( + EventHubClient.DEFAULT_CONSUMER_GROUP_NAME, + partitionId, + EventPosition.fromOffset(savedOffset)); + ``` + +4. **End of stream** While this option is self explanatory, one point to remember here is that, this call is designed to be + more performant than using `EventPosition.fromEnqueuedTime(Instant.now())`. + +5. **Sequence number** This option is baked into the API to provide better integration with stream processing technologies + (ex: APACHE SPARK). + +## Ownership, Failover, and Epochs +As mentioned in the overview above, the common consumption model for Event Hubs is that multiple consumers process events +from a single Event Hub in parallel. Depending on the amount of processing work required and the data volume that has to be +worked through, and also dependent on how resilient the system needs to be against failures, these consumers may be spread +across multiple different compute nodes (VMs). + +A simple setup for this is to create a fixed assignment of Event Hub partitions to compute nodes. For instance, you +could have two compute nodes handling events from 8 Event Hub partitions, assigning the first 4 partitions to the +first node and assigning the second set of 4 to the second node. + +The downside of such a simple model with fixed assignments is that if one of the compute nodes becomes unavailable, no events +get processed for the partitions owned by that node. 
+ +The alternative is to make ownership dynamic and have all processing nodes reach consensus about who owns which partition, +which is referred to as "[leader election](https://en.wikipedia.org/wiki/Leader_election)" or "consensus" in literature. +One infrastructure for negotiating leaders is [Apache Zookeeper] (https://zookeeper.apache.org/doc/trunk/recipes.html#sc_leaderElection), +another one one is [leader election over Azure Blobs](https://msdn.microsoft.com/de-de/library/dn568104.aspx). + +> The "event processor host" is a forthcoming extension to this Java client that provides an implementation of leader +> election over Azure blobs. The event processor host for Java is very similar to the respective implementation available +> for C# clients. + +As the number of event processor nodes grows or shrinks, a leader election model will yield a redistribution of partition +ownership. More nodes each own fewer partitions, fewer nodes each own more partitions. Since leader election occurs +external to the Event Hub clients, there's a mechanism needed to allow a new leader for a partition to force the old leader +to let go of the partition, meaning it must be forced to stop receiving and processing events from the partition. + +That mechanism is called **epochs**. An epoch is an integer value that acts as a label for the time period during which the +"current" leader for the partition retains its ownership. The epoch value is provided as an argument to the +*EventHubClient::createEpochReciver* method. + + ``` Java + epochValue = 1 + PartitionReceiver receiver1 = ehClient.createEpochReceiverSync( + EventHubClient.DefaultConsumerGroupName, + partitionId, + EventPosition.fromOffset(savedOffset), + epochValue); + ``` + + When a new partition owner takes over, it creates a receiver for the same partition, but with a greater epoch value. 
This will instantly + cause the previous receiver to be dropped (the service initiates a shutdown of the link) and the new receiver to take over. + + ``` Java + /* obtain checkpoint data */ + epochValue = 2 + PartitionReceiver receiver2 = ehClient.createEpochReceiverSync( + EventHubClient.DefaultConsumerGroupName, + partitionId, + EventPosition.fromOffset(savedOffset), + epochValue); + ``` + +The new reader obviously also needs to know at which offset processing shall continue. For this, the current owner of a partition should +periodically record its progress on the event stream to a shared location, tracking the offset of the last processed message. This is +called "checkpointing". In case of the aforementioned Azure Blob lease election model, the blob itself is a great place to keep this information. + +How often an event processor writes checkpoint information depends on the use-case. Frequent checkpointing may cause excessive writes to +the checkpoint state location. Too infrequent checkpointing may cause too many events to be re-processed as the new onwer picks up from +an outdated offset. + +## AMQP 1.0 +Azure Event Hubs requires using the AMQP 1.0 protocol for consuming events. + +AMQP 1.0 is a TCP based protocol. For Azure Event Hubs, all traffic *must* be protected using TLS (SSL) and is using +TCP port 5671. For the WebSocket binding of AMQP, traffic flows via port 443. + +## Connection Strings + +Azure Event Hubs and Azure Service Bus share a common format for connection strings. A connection string holds all required +information to set up a connection with an Event Hub. The format is a simple property/value list of the form +{property}={value} with pairs separated by ampersands (&). + +| Property | Description | +|-----------------------|------------------------------------------------------------| +| Endpoint | URI for the Event Hubs namespace. 
Typically has the form *sb://{namespace}.servicebus.windows.net/* | +| EntityPath | Relative path of the Event Hub in the namespace. Commonly this is just the Event Hub name | +| SharedAccessKeyName | Name of a Shared Access Signature rule configured for the Event Hub or the Event Hub name. For publishers, the rule must include "Send" permissions. | +| SharedAccessKey | Base64-encoded value of the Shared Access Key for the rule | +| SharedAccessSignature | A previously issued [Shared Access Signature token](https://azure.microsoft.com/en-us/documentation/articles/service-bus-sas-overview/) | + +A connection string will therefore have the following form: + +``` + Endpoint=sb://clemensveu.servicebus.windows.net&EntityPath=myeventhub&SharedAccessSignature=.... +``` + +### Azure IoT Hub event hub compatible endpoint connection string + +If you want to read Device to Cloud (D2C) messages sent to **Azure IoT Hub**, [IoT Hub Event Hub-compatible endpoint](https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-messages-read-builtin#read-from-the-built-in-endpoint) covers the connection string to be used in this case in detail. + +Consumers generally have a different relationship with the Event Hub than publishers. Usually there are relatively few consumers +and those consumers enjoy a high level of trust within the context of a system. The relationship between an event consumer +and the Event Hub is commonly also much longer-lived. + +It's therefore more common for a consumer to be directly configured with a SAS key rule name and key as part of the +connection string. In order to prevent the SAS key from leaking, it is still advisable to use a long-lived +token rather than the naked key. + +A generated token will be configured into the connection string with the *SharedAccessSignature* property. 
More information about Shared Access Signatures in Service Bus and Event Hubs, and about how to generate the required tokens,
\ No newline at end of file diff --git a/eventhubs/data-plane/Overview.md b/eventhubs/data-plane/Overview.md new file mode 100644 index 0000000000000..b6ac2d2da7613 --- /dev/null +++ b/eventhubs/data-plane/Overview.md @@ -0,0 +1,113 @@ + +# General Overview of Microsoft Azure Event Hubs client for Java + +This Java client library for Azure Event Hubs allows for both sending events to and receiving events from an Azure Event Hub. + +An **event publisher** is a source of telemetry data, diagnostics information, usage logs, or other log data, as +part of an embedded device solution, a mobile device application, a game title running on a console or other device, +some client or server based business solution, or a web site. + +An **event consumer** picks up such information from the Event Hub and processes it. Processing may involve aggregation, complex +computation and filtering. Processing may also involve distribution or storage of the information in a raw or transformed fashion. +Event Hub consumers are often robust and high-scale platform infrastructure parts with built-in analytics capabilities, like Azure +Stream Analytics, Apache Spark, or Apache Storm. + +Most applications will act either as an event publisher or an event consumer, but rarely both. The exception are event +consumers that filter and/or transform event streams and then forward them on to another Event Hub; an example for such a consumer +and (re-)publisher is Azure Stream Analytics. + +We'll therefore only give a glimpse at publishing and receiving here in this overview and provide further detail in +the [Publishing Events](PublishingEvents.md) and [Consuming Events](ConsumingEvents.md) guides. + +### Publishing Events + +The vast majority of Event Hub applications using this and other client libraries are and will be event publishers. +And for most of these publishers, publishing events is extremely simple. 
+ +With your Java application referencing this client library, +which is quite simple in a Maven build [as we explain in the guide](PublishingEvents.md), you'll need to import the +*com.microsoft.azure.eventhubs* package with the *EventData* and *EventHubClient* classes. + + +```Java + import com.microsoft.azure.eventhubs.*; +``` + +Event Hubs client library uses qpid proton reactor framework which exposes AMQP connection and message delivery related +state transitions as reactive events. In the process, +the library will need to run many asynchronous tasks while sending and receiving messages to Event Hubs. +So, `EventHubClient` requires an instance of `ScheduledExecutorService`, where all these tasks are run. + + +```Java + ScheduledExecutorService executor = Executors.newScheduledThreadPool(8) +``` + +Using an Event Hub connection string, which holds all required connection information, including an authorization key or token, +you then create an *EventHubClient* instance, which manages a secure AMQP 1.0 connection to the Event Hub. + +```Java + ConnectionStringBuilder connStr = new ConnectionStringBuilder() + .setNamespaceName("----ServiceBusNamespaceName-----") + .setEventHubName("----EventHubName-----") + .setSasKeyName("-----SharedAccessSignatureKeyName-----") + .setSasKey("---SharedAccessSignatureKey----"); + + EventHubClient ehClient = EventHubClient.createSync(connStr.toString(), executor); +``` + +Once you have the client in hand, you can package any arbitrary payload as a plain array of bytes and send it. + +```Java + EventData sendEvent = EventData.create(payloadBytes); + ehClient.sendSync(sendEvent); +``` + +The entire client API is built for Java 8's concurrent task model, generally returning +[*CompletableFuture*](https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html), so the library has these methods suffixed with *Sync* as their synchronous counterparts/variants. 
+ +Learn more about publishing events, including advanced options, and when you should and shouldn't use those options, +[in the event publisher guide](PublishingEvents.md). + +### Consuming Events + +Consuming events from Azure Event Hubs is a bit more complex than sending events, because the receivers need to be +aware of Event Hub's partitioning model, while senders can most often ignore it. + +Any Event Hub's event store is split up into at least 4 partitions, each maintaining a separate event log. You can think +of partitions like lanes on a highway. The more events the Event Hub needs to handle, the more lanes (partitions) you have +to add. Each partition can handle at most the equivalent of 1 "throughput unit", equivalent to at most 1000 events per +second and at most 1 Megabyte per second. + +Consuming messages is also quite different compared to typical messaging infrastructure like queues or topic +subscriptions, where the consumer simply fetches the "next" message. Azure Event Hubs puts the consumer in control of +the offset from which the log shall be read, and the consumer can repeatedly pick a different or the same offset and read +the event stream from chosen offsets while the events are being retained. Each partition is therefore loosely analogous +to a tape drive that you can wind back to a particular mark and then play back to the freshest data available. + +Just like the sender, the receiver code imports the package and creates an *EventHubClient* from a given connection string. +The receiver code then creates (at least) one *PartitionReceiver* that will receive the data. The receiver is seeded with +an offset, in the snippet below it's simply the start of the log. 
+ +```Java + String partitionId = "0"; + PartitionReceiver receiver = ehClient.createReceiverSync( + EventHubClient.DEFAULT_CONSUMER_GROUP_NAME, + partitionId, + EventPosition.fromStartOfStream()); + + receiver.setReceiveTimeout(Duration.ofSeconds(20)); +``` + +Once the receiver is initialized, getting events is just a matter of calling the *receive()* method in a loop. Each call +to *receive()* will fetch an enumerable batch of events to process. +Simply put, create a receiver from a specific offset and from then on, the log can be read only in one direction (oldest to latest event). + +```Java + Iterable receivedEvents = receiver.receiveSync(maxEventsCount); +``` + +As you might imagine, there's quite a bit more to know about partitions, about distributing the workload of processing huge and +fast data streams across several receiver machines, and about managing offsets in such a multi-machine scenario such that +data is not repeatedly read or, worse, skipped. You can find this and other details discussed in +the [Consuming Events](ConsumingEvents.md) guide. diff --git a/eventhubs/data-plane/PublishingEvents.md b/eventhubs/data-plane/PublishingEvents.md new file mode 100644 index 0000000000000..5a9eb428e531b --- /dev/null +++ b/eventhubs/data-plane/PublishingEvents.md @@ -0,0 +1,174 @@ +# Publishing Events with the Java client for Azure Event Hubs + +The vast majority of Event Hub applications using this and the other client libraries are and will be event publishers. +And for most of these publishers, publishing events is extremely simple and handled with just a few API gestures. 
+ +## Getting Started + +This library is available for use in Maven projects from the Maven Central Repository, and can be referenced using the +following dependency declaration inside of your Maven project file: + +```XML + + com.microsoft.azure + azure-eventhubs + 2.0.0 + + ``` + + For different types of build environments, the latest released JAR files can also be [explicitly obtained from the + Maven Central Repository](https://search.maven.org/#search%7Cga%7C1%7Ca%3A%22azure-eventhubs%22) or from [the Release distribution point on GitHub](https://github.com/Azure/azure-event-hubs/releases). + + +For a simple event publisher, you'll need to import the *com.microsoft.azure.eventhubs* package for the Event Hub client classes. + + +```Java + import com.microsoft.azure.eventhubs.*; +``` + +Event Hubs client library uses qpid proton reactor framework which exposes AMQP connection and message delivery related +state transitions as reactive events. In the process, +the library will need to run many asynchronous tasks while sending and receiving messages to Event Hubs. +So, `EventHubClient` requires an instance of `Executor`, where all these tasks are run. + + +```Java + ScheduledExecutorService executor = Executors.newScheduledThreadPool(8) +``` + +Using an Event Hub connection string, which holds all required connection information including an authorization key or token +(see [Connection Strings](#connection-strings)), you then create an *EventHubClient* instance. + +```Java + ConnectionStringBuilder connStr = new ConnectionStringBuilder() + .setNamespaceName("----ServiceBusNamespaceName-----") + .setEventHubName("----EventHubName-----") + .setSasKeyName("-----SharedAccessSignatureKeyName-----") + .setSasKey("---SharedAccessSignatureKey----"); + + EventHubClient ehClient = EventHubClient.createSync(connStr.toString(), executor); +``` + +Once you have the client in hands, you can package any arbitrary payload as a plain array of bytes and send it. 
The samples +we use to illustrate the functionality send UTF-8 encoded JSON data, but you can transfer any format you wish. + +```Java + EventData sendEvent = EventData.create(payloadBytes); + ehClient.sendSync(sendEvent); +``` + +The entire client API is built for Java 8's concurrent task model, generally returning +[*CompletableFuture*](https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html), so the library has these methods suffixed with *Sync* as their synchronous counterparts/variants. + +## AMQP 1.0 +Azure Event Hubs allows for publishing events using the HTTPS and AMQP 1.0 protocols. The Azure Event Hub endpoints +also support AMQP over the WebSocket protocol, allowing event traffic to leverage the same outbound TCP port as +HTTPS. + +This client library is built on top of the [Apache Qpid Proton-J]() libraries and supports AMQP, which is significantly +more efficient at publishing event streams than HTTPS. AMQP 1.0 is an international standard published as ISO/IEC 19464:2014. + +AMQP is session-oriented and sets up the required addressing information and authorization information just once for each +send link, while HTTPS requires doing so with each sent message. AMQP also has a compact binary format to express common +event properties, while HTTPS requires passing message metadata in a verbose text format. AMQP can also keep a significant +number of events "in flight" with asynchronous and robust acknowledgement flow, while HTTPS enforces a strict request-reply +pattern. + +AMQP 1.0 is a TCP based protocol. For Azure Event Hubs, all traffic *must* be protected using TLS (SSL) and is using +TCP port 5671. + +This library will provide HTTPS support via WebSockets when Proton-J supports HTTPS. + +## Connection Strings + +Azure Event Hubs and Azure Service Bus share a common format for connection strings. A connection string holds all required +information to set up a connection with an Event Hub. 
The format is a simple property/value list of the form +{property}={value} with pairs separated by ampersands (&). + +| Property | Description | +|-----------------------|------------------------------------------------------------| +| Endpoint | URI for the Event Hubs namespace. Typically has the form *sb://{namespace}.servicebus.windows.net/* | +| EntityPath | Relative path of the Event Hub in the namespace. Commonly this is just the Event Hub name | +| SharedAccessKeyName | Name of a Shared Access Signature rule configured for the Event Hub or the Event Hub name. For publishers, the rule must include "Send" permissions. | +| SharedAccessKey | Base64-encoded value of the Shared Access Key for the rule | +| SharedAccessSignature | A previously issued Shared Access Signature token (not yet supported; will be soon) | + +A connection string will therefore have the following form: + +``` + Endpoint=sb://clemensveu.servicebus.windows.net&EntityPath=myeventhub&SharedAccessSignature=.... +``` + +## Advanced Operations + +The publisher example shown in the overview above sends an event into the Event Hub without further qualification. This is +the preferred and most flexible and reliable option. For specific needs, Event Hubs offers two extra options to +qualify send operations: Publisher policies and partition addressing. + +### Partition Addressing + +Any Event Hub's event store is split up into at least 4 partitions, each maintaining a separate event log. You can think +of partitions like lanes on a highway. The more events the Event Hub needs to handle, the more lanes (partitions) you have +to add. Each partition can handle at most the equivalent of 1 "throughput unit", equivalent to at most 1000 events per +second and at most 1 Megabyte per second. + +In some cases, publisher applications need to address partitions directly in order to pre-categorize events for consumption. 
+A partition is directly addressed either by using the partition's identifier or by using some string (partition key) that gets +consistently hashed to a particular partition. + +This capability, paired with a large number of partitions, may appear attractive for implementing a fine grained, per publisher +subscription scheme similar to what Topics offer in Service Bus Messaging - but it's not at all how the capability should be used +and it's likely not going to yield satisfying results. + +Partition addressing is designed as a routing capability that consistently assigns events from the same sources to the same partition allowing +downstream consumer systems to be optimized, but under the assumption of very many of such sources (hundreds, thousands) share +the same partition. If you need fine-grained content-based routing, Service Bus Topics might be the better option. + +#### Using Partition Keys + +Of the two addressing options, the preferable one is to let the hash algorithm map the event to the appropriate partition. +The gesture is a straightforward extra override to the send operation supplying the partition key: + +```Java + EventData sendEvent = EventData.create(payloadBytes); +> ehClient.sendSync(sendEvent, partitionKey); +``` + +#### Using Partition Ids + +If you indeed need to target a specific partition, for instance because you must use a particular distribution strategy, +you can send directly to the partition, but doing so requires an extra gesture so that you don't accidentally choose this +option. 
To send to a partition you explicitly need to create a client object that is tied to the partition as shown below: + +```Java + EventHubClient ehClient = EventHubClient.createSync(connStr.toString(), executor); +> PartitionSender sender = ehClient.createPartitionSenderSync("0"); + EventData sendEvent = EventData.create(payloadBytes); + sender.sendSync(sendEvent); +``` + +#### Publisher Policies + +Event Hub Publisher Policies are not yet supported by this client and will be supported in a future release. + +#### Special considerations for partitions and publisher policies + +Using partitions or publisher policies (which are effectively a special kind of partition key) may impact throughput +and availability of your Event Hub solution. + +When you do a regular send operation that does not prescribe a particular partition, the Event Hub will choose a +partition at random, ensuring about equal distribution of events across partitions. Sticking with the above analogy, +all highway lanes get the same traffic. + +If you explicitly choose the partition key or partition-id, it's up to you to take care that traffic is evenly +distributed, otherwise you may end up with a traffic jam (in the form of throttling) on one partition while there's +little or no traffic on another partition. + +Also, like every other aspect of distributed systems, the log storage backing any partition may rarely and briefly slow +down or experience congestion. If you leave choosing the target partition for an event to Event Hubs, it can flexibly +react to such availability blips for publishers. + +Generally, you should *not* use partitioning as a traffic prioritization scheme, and you should *not* use it +for fine grained assignment of particular kinds of events to a particular partitions. *Partitions are a load +distribution mechanism, not a filtering model*. 
diff --git a/eventhubs/data-plane/appveyor.yml b/eventhubs/data-plane/appveyor.yml new file mode 100644 index 0000000000000..f1f3a37f39933 --- /dev/null +++ b/eventhubs/data-plane/appveyor.yml @@ -0,0 +1,24 @@ +version: 1.0.{build} +os: Windows Server 2012 +branches: + only: + - master + - dev +install: + - ps: | + Add-Type -AssemblyName System.IO.Compression.FileSystem + if (!(Test-Path -Path "C:\maven" )) { + (new-object System.Net.WebClient).DownloadFile( + 'http://www.us.apache.org/dist/maven/maven-3/3.5.4/binaries/apache-maven-3.5.4-bin.zip', + 'C:\maven-bin.zip' + ) + [System.IO.Compression.ZipFile]::ExtractToDirectory("C:\maven-bin.zip", "C:\maven") + } + - cmd: SET PATH=C:\maven\apache-maven-3.2.5\bin;%JAVA_HOME%\bin;%PATH% + - cmd: SET MAVEN_OPTS=-XX:MaxPermSize=2g -Xmx4g + - cmd: SET JAVA_OPTS=-XX:MaxPermSize=2g -Xmx4g +build_script: + - mvn clean package javadoc:jar source:jar +cache: + - C:\maven\ + - C:\Users\appveyor\.m2 diff --git a/eventhubs/data-plane/azure-eventhubs-eph/.gitignore b/eventhubs/data-plane/azure-eventhubs-eph/.gitignore new file mode 100644 index 0000000000000..b83d22266ac8a --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/.gitignore @@ -0,0 +1 @@ +/target/ diff --git a/eventhubs/data-plane/azure-eventhubs-eph/Overview.md b/eventhubs/data-plane/azure-eventhubs-eph/Overview.md new file mode 100644 index 0000000000000..e4332111693f9 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/Overview.md @@ -0,0 +1,279 @@ +# General Overview of Microsoft Azure Event Processor Host for Java + +Event Processor Host is built on top of the Microsoft Azure Event Hubs Client for Java and provides a number of features +not present in that lower layer: + +1. Event Processor Host removes the need to write a receive loop. You simply create a Java class which + implements the IEventProcessor interface, and Event Processor Host will call an instance of that class when + events are available. +2. 
Event Processor Host removes the need to think about partitions. By default, it creates one instance of the event + processor class for each partition. Each instance will only ever handle + events from one partition, further simplifying the processing code. If you need a different pattern, you can + replace the event processor factory and generate and dispense event processor instances in any way you like. +3. Event Processor Host allows easy load balancing. Utilizing a shared persistent store for leases on partitions + (by default based on Azure Storage), instances of Event Processor Host receiving from the same consumer group + of the same Event Hub can be spread across multiple machines and partitions will be distributed across those + machines as evenly as possible. These instances can be started and stopped at any time, and partitions will be + redistributed as needed. It is even allowed to have more instances than partitions as a form of hot standby. (Note that + partition distribution is based solely on the number of partitions per instance, not event flow rate or any other metric.) +4. Event Processor Host allows the event processor to create a persistent "checkpoint" that describes a position in + the partition's event stream, and if restarted it automatically begins receiving at the next event after the checkpoint. + Because checkpointing is usually an expensive operation, it is up to your IEventProcessor implementation to create + them, at whatever interval is suitable for your application. For example, an application with relatively + infrequent messages might checkpoint after processing each one, whereas an application that requires high performance in + the processing code in order to keep up with event flow might checkpoint once every hundred messages, or once + per second. + +## Using Event Processor Host + +### Step 1: Implement IEventProcessor + +There are four methods which need to be implemented: onOpen, onClose, onError, and onEvents. 
+onOpen and onClose are called when an event processor instance is created and shut down, respectively, and are intended for setup +and cleanup. For example, an onOpen implementation might open a database connection, and then close it in onClose. onError is called when +an error tied to the partition, such as a receiver failure, has occurred. Recovering from the error, if possible, is up to +Event Processor Host; the call to onError is informational. If it is not possible to recover from the error and the event +processor instance must be shut down, onClose will be called to allow graceful cleanup. + +The onEvents method is where the real work of processing +events occurs: whenever additional events become available for the partition, this method will be called with a batch of events. +The maximum number of events in a batch can be controlled by an option when the event processor class is registered, described below, +and defaults to 10; the actual number of events in a particular batch will vary between 1 and the specified maximum. onEvents may also +be called with an empty iterable on receive timeout, if an option is set when the event processor class is registered, but by default will not. +Note that if onEvents throws an exception out to the calling code before processing all events in the iterable, it loses the opportunity to +process the remaining events. We strongly recommend having a try-catch inside the loop which iterates over the events. + +By default, any particular instance of the event processor is permanently associated with a partition. A PartitionContext +object is provided to every call, but the partition id will never change from call to call. If you are using a non-default event processor +factory to implement a different pattern, such as one where an event processor instance can handle events from multiple partitions, +then the PartitionContext becomes more meaningful. 
+ +PartitionContext also provides the means to create a checkpoint for the partition. The code snippet below checkpoints after +processing every event, for the purpose of providing an example. Because checkpointing is usually an expensive operation, this +pattern is not appropriate for every application. + +```Java +class EventProcessor implements IEventProcessor +{ + @Override + public void onOpen(PartitionContext context) throws Exception + { + System.out.println("Partition " + context.getPartitionId() + " is opening"); + } + + @Override + public void onClose(PartitionContext context, CloseReason reason) throws Exception + { + System.out.println("Partition " + context.getPartitionId() + " is closing for reason " + reason.toString()); + } + + @Override + public void onError(PartitionContext context, Throwable error) + { + System.out.println("Partition " + context.getPartitionId() + " got error " + error.toString()); + } + + @Override + public void onEvents(PartitionContext context, Iterable events) throws Exception + { + System.out.println("SAMPLE: Partition " + context.getPartitionId() + " got message batch"); + for (EventData data : events) + { + try + { + // + // Do something useful with the event here. + // + + // Checkpointing is asynchronous. The only way to determine success or failure is to + // eventually wait for completion of the CompletableFuture. Doing an immediate get() is not + // the best for performance, but it makes a simple example. Because the get() can throw, + // it is inside the per-message try/catch. + context.checkpoint(data).get(); + } + catch (Exception e) // Replace with specific exceptions to catch. + { + // Handle the message-specific issue, or at least swallow the exception so the + // loop can go on to process the next event. Throwing out of onEvents results in + // skipping the entire rest of the batch. + } + } + } +} +``` + +### Step 2: Implement the General Error Notification Handler + +This is a class which implements Consumer. 
There is just one required method, accept, which will be +called with an argument of type ExceptionReceivedEventArgs if an error occurs which is not tied to any particular partition, or +sometimes if the error came from the event processor for that partition and therefore the state of the event processor is suspect. The +ExceptionReceivedEventArgs contains information specifying the instance of EventProcessorHost where the error occurred, the +exception, and the action being performed at the time of the error. To install this handler, an object of this class is passed +as an option when the event processor class is registered. Recovering from the error, if possible, is up to Event Processor Host; this +notification is informational. + +```Java +class ErrorNotificationHandler implements Consumer +{ + @Override + public void accept(ExceptionReceivedEventArgs t) + { + // Handle the notification here + } +} +``` + +### Step 3: Instantiate EventProcessorHost + +You will first need to build a connection string for the Event Hub. This may be conveniently done using +the ConnectionStringBuilder class provided by the Java client for Azure Event Hubs. Make sure the sasKey has listen permission. + +The EventProcessorHost class itself has multiple constructors. All of them require a name for the host instance, +the path to the Event Hub, the name of the consumer +group to receive from, and the connection string for the Event Hub. The most basic constructor also requires an Azure Storage +connection string for a storage account that the built-in partition lease and checkpoint managers will use to persist these +artifacts, and the name of a container to use or create in that storage account. Other constructors add more options. The +most advanced constructor allows the user to replace the Azure Storage-based lease and checkpoint managers with user implementations +of ILeaseManager and ICheckpointManager (for example, to use Zookeeper instead of Azure Storage). 
+ +```Java +final String namespaceName = "---ServiceBusNamespaceName---"; +final String eventHubName = "---EventHubName---"; +final String sasKeyName = "---SharedAccessSignatureKeyName---"; +final String sasKey = "---SharedAccessSignatureKey---"; +ConnectionStringBuilder eventHubConnectionString = new ConnectionStringBuilder() + .setNamespaceName(namespaceName) + .setEventHubName(eventHubName) + .setSasKeyName(sasKeyName) + .setSasKey(sasKey); + +final String hostname = EventProcessorHost.createHostName("examplehost"); // createHostName adds a UUID to make a unique host name +final String consumerGroupName = EventHubClient.DEFAULT_CONSUMER_GROUP_NAME; // or any consumer group you have created +final String storageConnectionString = "---YouCanGetTheConnectionStringForAStorageAccountFromPortal---"; +final String storageContainerName = "---StorageContainerName---"; +EventProcessorHost host = new EventProcessorHost(hostname, eventHubName, consumerGroupName, eventHubConnectionString.toString(), storageConnectionString, storageContainerName); +``` + +### Step 4: Register the Event Processor Implementation to Start Processing Events + +Instantiate an object of class EventProcessorOptions and call the setExceptionNotification method with an object of the class +implemented in step 2. This is also the time to modify the maximum event batch size (setMaxBatchSize) if you wish, or set other options +such as the receive timeout duration or prefetch count. + +To start processing events, call registerEventProcessor with the options object and the .class of the IEventProcessor implementation +from step 1. This call returns a CompletableFuture which will complete when initialization is finished and event pumping is about to begin. +Waiting for the CompletableFuture to complete (by calling get) is important because initialization failures are detected by catching +exceptions from the get call. Many exceptions will be wrapped within a CompletionException. 
+ +The code shown here uses the default event processor factory, which will generate and dispense a new instance of the event processor class +for every partition. To use a different pattern, you would need to implement IEventProcessorFactory and pass an instance of the +implementation to EventProcessorHost.registerEventProcessorFactory. + +``` Java +EventProcessorOptions options = EventProcessorOptions.getDefaultOptions(); +options.setExceptionNotification(new ErrorNotificationHandler()); +try +{ + host.registerEventProcessor(EventProcessor.class, options).get(); +} +catch (Exception e) +{ + System.out.print("Failure while registering: "); + if (e instanceof CompletionException) + { + Throwable inner = e.getCause(); + System.out.println(inner.toString()); + } + else + { + System.out.println(e.toString()); + } +} +``` + +### Step 5: Graceful Shutdown + +When the time comes to shut down the instance of EventProcessorHost, call the unregisterEventProcessor method. This also +returns a CompletableFuture, which will complete when the event processor host has finished shutting down. + +``` Java +CompletableFuture hostShutdown = host.unregisterEventProcessor(); + +// Do some other shutdown tasks here. + +try +{ + hostShutdown.get(); +} +catch (Exception e) +{ + System.out.print("Failure while shutting down: "); + if (e instanceof CompletionException) + { + Throwable inner = e.getCause(); + System.out.println(inner.toString()); + } + else + { + System.out.println(e.toString()); + } +} +``` + +## Threading Notes + +Calls to the IEventProcessor methods onOpen, onEvents, and onClose are serialized for a given partition. There is no guarantee that +calls to these methods will be on any particular thread, but there will only be one call to any of these methods at a time. The onError +method does not share this guarantee. In particular, if onEvents throws an exception up to the caller, then onError will be called with +that exception. 
Technically onError is not running at the same time as onEvents, since onEvents has terminated by throwing, but shared data +may be in an unexpected state. + +When using the default event processor factory, there is one IEventProcessor instance per partition, and each instance is permanently tied +to one partition. Under these conditions, an IEventProcessor instance is effectively single-threaded, except for onError. A user-supplied +event processor factory can implement any pattern, such as creating only one IEventProcessor instance and dispensing that instance for use +by every partition. In that example, onEvents will not receive multiple calls for a given partition at the same time, but it can be called +on multiple threads for different partitions. + +## Checkpointing, Partition Ownership, and Reprocessing Messages + +In a system using Event Processor Host, there are one or more hosts processing events from a particular event hub+consumer group combination, and +ownership of the partitions of the event hub are split up between the hosts. When a host takes ownership of a partition, it starts a receiver on +that partition, and when doing so it must specify the position in the stream of events at which the receiver will begin consuming. If there is a checkpoint +for that event hub+consumer group+partition combination available via the checkpoint manager (by default, in Azure Storage), the receiver will begin +consuming at the position indicated by the checkpoint. + +Any time a host takes ownership of a partition, reprocessing of events may occur. Exactly how many messages may be reprocessed depends on how +often checkpoints are written. Writing a checkpoint with the default checkpoint manager is expensive, since it makes at least one HTTPS call to Azure Storage. +The obvious strategy to minimize reprocessing of events is to checkpoint after processing each event, but we advise against this due to the performance hit. 
+In a low-throughput scenario it may be OK, but as the event rate goes up, checkpointing too often could prevent a processor from being able to keep up with +the flow. Also, even checkpointing after each event cannot completely prevent event reprocessing, since there will always be some time between finishing +processing and writing the checkpoint, during which the processor could fail. Customer applications must be able to detect and handle some amount of +reprocessing, and the customer needs to study their particular scenario and application to balance the cost of handling the reprocessing against the +performance hit of checkpointing more frequently. + +What can cause ownership of a partition to change: +1. Bringing a host online: it will steal ownership of partitions from already-running hosts until the distribution of partitions among hosts is as even as possible. +2. A host crashing/losing power/losing network connection/going offline for any reason: the leases on the partitions that the downed host owned will expire and the +remaining hosts will find the expired leases and take ownership. This may result in unbalanced distribution to start with which will cause additional ownership changes +until the distribution is balanced. +3. Azure Storage latency or failures which result in a partition lease expiring because it cannot be renewed in time: other hosts (or even the same host) will find the +expired lease and take ownership. Again, this can result in unbalanced distribution and additional ownership changes. This scenario can occur even if there is only one host. +4. Certain event hub client errors can cause the processor for a partition to shut down, with the same effects as case 3. This scenario can also occur even with only one host. + +## Running Tests + +Event Processor Host comes with a suite of JUnit-based tests. To run these tests, you will need an event hub and an Azure Storage account. 
+You can create both through the Azure Portal at [portal.azure.com](http://portal.azure.com/). Once you have done that, get the +connection strings for both and place them in environment variables: + +* `EVENT_HUB_CONNECTION_STRING` is the event hub connection string. The connection string needs to include a SAS rule which has send and listen permissions. +* `EPHTESTSTORAGE` is the storage account connection string. + +Under src/test/java, the general test cases are in files named *Test. If you have made modifications to the code, these are the +cases to run in order to detect major breakage. There are also some test cases in Repros.java, but those are not suitable for +general use. That file preserves repro code from times when we had to mount a major investigation to get to the +bottom of a problem. + +## Tracing + +Event Processor Host can trace its execution for debugging and problem diagnosis, using the well-known SLF4J library. diff --git a/eventhubs/data-plane/azure-eventhubs-eph/Readme.md b/eventhubs/data-plane/azure-eventhubs-eph/Readme.md new file mode 100644 index 0000000000000..8926086c76c25 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/Readme.md @@ -0,0 +1,10 @@ +

    + Microsoft Azure Event Hubs +

    + +# Microsoft Azure Event Processor Host for Java + +Event Processor Host is built on top of the Azure Event Hubs Client and provides a number of features +not present in that lower layer. See the [readme for Azure Event Hubs Client for Java](../readme.md) for details of how to +include Event Processor Host in your project. See the [overview](Overview.md) for details of the functionality +that Event Processor Host offers and how to use it. diff --git a/eventhubs/data-plane/azure-eventhubs-eph/pom.xml b/eventhubs/data-plane/azure-eventhubs-eph/pom.xml new file mode 100644 index 0000000000000..f0d357b38d901 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/pom.xml @@ -0,0 +1,40 @@ + + + + com.microsoft.azure + azure-eventhubs-clients + 2.0.0 + + + 2.2.0 + + 4.0.0 + + azure-eventhubs-eph + azure-eventhubs-eph + + + scm:git:https://github.com/Azure/azure-event-hubs + + + + + com.microsoft.azure + azure-eventhubs + ${project.parent.version} + + + com.microsoft.azure + azure-storage + 8.0.0 + + + com.google.code.gson + gson + 2.8.5 + + + + libraries and extensions built on Microsoft Azure Event Hubs + diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/AzureBlobLease.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/AzureBlobLease.java new file mode 100644 index 0000000000000..d3feba5fbd983 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/AzureBlobLease.java @@ -0,0 +1,102 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ + +package com.microsoft.azure.eventprocessorhost; + +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.BlobProperties; +import com.microsoft.azure.storage.blob.BlobRequestOptions; +import com.microsoft.azure.storage.blob.CloudBlockBlob; + +final class AzureBlobLease extends CompleteLease { + private final transient CloudBlockBlob blob; // do not serialize + private final transient BlobRequestOptions options; // do not serialize + private String offset = null; // null means checkpoint is uninitialized + private long sequenceNumber = 0; + private String token = null; + + // not intended to be used; built for GSon + @SuppressWarnings("unused") + private AzureBlobLease() { + super(); + this.blob = null; // so that we can mark blob as final + this.options = null; // so that we can mark options as final + } + + AzureBlobLease(String partitionId, CloudBlockBlob blob, BlobRequestOptions options) { + super(partitionId); + this.blob = blob; + this.options = options; + } + + AzureBlobLease(AzureBlobLease source) { + super(source); + this.offset = source.offset; + this.sequenceNumber = source.sequenceNumber; + this.blob = source.blob; + this.options = source.options; + this.token = source.token; + } + + AzureBlobLease(AzureBlobLease source, CloudBlockBlob blob, BlobRequestOptions options) { + super(source); + this.offset = source.offset; + this.sequenceNumber = source.sequenceNumber; + this.blob = blob; + this.options = options; + this.token = source.token; + } + + AzureBlobLease(CompleteLease source, CloudBlockBlob blob, BlobRequestOptions options) { + super(source); + this.blob = blob; + this.options = options; + } + + CloudBlockBlob getBlob() { + return this.blob; + } + + String getOffset() { + return this.offset; + } + + void setOffset(String offset) { + this.offset = offset; + } + + long getSequenceNumber() { + return this.sequenceNumber; + } + + void setSequenceNumber(long sequenceNumber) { + this.sequenceNumber = 
sequenceNumber; + } + + String getToken() { + return this.token; + } + + void setToken(String token) { + this.token = token; + } + + Checkpoint getCheckpoint() { + return new Checkpoint(this.getPartitionId(), this.offset, this.sequenceNumber); + } + + @Override + String getStateDebug() { + String retval = "uninitialized"; + try { + this.blob.downloadAttributes(); + BlobProperties props = this.blob.getProperties(); + retval = props.getLeaseState().toString() + " " + props.getLeaseStatus().toString() + " " + props.getLeaseDuration().toString(); + } catch (StorageException e) { + retval = "downloadAttributes on the blob caught " + e.toString(); + } + return retval; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/AzureStorageCheckpointLeaseManager.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/AzureStorageCheckpointLeaseManager.java new file mode 100644 index 0000000000000..65020d91a7f47 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/AzureStorageCheckpointLeaseManager.java @@ -0,0 +1,703 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ + +package com.microsoft.azure.eventprocessorhost; + +import com.google.gson.Gson; +import com.microsoft.azure.storage.*; +import com.microsoft.azure.storage.blob.*; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.InvalidKeyException; +import java.util.ArrayList; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.Hashtable; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.*; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + + +class AzureStorageCheckpointLeaseManager implements ICheckpointManager, ILeaseManager { + private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(AzureStorageCheckpointLeaseManager.class); + private static final String METADATA_OWNER_NAME = "OWNINGHOST"; + + private final String storageConnectionString; + private final String storageBlobPrefix; + private final BlobRequestOptions leaseOperationOptions = new BlobRequestOptions(); + private final BlobRequestOptions checkpointOperationOptions = new BlobRequestOptions(); + private final BlobRequestOptions renewRequestOptions = new BlobRequestOptions(); + private HostContext hostContext; + private String storageContainerName; + private CloudBlobClient storageClient; + private CloudBlobContainer eventHubContainer; + private CloudBlobDirectory consumerGroupDirectory; + private Gson gson; + + private Hashtable latestCheckpoint = new Hashtable(); + + AzureStorageCheckpointLeaseManager(String storageConnectionString, String storageContainerName) { + this(storageConnectionString, storageContainerName, ""); + } + + AzureStorageCheckpointLeaseManager(String storageConnectionString, String storageContainerName, String storageBlobPrefix) { + if ((storageConnectionString == null) || storageConnectionString.trim().isEmpty()) { + throw new 
IllegalArgumentException("Provide valid Azure Storage connection string when using Azure Storage"); + } + this.storageConnectionString = storageConnectionString; + + if ((storageContainerName != null) && storageContainerName.trim().isEmpty()) { + throw new IllegalArgumentException("Azure Storage container name must be a valid container name or null to use the default"); + } + this.storageContainerName = storageContainerName; + + // Convert all-whitespace prefix to empty string. Convert null prefix to empty string. + // Then the rest of the code only has one case to worry about. + this.storageBlobPrefix = (storageBlobPrefix != null) ? storageBlobPrefix.trim() : ""; + } + + // The EventProcessorHost can't pass itself to the AzureStorageCheckpointLeaseManager constructor + // because it is still being constructed. Do other initialization here also because it might throw and + // hence we don't want it in the constructor. + void initialize(HostContext hostContext) throws InvalidKeyException, URISyntaxException, StorageException { + this.hostContext = hostContext; + + if (this.storageContainerName == null) { + this.storageContainerName = this.hostContext.getEventHubPath(); + } + + // Validate that the event hub name is also a legal storage container name. + // Regex pattern is copied from .NET version. The syntax for Java regexes seems to be the same. + // Error message is also copied from .NET version. + Pattern p = Pattern.compile("^(?-i)(?:[a-z0-9]|(?<=[0-9a-z])-(?=[0-9a-z])){3,63}$"); + Matcher m = p.matcher(this.storageContainerName); + if (!m.find()) { + throw new IllegalArgumentException("EventHub names must conform to the following rules to be able to use it with EventProcessorHost: " + + "Must start with a letter or number, and can contain only letters, numbers, and the dash (-) character. " + + "Every dash (-) character must be immediately preceded and followed by a letter or number; consecutive dashes are not permitted in container names. 
" + + "All letters in a container name must be lowercase. " + + "Must be from 3 to 63 characters long."); + } + + this.storageClient = CloudStorageAccount.parse(this.storageConnectionString).createCloudBlobClient(); + + this.eventHubContainer = this.storageClient.getContainerReference(this.storageContainerName); + + // storageBlobPrefix is either empty or a real user-supplied string. Either way we can just + // stick it on the front and get the desired result. + this.consumerGroupDirectory = this.eventHubContainer.getDirectoryReference(this.storageBlobPrefix + this.hostContext.getConsumerGroupName()); + + this.gson = new Gson(); + + this.leaseOperationOptions.setMaximumExecutionTimeInMs(this.hostContext.getPartitionManagerOptions().getLeaseDurationInSeconds() * 1000); + this.storageClient.setDefaultRequestOptions(this.leaseOperationOptions); + this.checkpointOperationOptions.setMaximumExecutionTimeInMs(this.hostContext.getPartitionManagerOptions().getCheckpointTimeoutInSeconds() * 1000); + // The only option that .NET sets on renewRequestOptions is ServerTimeout, which doesn't exist in Java equivalent. + // Keep it separate in case we need to change something later. + // Only used for leases, not checkpoints, so set max execution time to lease value + this.renewRequestOptions.setMaximumExecutionTimeInMs(this.hostContext.getPartitionManagerOptions().getLeaseDurationInSeconds() * 1000); + } + + @Override + public CompletableFuture checkpointStoreExists() { + return storeExistsInternal(this.checkpointOperationOptions, EventProcessorHostActionStrings.CHECKING_CHECKPOINT_STORE, + "Failure while checking checkpoint store existence"); + } + + + // + // In this implementation, checkpoints are data that's actually in the lease blob, so checkpoint operations + // turn into lease operations under the covers. 
+ // + + @Override + public CompletableFuture createCheckpointStoreIfNotExists() { + // Because we control the caller, we know that this method will only be called after createLeaseStoreIfNotExists. + // In this implementation, it's the same store, so the store will always exist if execution reaches here. + return CompletableFuture.completedFuture(null); + } + + @Override + public CompletableFuture deleteCheckpointStore() { + return deleteStoreInternal(this.checkpointOperationOptions); + } + + @Override + public CompletableFuture getCheckpoint(String partitionId) { + CompletableFuture future = null; + + try { + AzureBlobLease lease = getLeaseInternal(partitionId, this.checkpointOperationOptions); + Checkpoint checkpoint = null; + if (lease != null) { + if ((lease.getOffset() != null) && !lease.getOffset().isEmpty()) { + checkpoint = new Checkpoint(partitionId); + checkpoint.setOffset(lease.getOffset()); + checkpoint.setSequenceNumber(lease.getSequenceNumber()); + } + // else offset is null meaning no checkpoint stored for this partition so return null + } + future = CompletableFuture.completedFuture(checkpoint); + } catch (URISyntaxException | IOException | StorageException e) { + future = new CompletableFuture(); + future.completeExceptionally(LoggingUtils.wrapException(e, EventProcessorHostActionStrings.GETTING_CHECKPOINT)); + } + + return future; + } + + @Override + public CompletableFuture createAllCheckpointsIfNotExists(List partitionIds) { + // Because we control the caller, we know that this method will only be called after createAllLeasesIfNotExists. + // In this implementation checkpoints are in the same blobs as leases, so the blobs will already exist if execution reaches here. 
+ return CompletableFuture.completedFuture(null); + } + + @Override + public CompletableFuture updateCheckpoint(CompleteLease lease, Checkpoint checkpoint) { + AzureBlobLease updatedLease = new AzureBlobLease((AzureBlobLease) lease); + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(checkpoint.getPartitionId(), + "Checkpointing at " + checkpoint.getOffset() + " // " + checkpoint.getSequenceNumber())); + updatedLease.setOffset(checkpoint.getOffset()); + updatedLease.setSequenceNumber(checkpoint.getSequenceNumber()); + + CompletableFuture future = null; + + try { + if (updateLeaseInternal(updatedLease, this.checkpointOperationOptions)) { + future = CompletableFuture.completedFuture(null); + } else { + TRACE_LOGGER.warn(this.hostContext.withHostAndPartition(lease, "Lease lost")); + future = new CompletableFuture(); + future.completeExceptionally(LoggingUtils.wrapException(new RuntimeException("Lease lost while updating checkpoint"), + EventProcessorHostActionStrings.UPDATING_CHECKPOINT)); + } + } catch (StorageException | IOException e) { + TRACE_LOGGER.warn(this.hostContext.withHostAndPartition(lease, "Failure updating checkpoint"), e); + future = new CompletableFuture(); + future.completeExceptionally(LoggingUtils.wrapException(e, EventProcessorHostActionStrings.UPDATING_CHECKPOINT)); + } + + return future; + } + + @Override + public CompletableFuture deleteCheckpoint(String partitionId) { + // Not currently used by EventProcessorHost. + return CompletableFuture.completedFuture(null); + } + + + // + // Lease operations. 
+ // + + @Override + public int getLeaseDurationInMilliseconds() { + return this.hostContext.getPartitionManagerOptions().getLeaseDurationInSeconds() * 1000; + } + + @Override + public CompletableFuture leaseStoreExists() { + return storeExistsInternal(this.leaseOperationOptions, EventProcessorHostActionStrings.CHECKING_LEASE_STORE, + "Failure while checking lease store existence"); + } + + private CompletableFuture storeExistsInternal(BlobRequestOptions options, String action, String trace) { + CompletableFuture future = null; + try { + future = CompletableFuture.completedFuture(this.eventHubContainer.exists(null, options, null)); + } catch (StorageException e) { + TRACE_LOGGER.error(this.hostContext.withHost(trace), e); + future = new CompletableFuture(); + future.completeExceptionally(LoggingUtils.wrapException(e, action)); + } + return future; + } + + @Override + public CompletableFuture createLeaseStoreIfNotExists() { + CompletableFuture future = null; + + try { + // returns true if the container was created, false if it already existed -- we don't care + this.eventHubContainer.createIfNotExists(this.leaseOperationOptions, null); + TRACE_LOGGER.info(this.hostContext.withHost("Created lease store OK or it already existed")); + future = CompletableFuture.completedFuture(null); + } catch (StorageException e) { + future = new CompletableFuture(); + future.completeExceptionally(LoggingUtils.wrapException(e, EventProcessorHostActionStrings.CREATING_LEASE_STORE)); + TRACE_LOGGER.error(this.hostContext.withHost("Failure while creating lease store"), e); + } + + return future; + } + + @Override + public CompletableFuture deleteLeaseStore() { + return deleteStoreInternal(this.leaseOperationOptions); + } + + private CompletableFuture deleteStoreInternal(BlobRequestOptions options) { + CompletableFuture future = null; + + try { + for (ListBlobItem blob : this.eventHubContainer.listBlobs(null, false, EnumSet.noneOf(BlobListingDetails.class), options, null)) { + if (blob 
instanceof CloudBlobDirectory) { + for (ListBlobItem subBlob : ((CloudBlobDirectory) blob).listBlobs(null, false, EnumSet.noneOf(BlobListingDetails.class), options, null)) { + ((CloudBlockBlob) subBlob).deleteIfExists(DeleteSnapshotsOption.NONE, null, options, null); + } + } else if (blob instanceof CloudBlockBlob) { + ((CloudBlockBlob) blob).deleteIfExists(DeleteSnapshotsOption.NONE, null, options, null); + } + } + + this.eventHubContainer.deleteIfExists(null, options, null); + + future = CompletableFuture.completedFuture(null); + } + catch (StorageException | URISyntaxException e) { + TRACE_LOGGER.error(this.hostContext.withHost("Failure while deleting lease store"), e); + future = new CompletableFuture(); + future.completeExceptionally(new CompletionException(e)); + } + + return future; + } + + @Override + public CompletableFuture getLease(String partitionId) { + CompletableFuture future = null; + + try { + future = CompletableFuture.completedFuture(getLeaseInternal(partitionId, this.leaseOperationOptions)); + } catch (URISyntaxException | IOException | StorageException e) { + TRACE_LOGGER.warn(this.hostContext.withHostAndPartition(partitionId, "Failure while getting lease details"), e); + future = new CompletableFuture(); + future.completeExceptionally(LoggingUtils.wrapException(e, EventProcessorHostActionStrings.GETTING_LEASE)); + } + + return future; + } + + private AzureBlobLease getLeaseInternal(String partitionId, BlobRequestOptions options) throws URISyntaxException, IOException, StorageException { + AzureBlobLease retval = null; + + CloudBlockBlob leaseBlob = this.consumerGroupDirectory.getBlockBlobReference(partitionId); // getBlockBlobReference does not take options + if (leaseBlob.exists(null, options, null)) { + retval = downloadLease(leaseBlob, options); + } + + return retval; + } + + @Override + public CompletableFuture> getAllLeases() { + CompletableFuture> future = null; + + try { + ArrayList infos = new ArrayList(); + EnumSet details = 
EnumSet.of(BlobListingDetails.METADATA); + Iterable leaseBlobs = this.consumerGroupDirectory.listBlobs("", true, details, this.leaseOperationOptions, null); + leaseBlobs.forEach((lbi) -> { + CloudBlob blob = (CloudBlob)lbi; + BlobProperties bp = blob.getProperties(); + HashMap metadata = blob.getMetadata(); + Path p = Paths.get(lbi.getUri().getPath()); + infos.add(new BaseLease(p.getFileName().toString(), metadata.get(AzureStorageCheckpointLeaseManager.METADATA_OWNER_NAME), + (bp.getLeaseState() == LeaseState.LEASED))); + }); + future = CompletableFuture.completedFuture(infos); + } catch (URISyntaxException | StorageException e) { + TRACE_LOGGER.warn(this.hostContext.withHost("Failure while getting lease state details"), e); + future = new CompletableFuture>(); + future.completeExceptionally(LoggingUtils.wrapException(e, EventProcessorHostActionStrings.GETTING_LEASE)); + } + + return future; + } + + // NOTE NOTE NOTE: this is the one place where this lease manager implementation returns an uncompleted future. + // This is to support creating the blobs in parallel, which can be an important part of fast startup. + // Because it happens during startup, when no user code is running, it cannot deadlock with checkpointing. + @Override + public CompletableFuture createAllLeasesIfNotExists(List partitionIds) { + CompletableFuture future = null; + + // Optimization: list the blobs currently existing in the directory. If there are the + // expected number of blobs, then we can skip doing the creates. 
+ int blobCount = 0; + try { + Iterable leaseBlobs = this.consumerGroupDirectory.listBlobs("", true, null, this.leaseOperationOptions, null); + Iterator blobIterator = leaseBlobs.iterator(); + while (blobIterator.hasNext()) { + blobCount++; + blobIterator.next(); + } + } catch (URISyntaxException | StorageException e) { + TRACE_LOGGER.error(this.hostContext.withHost("Exception checking lease existence - leaseContainerName: " + this.storageContainerName + " consumerGroupName: " + + this.hostContext.getConsumerGroupName() + " storageBlobPrefix: " + this.storageBlobPrefix), e); + future = new CompletableFuture(); + future.completeExceptionally(LoggingUtils.wrapException(e, EventProcessorHostActionStrings.CREATING_LEASES)); + } + + if (future == null) { + // No error checking the list, so keep going + if (blobCount == partitionIds.size()) { + // All expected blobs found, so short-circuit + future = CompletableFuture.completedFuture(null); + } else { + // Create the blobs in parallel + ArrayList> createFutures = new ArrayList>(); + + for (String id : partitionIds) { + CompletableFuture oneCreate = CompletableFuture.supplyAsync(() -> { + CompleteLease returnLease = null; + try { + returnLease = createLeaseIfNotExistsInternal(id, this.leaseOperationOptions); + } catch (URISyntaxException | IOException | StorageException e) { + TRACE_LOGGER.error(this.hostContext.withHostAndPartition(id, + "Exception creating lease - leaseContainerName: " + this.storageContainerName + " consumerGroupName: " + this.hostContext.getConsumerGroupName() + + " storageBlobPrefix: " + this.storageBlobPrefix), e); + throw LoggingUtils.wrapException(e, EventProcessorHostActionStrings.CREATING_LEASES); + } + return returnLease; + }, this.hostContext.getExecutor()); + createFutures.add(oneCreate); + } + + CompletableFuture dummy[] = new CompletableFuture[createFutures.size()]; + future = CompletableFuture.allOf(createFutures.toArray(dummy)); + } + } + + return future; + } + + private AzureBlobLease 
createLeaseIfNotExistsInternal(String partitionId, BlobRequestOptions options) throws URISyntaxException, IOException, StorageException { + AzureBlobLease returnLease = null; + try { + CloudBlockBlob leaseBlob = this.consumerGroupDirectory.getBlockBlobReference(partitionId); // getBlockBlobReference does not take options + returnLease = new AzureBlobLease(partitionId, leaseBlob, this.leaseOperationOptions); + uploadLease(returnLease, leaseBlob, AccessCondition.generateIfNoneMatchCondition("*"), UploadActivity.Create, options); + // Do not set metadata on creation. No metadata/no owner value indicates that the lease is unowned. + TRACE_LOGGER.info(this.hostContext.withHostAndPartition(partitionId, + "CreateLeaseIfNotExist OK - leaseContainerName: " + this.storageContainerName + " consumerGroupName: " + this.hostContext.getConsumerGroupName() + + " storageBlobPrefix: " + this.storageBlobPrefix)); + } catch (StorageException se) { + StorageExtendedErrorInformation extendedErrorInfo = se.getExtendedErrorInformation(); + if ((extendedErrorInfo != null) && + ((extendedErrorInfo.getErrorCode().compareTo(StorageErrorCodeStrings.BLOB_ALREADY_EXISTS) == 0) || + (extendedErrorInfo.getErrorCode().compareTo(StorageErrorCodeStrings.LEASE_ID_MISSING) == 0))) // occurs when somebody else already has leased the blob + { + // The blob already exists. 
+ TRACE_LOGGER.info(this.hostContext.withHostAndPartition(partitionId, "Lease already exists")); + returnLease = getLeaseInternal(partitionId, options); + } else { + throw se; + } + } + + return returnLease; + } + + @Override + public CompletableFuture deleteLease(CompleteLease lease) { + CompletableFuture future = null; + + TRACE_LOGGER.info(this.hostContext.withHostAndPartition(lease, "Deleting lease")); + try { + ((AzureBlobLease) lease).getBlob().deleteIfExists(); + future = CompletableFuture.completedFuture(null); + } catch (StorageException e) { + TRACE_LOGGER.error(this.hostContext.withHostAndPartition(lease, "Exception deleting lease"), e); + future = new CompletableFuture(); + future.completeExceptionally(LoggingUtils.wrapException(e, EventProcessorHostActionStrings.DELETING_LEASE)); + } + + return future; + } + + @Override + public CompletableFuture acquireLease(CompleteLease lease) { + CompletableFuture future = null; + + try { + future = CompletableFuture.completedFuture(acquireLeaseInternal((AzureBlobLease) lease)); + } catch (IOException | StorageException e) { + TRACE_LOGGER.warn(this.hostContext.withHostAndPartition(lease, "Failure acquiring lease"), e); + future = new CompletableFuture(); + future.completeExceptionally(LoggingUtils.wrapException(e, EventProcessorHostActionStrings.ACQUIRING_LEASE)); + } + + return future; + } + + private boolean acquireLeaseInternal(AzureBlobLease lease) throws IOException, StorageException { + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(lease, "Acquiring lease")); + + CloudBlockBlob leaseBlob = lease.getBlob(); + boolean succeeded = true; + String newLeaseId = EventProcessorHost.safeCreateUUID(); + if ((newLeaseId == null) || newLeaseId.isEmpty()) { + throw new IllegalArgumentException("acquireLeaseSync: newLeaseId really is " + ((newLeaseId == null) ? 
"null" : "empty")); + } + try { + String newToken = null; + leaseBlob.downloadAttributes(); + if (leaseBlob.getProperties().getLeaseState() == LeaseState.LEASED) { + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(lease, "changeLease")); + if ((lease.getToken() == null) || lease.getToken().isEmpty()) { + // We reach here in a race condition: when this instance of EventProcessorHost scanned the + // lease blobs, this partition was unowned (token is empty) but between then and now, another + // instance of EPH has established a lease (getLeaseState() is LEASED). We normally enforce + // that we only steal the lease if it is still owned by the instance which owned it when we + // scanned, but we can't do that when we don't know who owns it. The safest thing to do is just + // fail the acquisition. If that means that one EPH instance gets more partitions than it should, + // rebalancing will take care of that quickly enough. + succeeded = false; + } else { + newToken = leaseBlob.changeLease(newLeaseId, AccessCondition.generateLeaseCondition(lease.getToken())); + } + } else { + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(lease, "acquireLease")); + newToken = leaseBlob.acquireLease(this.hostContext.getPartitionManagerOptions().getLeaseDurationInSeconds(), newLeaseId); + } + if (succeeded) { + lease.setToken(newToken); + lease.setOwner(this.hostContext.getHostName()); + lease.incrementEpoch(); // Increment epoch each time lease is acquired or stolen by a new host + uploadLease(lease, leaseBlob, AccessCondition.generateLeaseCondition(lease.getToken()), UploadActivity.Acquire, this.leaseOperationOptions); + } + } catch (StorageException se) { + if (wasLeaseLost(se, lease.getPartitionId())) { + succeeded = false; + } else { + throw se; + } + } + + return succeeded; + } + + @Override + public CompletableFuture renewLease(CompleteLease lease) { + CompletableFuture future = null; + + try { + future = 
CompletableFuture.completedFuture(renewLeaseInternal(lease)); + } catch (StorageException se) { + future = new CompletableFuture(); + future.completeExceptionally(LoggingUtils.wrapException(se, EventProcessorHostActionStrings.RENEWING_LEASE)); + } + + return future; + } + + private boolean renewLeaseInternal(CompleteLease lease) throws StorageException { + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(lease, "Renewing lease")); + + boolean result = false; + AzureBlobLease azLease = (AzureBlobLease)lease; + CloudBlockBlob leaseBlob = azLease.getBlob(); + + try { + leaseBlob.renewLease(AccessCondition.generateLeaseCondition(azLease.getToken()), this.renewRequestOptions, null); + result = true; + } catch (StorageException se) { + if (wasLeaseLost(se, azLease.getPartitionId())) { + // leave result as false + } else { + throw se; + } + } + + return result; + } + + @Override + public CompletableFuture releaseLease(CompleteLease lease) { + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(lease, "Releasing lease")); + + CompletableFuture future = null; + + AzureBlobLease inLease = (AzureBlobLease) lease; + CloudBlockBlob leaseBlob = inLease.getBlob(); + + try { + String leaseId = inLease.getToken(); + AzureBlobLease releasedCopy = new AzureBlobLease(inLease); + releasedCopy.setToken(""); + releasedCopy.setOwner(""); + uploadLease(releasedCopy, leaseBlob, AccessCondition.generateLeaseCondition(leaseId), UploadActivity.Release, this.leaseOperationOptions); + leaseBlob.releaseLease(AccessCondition.generateLeaseCondition(leaseId)); + future = CompletableFuture.completedFuture(null); + } catch (StorageException se) { + if (wasLeaseLost(se, lease.getPartitionId())) { + // If the lease was already lost, then the intent of releasing it has been achieved. 
+ future = CompletableFuture.completedFuture(null); + } else { + future = new CompletableFuture(); + future.completeExceptionally(LoggingUtils.wrapException(se, EventProcessorHostActionStrings.RELEASING_LEASE)); + } + } catch (IOException ie) { + future = new CompletableFuture(); + future.completeExceptionally(LoggingUtils.wrapException(ie, EventProcessorHostActionStrings.RELEASING_LEASE)); + } + + return future; + } + + @Override + public CompletableFuture updateLease(CompleteLease lease) { + CompletableFuture future = null; + + try { + boolean result = updateLeaseInternal((AzureBlobLease) lease, this.leaseOperationOptions); + future = CompletableFuture.completedFuture(result); + } catch (StorageException | IOException e) { + TRACE_LOGGER.warn(this.hostContext.withHostAndPartition(lease, "Failure updating lease"), e); + future = new CompletableFuture(); + future.completeExceptionally(LoggingUtils.wrapException(e, EventProcessorHostActionStrings.UPDATING_LEASE)); + } + + return future; + } + + public boolean updateLeaseInternal(AzureBlobLease lease, BlobRequestOptions options) throws StorageException, IOException { + if (lease == null) { + return false; + } + + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(lease, "Updating lease")); + + String token = lease.getToken(); + if ((token == null) || (token.length() == 0)) { + return false; + } + + // Renew the lease to make sure the update will go through. + // Renewing the lease is always logically a lease operation, even if it is part of writing a checkpoint, so + // don't pass options. + boolean result = renewLeaseInternal(lease); + if (result) { + CloudBlockBlob leaseBlob = lease.getBlob(); + try { + uploadLease(lease, leaseBlob, AccessCondition.generateLeaseCondition(token), UploadActivity.Update, options); + // Success! 
Result is already true, so pass it up unchanged + } catch (StorageException se) { + if (wasLeaseLost(se, lease.getPartitionId())) { + result = false; + } else { + throw se; + } + } catch (IOException ie) { + throw ie; + } + } + // else could not renew lease due to lease loss. Result is already false, so pass it up unchanged + + return result; + } + + private AzureBlobLease downloadLease(CloudBlockBlob blob, BlobRequestOptions options) throws StorageException, IOException { + String jsonLease = blob.downloadText(null, null, options, null); + TRACE_LOGGER.debug(this.hostContext.withHost("Raw JSON downloaded: " + jsonLease)); + AzureBlobLease rehydrated = this.gson.fromJson(jsonLease, AzureBlobLease.class); + AzureBlobLease blobLease = new AzureBlobLease(rehydrated, blob, this.leaseOperationOptions); + + if (blobLease.getOffset() != null) { + this.latestCheckpoint.put(blobLease.getPartitionId(), blobLease.getCheckpoint()); + } + + return blobLease; + } + + private void uploadLease(AzureBlobLease lease, CloudBlockBlob blob, AccessCondition condition, UploadActivity activity, BlobRequestOptions options) + throws StorageException, IOException { + if (activity != UploadActivity.Create) { + // It is possible for AzureBlobLease objects in memory to have stale offset/sequence number fields if a + // checkpoint was written but PartitionManager hasn't done its ten-second sweep which downloads new copies + // of all the leases. This can happen because we're trying to maintain the fiction that checkpoints and leases + // are separate -- which they can be in other implementations -- even though they are completely intertwined + // in this implementation. To prevent writing stale checkpoint data to the store, merge the checkpoint data + // from the most recently written checkpoint into this write, if needed. 
+ Checkpoint cached = this.latestCheckpoint.get(lease.getPartitionId()); // HASHTABLE + if ((cached != null) && ((cached.getSequenceNumber() > lease.getSequenceNumber()) || (lease.getOffset() == null))) { + lease.setOffset(cached.getOffset()); + lease.setSequenceNumber(cached.getSequenceNumber()); + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(lease, + "Replacing stale offset/seqno while uploading lease")); + } else if (lease.getOffset() != null) { + this.latestCheckpoint.put(lease.getPartitionId(), lease.getCheckpoint()); + } + } + + String jsonLease = this.gson.toJson(lease); + blob.uploadText(jsonLease, null, condition, options, null); + // During create, we blindly try upload and it may throw. Doing the logging after the upload + // avoids a spurious trace in that case. + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(lease, + "Raw JSON uploading for " + activity + ": " + jsonLease)); + + if ((activity == UploadActivity.Acquire) || (activity == UploadActivity.Release)) { + blob.downloadAttributes(); + HashMap metadata = blob.getMetadata(); + switch (activity) { + case Acquire: + // Add owner in metadata + metadata.put(AzureStorageCheckpointLeaseManager.METADATA_OWNER_NAME, lease.getOwner()); + break; + + case Release: + // Remove owner in metadata + metadata.remove(AzureStorageCheckpointLeaseManager.METADATA_OWNER_NAME); + break; + + default: + // Should never get here, but passing the metadata through unchanged is harmless. + break; + } + blob.setMetadata(metadata); + blob.uploadMetadata(condition, options, null); + } + // else don't touch metadata + } + + private boolean wasLeaseLost(StorageException se, String partitionId) { + boolean retval = false; + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(partitionId, "WAS LEASE LOST? 
Http " + se.getHttpStatusCode())); + if (se.getExtendedErrorInformation() != null) { + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(partitionId, + "Http " + se.getExtendedErrorInformation().getErrorCode() + " :: " + se.getExtendedErrorInformation().getErrorMessage())); + } + if ((se.getHttpStatusCode() == 409) || // conflict + (se.getHttpStatusCode() == 412)) // precondition failed + { + StorageExtendedErrorInformation extendedErrorInfo = se.getExtendedErrorInformation(); + if (extendedErrorInfo != null) { + String errorCode = extendedErrorInfo.getErrorCode(); + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(partitionId, "Error code: " + errorCode)); + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(partitionId, "Error message: " + extendedErrorInfo.getErrorMessage())); + if ((errorCode.compareTo(StorageErrorCodeStrings.LEASE_LOST) == 0) || + (errorCode.compareTo(StorageErrorCodeStrings.LEASE_ID_MISMATCH_WITH_LEASE_OPERATION) == 0) || + (errorCode.compareTo(StorageErrorCodeStrings.LEASE_ID_MISMATCH_WITH_BLOB_OPERATION) == 0) || + (errorCode.compareTo(StorageErrorCodeStrings.LEASE_ALREADY_PRESENT) == 0)) { + retval = true; + } + } + } + return retval; + } + + private enum UploadActivity {Create, Acquire, Release, Update} +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/AzureStoragePartitionManagerOptions.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/AzureStoragePartitionManagerOptions.java new file mode 100644 index 0000000000000..0b877ff7024f3 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/AzureStoragePartitionManagerOptions.java @@ -0,0 +1,20 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ + +package com.microsoft.azure.eventprocessorhost; + +public final class AzureStoragePartitionManagerOptions extends PartitionManagerOptions { + public AzureStoragePartitionManagerOptions() { + } + + @Override + public void setLeaseDurationInSeconds(int duration) { + // Max Azure Storage blob lease is 60 seconds + if (duration > 60) { + throw new IllegalArgumentException("Lease duration cannot be more than 60 seconds"); + } + super.setLeaseDurationInSeconds(duration); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/BaseLease.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/BaseLease.java new file mode 100644 index 0000000000000..3ca7cb1d95861 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/BaseLease.java @@ -0,0 +1,134 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ + +package com.microsoft.azure.eventprocessorhost; + +/** + * BaseLease class is public so that advanced users can implement an ILeaseManager. + * Unless you are implementing ILeaseManager you should not have to deal with objects + * of this class or derived classes directly. + *

    + * This lightweight base exists to allow ILeaseManager.getAllLeases to operate as quickly + * as possible -- for some lease manager implementations, loading the entire contents of a + * lease from the store may be expensive. BaseLease contains only the minimum amount of + * information required to allow PartitionScanner to operate. + *

    + * Note that a Lease object just carries information about a partition lease. The APIs + * to acquire/renew/release a lease are all on ILeaseManager. + */ +public class BaseLease implements Comparable { + private final String partitionId; + private String owner = ""; + private transient boolean isOwned = false; // do not serialize + + /** + * Do not use; added only for GSon deserializer + */ + protected BaseLease() { + partitionId = "-1"; + } + + /** + * Create a BaseLease for the given partition. + * + * @param partitionId Partition id for this lease. + */ + public BaseLease(String partitionId) { + this.partitionId = partitionId; + } + + /** + * Create and populate a BaseLease for the given partition. + * + * @param partitionId Partition id for this lease. + * @param owner Current owner of this lease, or empty. + * @param isOwned True if the lease is owned, false if not. + */ + public BaseLease(String partitionId, String owner, boolean isOwned) { + this.partitionId = partitionId; + this.owner = owner; + this.isOwned = isOwned; + } + + /** + * Create a BaseLease by duplicating the given Lease. + * + * @param source BaseLease to clone. + */ + public BaseLease(BaseLease source) { + this.partitionId = source.partitionId; + this.owner = source.owner; + this.isOwned = source.isOwned; + } + + /** + * The owner of a lease is the name of the EventProcessorHost instance which currently holds the lease. + * + * @return name of the owning instance + */ + public String getOwner() { + return this.owner; + } + + /** + * Set the owner string. Used when a host steals a lease. + * + * @param owner name of the new owning instance + */ + public void setOwner(String owner) { + this.owner = owner; + } + + /** + * Set the owned state of the lease. + * + * @param newState true if the lease is owned, or false if it is not + */ + public void setIsOwned(boolean newState) { + this.isOwned = newState; + } + + /** + * Get the owned state of the lease. 
+ * + * @return true if the lease is owned, or false if it is not + */ + public boolean getIsOwned() { + return this.isOwned; + } + + /** + * Convenience function for comparing possibleOwner against this.owner + * + * @param possibleOwner name to check + * @return true if possibleOwner is the same as this.owner, false otherwise + */ + public boolean isOwnedBy(String possibleOwner) { + boolean retval = false; + if (this.owner != null) { + retval = (this.owner.compareTo(possibleOwner) == 0); + } + return retval; + } + + /** + * Returns the id of the partition that this Lease is for. Immutable so there is no corresponding setter. + * + * @return partition id + */ + public String getPartitionId() { + return this.partitionId; + } + + // Compares by partition id + @Override + public int compareTo(BaseLease other) { + return this.partitionId.compareTo(other.getPartitionId()); + } + + String getStateDebug() { + return "N/A"; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/Checkpoint.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/Checkpoint.java new file mode 100644 index 0000000000000..1180420df3a41 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/Checkpoint.java @@ -0,0 +1,103 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ + +package com.microsoft.azure.eventprocessorhost; + +import com.microsoft.azure.eventhubs.impl.ClientConstants; + +/** + * Checkpoint class is public so that advanced users can implement an ICheckpointManager. + * Unless you are implementing ICheckpointManager you should not have to deal with objects + * of this class directly. + *

    + * A Checkpoint is essentially just a tuple. It has a fixed partition id, set at creation time + * and immutable thereafter, and associates that with an offset/sequenceNumber pair which + * indicates a position within the events in that partition. + */ +public class Checkpoint { + private final String partitionId; + private String offset = ClientConstants.START_OF_STREAM; + private long sequenceNumber = 0; + + /** + * Create a checkpoint with offset/sequenceNumber set to the start of the stream. + * + * @param partitionId Associated partition. + */ + public Checkpoint(String partitionId) { + this.partitionId = partitionId; + } + + /** + * Create a checkpoint with the given offset and sequenceNumber. It is important that the + * offset and sequence number refer to the same event in the stream. The safest thing + * to do is get both values from the system properties of one EventData instance. + * + * @param partitionId Associated partition. + * @param offset Offset in the stream. + * @param sequenceNumber Sequence number in the stream. + */ + public Checkpoint(String partitionId, String offset, long sequenceNumber) { + this.partitionId = partitionId; + this.offset = offset; + this.sequenceNumber = sequenceNumber; + } + + /** + * Create a checkpoint which is a duplicate of the given checkpoint. + * + * @param source Existing checkpoint to clone. + */ + public Checkpoint(Checkpoint source) { + this.partitionId = source.partitionId; + this.offset = source.offset; + this.sequenceNumber = source.sequenceNumber; + } + + /** + * Return the offset. + * + * @return the current offset value. + */ + public String getOffset() { + return this.offset; + } + + /** + * Set the offset. Remember to also set the sequence number! + * + * @param newOffset the new value for offset in the stream. + */ + public void setOffset(String newOffset) { + this.offset = newOffset; + } + + /** + * Get the sequence number. + * + * @return the current sequence number. 
+ */ + public long getSequenceNumber() { + return this.sequenceNumber; + } + + /** + * Set the sequence number. Remember to also set the offset! + * + * @param newSequenceNumber the new value for sequence number. + */ + public void setSequenceNumber(long newSequenceNumber) { + this.sequenceNumber = newSequenceNumber; + } + + /** + * Get the partition id. There is no corresponding setter because the partition id is immutable. + * + * @return the associated partition id. + */ + public String getPartitionId() { + return this.partitionId; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/Closable.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/Closable.java new file mode 100644 index 0000000000000..2d9fa0bff6e39 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/Closable.java @@ -0,0 +1,58 @@ +package com.microsoft.azure.eventprocessorhost; + +class Closable { + private final Object syncClose; + private final Closable parent; // null for top-level + private boolean isClosing; + private boolean isClosed; + + // null parent means top-level + Closable(Closable parent) { + this.syncClose = new Object(); + this.parent = parent; + this.isClosing = false; + this.isClosed = false; + } + + protected final boolean getIsClosed() { + final boolean isParentClosed = this.parent != null && this.parent.getIsClosed(); + synchronized (this.syncClose) { + return isParentClosed || this.isClosed; + } + } + + // returns true even if the Parent is (being) Closed + protected final boolean getIsClosingOrClosed() { + final boolean isParentClosingOrClosed = this.parent != null && this.parent.getIsClosingOrClosed(); + synchronized (this.syncClose) { + return isParentClosingOrClosed || this.isClosing || this.isClosed; + } + } + + protected final void setClosing() { + synchronized (this.syncClose) { + this.isClosing = 
true; + } + } + + protected final void setClosed() { + synchronized (this.syncClose) { + this.isClosing = false; + this.isClosed = true; + } + } + + protected final void throwIfClosingOrClosed(String message) { + if (getIsClosingOrClosed()) { + throw new ClosingException(message); + } + } + + class ClosingException extends RuntimeException { + private static final long serialVersionUID = 1138985585921317036L; + + ClosingException(String message) { + super(message); + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/CloseReason.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/CloseReason.java new file mode 100644 index 0000000000000..4fb4387be2f5b --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/CloseReason.java @@ -0,0 +1,22 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ + +package com.microsoft.azure.eventprocessorhost; + +/*** + * Used when implementing IEventProcessor. One argument to onClose is this enum. + */ +public enum CloseReason { + /*** + * The IEventProcessor is closing because the lease on the partition has been lost. + */ + LeaseLost, + + /*** + * The IEventProcessor is closing because the event processor host is being shut down, + * or because an error has occurred. 
+ */ + Shutdown +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/CompleteLease.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/CompleteLease.java new file mode 100644 index 0000000000000..f58cec14f9877 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/CompleteLease.java @@ -0,0 +1,80 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ + +package com.microsoft.azure.eventprocessorhost; + +/** + * CompleteLease class is public so that advanced users can implement an ILeaseManager. + * Unless you are implementing ILeaseManager you should not have to deal with objects + * of this class or derived classes directly. + *

    + * CompleteLease carries around complete information about a lease. By itself, it has the + * epoch. Any lease manager implementation can derive from this class to add data which + * the lease manager needs to function -- see AzureBlobLease for an example. Having two + * distinct classes allows the code to clearly express which variety of lease any variable + * holds or a method requires, and avoids the problem of accidentally supplying a lightweight + * BaseLease to a method which needs the lease-manager-specific fields. + */ +public class CompleteLease extends BaseLease { + protected long epoch = -1; // start with illegal epoch + + /** + * Do not use; added only for GSon deserializer + */ + protected CompleteLease() { + super(); + } + + /** + * Create a CompleteLease for the given partition. + * + * @param partitionId Partition id for this lease. + */ + public CompleteLease(String partitionId) { + super(partitionId); + } + + /** + * Create a Lease by duplicating the given Lease. + * + * @param source Lease to clone. + */ + public CompleteLease(CompleteLease source) { + super(source); + this.epoch = source.epoch; + } + + /** + * Epoch is a concept used by Event Hub receivers. If a receiver is created on a partition + * with a higher epoch than the existing receiver, the previous receiver is forcibly disconnected. + * Attempting to create a receiver with a lower epoch than the existing receiver will fail. The Lease + * carries the epoch around so that when a host instance steals a lease, it can create a receiver with a higher epoch. + * + * @return the epoch of the current receiver + */ + public long getEpoch() { + return this.epoch; + } + + /** + * Set the epoch value. Used to update the lease after creating a new receiver with a higher epoch. + * + * @param epoch updated epoch value + */ + public void setEpoch(long epoch) { + this.epoch = epoch; + } + + /** + * The most common operation on the epoch value is incrementing it after stealing a lease. 
This + * convenience function replaces the get-increment-set that would otherwise be required. + * + * @return The new value of the epoch. + */ + public long incrementEpoch() { + this.epoch++; + return this.epoch; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/DefaultEventProcessorFactory.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/DefaultEventProcessorFactory.java new file mode 100644 index 0000000000000..99e578be6da59 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/DefaultEventProcessorFactory.java @@ -0,0 +1,20 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ + +package com.microsoft.azure.eventprocessorhost; + + +class DefaultEventProcessorFactory implements IEventProcessorFactory { + private Class eventProcessorClass = null; + + void setEventProcessorClass(Class eventProcessorClass) { + this.eventProcessorClass = eventProcessorClass; + } + + @Override + public T createEventProcessor(PartitionContext context) throws Exception { + return this.eventProcessorClass.newInstance(); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/EventProcessorHost.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/EventProcessorHost.java new file mode 100644 index 0000000000000..86724933e0c9e --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/EventProcessorHost.java @@ -0,0 +1,570 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ + +package com.microsoft.azure.eventprocessorhost; + +import com.microsoft.azure.eventhubs.ConnectionStringBuilder; +import com.microsoft.azure.eventhubs.RetryPolicy; +import com.microsoft.azure.storage.StorageException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.net.URISyntaxException; +import java.security.InvalidKeyException; +import java.util.UUID; +import java.util.concurrent.*; +import java.util.concurrent.atomic.AtomicInteger; + +/*** + * The main class of event processor host. + */ +public final class EventProcessorHost { + private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(EventProcessorHost.class); + private static final Object uuidSynchronizer = new Object(); + // weOwnExecutor exists to support user-supplied thread pools. + private final boolean weOwnExecutor; + private final ScheduledExecutorService executorService; + private final int executorServicePoolSize = 16; + private final HostContext hostContext; + private boolean initializeLeaseManager = false; + private volatile CompletableFuture unregistered = null; + private PartitionManager partitionManager; + private PartitionManagerOptions partitionManagerOptions = null; + + /** + * Create a new host instance to process events from an Event Hub. + *

    + * Since Event Hubs are generally used for scale-out, high-traffic scenarios, in most scenarios there will + * be only one host instance per process, and the processes will be run on separate machines. Besides scale, this also + * provides isolation: one process or machine crashing will not take out multiple host instances. However, it is + * supported to run multiple host instances on one machine, or even within one process, for development and testing. + *

    + * The hostName parameter is a name for this event processor host, which must be unique among all event processor host instances + * receiving from this event hub+consumer group combination: the unique name is used to distinguish which event processor host + * instance owns the lease for a given partition. An easy way to generate a unique hostName which also includes + * other information is to call EventProcessorHost.createHostName("mystring"). + *

    + * This overload of the constructor uses the built-in lease and checkpoint managers. The + * Azure Storage account specified by the storageConnectionString parameter is used by the built-in + * managers to record leases and checkpoints, in the specified container. + *

    + * The Event Hub connection string may be conveniently constructed using the ConnectionStringBuilder class + * from the Java Event Hub client. + * + * @param hostName A name for this event processor host. See method notes. + * @param eventHubPath Specifies the Event Hub to receive events from. + * @param consumerGroupName The name of the consumer group to use when receiving from the Event Hub. + * @param eventHubConnectionString Connection string for the Event Hub to receive from. + * @param storageConnectionString Connection string for the Azure Storage account to use for persisting leases and checkpoints. + * @param storageContainerName Azure Storage container name for use by built-in lease and checkpoint manager. + */ + public EventProcessorHost( + final String hostName, + final String eventHubPath, + final String consumerGroupName, + final String eventHubConnectionString, + final String storageConnectionString, + final String storageContainerName) { + this(hostName, eventHubPath, consumerGroupName, eventHubConnectionString, storageConnectionString, storageContainerName, (ScheduledExecutorService) null); + } + + /** + * Create a new host to process events from an Event Hub. + *

    + * This overload adds an argument to specify a user-provided thread pool. The number of partitions in the + * target event hub and the number of host instances should be considered when choosing the size of the thread pool: + * how many partitions is one instance expected to own under normal circumstances? One thread per partition should + * provide good performance, while being able to support more partitions adequately if a host instance fails and its + * partitions must be redistributed. + * + * @param hostName A name for this event processor host. See method notes. + * @param eventHubPath Specifies the Event Hub to receive events from. + * @param consumerGroupName The name of the consumer group to use when receiving from the Event Hub. + * @param eventHubConnectionString Connection string for the Event Hub to receive from. + * @param storageConnectionString Connection string for the Azure Storage account to use for persisting leases and checkpoints. + * @param storageContainerName Azure Storage container name for use by built-in lease and checkpoint manager. + * @param executorService User-supplied thread executor, or null to use EventProcessorHost-internal executor. + */ + public EventProcessorHost( + final String hostName, + final String eventHubPath, + final String consumerGroupName, + final String eventHubConnectionString, + final String storageConnectionString, + final String storageContainerName, + final ScheduledExecutorService executorService) { + this(hostName, eventHubPath, consumerGroupName, eventHubConnectionString, storageConnectionString, storageContainerName, (String) null, executorService); + } + + /** + * Create a new host to process events from an Event Hub. + *

    + * This overload adds an argument to specify a prefix used by the built-in lease manager when naming blobs in Azure Storage. + * + * @param hostName A name for this event processor host. See method notes. + * @param eventHubPath Specifies the Event Hub to receive events from. + * @param consumerGroupName The name of the consumer group to use when receiving from the Event Hub. + * @param eventHubConnectionString Connection string for the Event Hub to receive from. + * @param storageConnectionString Connection string for the Azure Storage account to use for persisting leases and checkpoints. + * @param storageContainerName Azure Storage container name for use by built-in lease and checkpoint manager. + * @param storageBlobPrefix Prefix used when naming blobs within the storage container. + */ + public EventProcessorHost( + final String hostName, + final String eventHubPath, + final String consumerGroupName, + final String eventHubConnectionString, + final String storageConnectionString, + final String storageContainerName, + final String storageBlobPrefix) { + this(hostName, eventHubPath, consumerGroupName, eventHubConnectionString, storageConnectionString, storageContainerName, storageBlobPrefix, + (ScheduledExecutorService) null); + } + + /** + * Create a new host to process events from an Event Hub. + *

    + * This overload allows the caller to specify both a user-supplied thread pool and + * a prefix used by the built-in lease manager when naming blobs in Azure Storage. + * + * @param hostName A name for this event processor host. See method notes. + * @param eventHubPath Specifies the Event Hub to receive events from. + * @param consumerGroupName The name of the consumer group to use when receiving from the Event Hub. + * @param eventHubConnectionString Connection string for the Event Hub to receive from. + * @param storageConnectionString Connection string for the Azure Storage account to use for persisting leases and checkpoints. + * @param storageContainerName Azure Storage container name for use by built-in lease and checkpoint manager. + * @param storageBlobPrefix Prefix used when naming blobs within the storage container. + * @param executorService User-supplied thread executor, or null to use EventProcessorHost-internal executor. + */ + public EventProcessorHost( + final String hostName, + final String eventHubPath, + final String consumerGroupName, + final String eventHubConnectionString, + final String storageConnectionString, + final String storageContainerName, + final String storageBlobPrefix, + final ScheduledExecutorService executorService) { + // Would like to check storageConnectionString and storageContainerName here but can't, because Java doesn't allow statements before + // calling another constructor. storageBlobPrefix is allowed to be null or empty, doesn't need checking. + this(hostName, eventHubPath, consumerGroupName, eventHubConnectionString, + new AzureStorageCheckpointLeaseManager(storageConnectionString, storageContainerName, storageBlobPrefix), executorService); + this.initializeLeaseManager = true; + this.partitionManagerOptions = new AzureStoragePartitionManagerOptions(); + } + + // Because Java won't let you do ANYTHING before calling another constructor. 
In particular, you can't + // new up an object and pass it as TWO parameters of the other constructor. + private EventProcessorHost( + final String hostName, + final String eventHubPath, + final String consumerGroupName, + final String eventHubConnectionString, + final AzureStorageCheckpointLeaseManager combinedManager, + final ScheduledExecutorService executorService) { + this(hostName, eventHubPath, consumerGroupName, eventHubConnectionString, combinedManager, combinedManager, executorService, null); + } + + /** + * Create a new host to process events from an Event Hub. + *

    + * This overload allows the caller to provide their own lease and checkpoint managers to replace the built-in + * ones based on Azure Storage. + * + * @param hostName A name for this event processor host. See method notes. + * @param eventHubPath Specifies the Event Hub to receive events from. + * @param consumerGroupName The name of the consumer group to use when receiving from the Event Hub. + * @param eventHubConnectionString Connection string for the Event Hub to receive from. + * @param checkpointManager Implementation of ICheckpointManager, to be replacement checkpoint manager. + * @param leaseManager Implementation of ILeaseManager, to be replacement lease manager. + */ + public EventProcessorHost( + final String hostName, + final String eventHubPath, + final String consumerGroupName, + final String eventHubConnectionString, + ICheckpointManager checkpointManager, + ILeaseManager leaseManager) { + this(hostName, eventHubPath, consumerGroupName, eventHubConnectionString, checkpointManager, leaseManager, null, null); + } + + /** + * Create a new host to process events from an Event Hub. + *

    + * This overload allows the caller to provide their own lease and checkpoint managers to replace the built-in + * ones based on Azure Storage, and to provide an executor service and a retry policy for communications with the event hub. + * + * @param hostName A name for this event processor host. See method notes. + * @param eventHubPath Specifies the Event Hub to receive events from. + * @param consumerGroupName The name of the consumer group to use when receiving from the Event Hub. + * @param eventHubConnectionString Connection string for the Event Hub to receive from. + * @param checkpointManager Implementation of ICheckpointManager, to be replacement checkpoint manager. + * @param leaseManager Implementation of ILeaseManager, to be replacement lease manager. + * @param executorService User-supplied thread executor, or null to use EventProcessorHost-internal executor. + * @param retryPolicy Retry policy governing communications with the event hub. + */ + public EventProcessorHost( + final String hostName, + final String eventHubPath, + final String consumerGroupName, + final String eventHubConnectionString, + ICheckpointManager checkpointManager, + ILeaseManager leaseManager, + ScheduledExecutorService executorService, + RetryPolicy retryPolicy) { + if ((hostName == null) || hostName.isEmpty()) { + throw new IllegalArgumentException("hostName argument must not be null or empty string"); + } + + // eventHubPath is allowed to be null or empty if it is provided in the connection string. That will be checked later. 
+ if ((consumerGroupName == null) || consumerGroupName.isEmpty()) { + throw new IllegalArgumentException("consumerGroupName argument must not be null or empty"); + } + + if ((eventHubConnectionString == null) || eventHubConnectionString.isEmpty()) { + throw new IllegalArgumentException("eventHubConnectionString argument must not be null or empty"); + } + + // The event hub path must appear in at least one of the eventHubPath argument or the connection string. + // If it appears in both, then it must be the same in both. If it appears in only one, populate the other. + ConnectionStringBuilder providedCSB = new ConnectionStringBuilder(eventHubConnectionString); + String extractedEntityPath = providedCSB.getEventHubName(); + String effectiveEventHubPath = eventHubPath; + String effectiveEventHubConnectionString = eventHubConnectionString; + if ((effectiveEventHubPath != null) && !effectiveEventHubPath.isEmpty()) { + if (extractedEntityPath != null) { + if (effectiveEventHubPath.compareTo(extractedEntityPath) != 0) { + throw new IllegalArgumentException("Provided EventHub path in eventHubPath parameter conflicts with the path in provided EventHub connection string"); + } + // else they are the same and that's fine + } else { + // There is no entity path in the connection string, so put it there. 
+ ConnectionStringBuilder rebuildCSB = new ConnectionStringBuilder() + .setEndpoint(providedCSB.getEndpoint()) + .setEventHubName(effectiveEventHubPath) + .setSasKeyName(providedCSB.getSasKeyName()) + .setSasKey(providedCSB.getSasKey()); + rebuildCSB.setOperationTimeout(providedCSB.getOperationTimeout()); + effectiveEventHubConnectionString = rebuildCSB.toString(); + } + } else { + if ((extractedEntityPath != null) && !extractedEntityPath.isEmpty()) { + effectiveEventHubPath = extractedEntityPath; + } else { + throw new IllegalArgumentException("Provide EventHub entity path in either eventHubPath argument or in eventHubConnectionString"); + } + } + + if (checkpointManager == null) { + throw new IllegalArgumentException("Must provide an object which implements ICheckpointManager"); + } + if (leaseManager == null) { + throw new IllegalArgumentException("Must provide an object which implements ILeaseManager"); + } + // executorService argument is allowed to be null, that is the indication to use an internal threadpool. + + if (this.partitionManagerOptions == null) { + // Normally will not be null because we're using the AzureStorage implementation. + // If it is null, we're using user-supplied implementation. Establish generic defaults + // in case the user doesn't provide an options object. + this.partitionManagerOptions = new PartitionManagerOptions(); + } + + if (executorService != null) { + // User has supplied an ExecutorService, so use that. 
+ this.weOwnExecutor = false; + this.executorService = executorService; + } else { + this.weOwnExecutor = true; + this.executorService = Executors.newScheduledThreadPool( + this.executorServicePoolSize, + new EventProcessorHostThreadPoolFactory(hostName, effectiveEventHubPath, consumerGroupName)); + } + + this.hostContext = new HostContext(this.executorService, + this, hostName, + effectiveEventHubPath, consumerGroupName, effectiveEventHubConnectionString, retryPolicy, + leaseManager, checkpointManager); + + this.partitionManager = new PartitionManager(hostContext); + + TRACE_LOGGER.info(this.hostContext.withHost("New EventProcessorHost created.")); + } + + /** + * Convenience method for generating unique host names, safe to pass to the EventProcessorHost constructors + * that take a hostName argument. + *

    + * If a prefix is supplied, the constructed name begins with that string. If the prefix argument is null or + * an empty string, the constructed name begins with "javahost". Then a dash '-' and a UUID are appended to + * create a unique name. + * + * @param prefix String to use as the beginning of the name. If null or empty, a default is used. + * @return A unique host name to pass to EventProcessorHost constructors. + */ + public static String createHostName(String prefix) { + String usePrefix = prefix; + if ((usePrefix == null) || usePrefix.isEmpty()) { + usePrefix = "javahost"; + } + return usePrefix + "-" + safeCreateUUID(); + } + + /** + * Synchronized string UUID generation convenience method. + *

    + * We saw null and empty strings returned from UUID.randomUUID().toString() when used from multiple + * threads and there is no clear answer on the net about whether it is really thread-safe or not. + *

    + * One of the major users of UUIDs is the built-in lease and checkpoint manager, which can be replaced by + * user implementations. This UUID generation method is public so user implementations can use it as well and + * avoid the problems. + * + * @return A string UUID with dashes but no curly brackets. + */ + public static String safeCreateUUID() { + synchronized (EventProcessorHost.uuidSynchronizer) { + final UUID newUuid = UUID.randomUUID(); + return newUuid.toString(); + } + } + + /** + * The processor host name is supplied by the user at constructor time, but being able to get + * it is useful because it means not having to carry both the host object and the name around. + * As long as you have the host object, you can get the name back, such as for logging. + * + * @return The processor host name + */ + public String getHostName() { + return this.hostContext.getHostName(); + } + + // TEST USE ONLY + void setPartitionManager(PartitionManager pm) { + this.partitionManager = pm; + } + + HostContext getHostContext() { + return this.hostContext; + } + + /** + * Returns the existing partition manager options object. Unless you are providing implementations of + * ILeaseManager and ICheckpointManager, to change partition manager options, call this method to get + * the existing object and call setters on it to adjust the values. + * + * @return the internally-created PartitionManagerOptions object or any replacement object set with setPartitionManagerOptions + */ + public PartitionManagerOptions getPartitionManagerOptions() { + return this.partitionManagerOptions; + } + + /** + * Set the partition manager options all at once. Normally this method is used only when providing user + * implementations of ILeaseManager and ICheckpointManager, because it allows passing an object of a class + * derived from PartitionManagerOptions, which could contain options specific to the user-implemented ILeaseManager + * or ICheckpointManager. 
When using the default, Azure Storage-based implementation, the recommendation is to + * call getPartitionManagerOptions to return the existing options object, then call setters on that object to + * adjust the values. + * + * @param options - a PartitionManagerOptions object (or derived object) representing the desired options + */ + public void setPartitionManagerOptions(PartitionManagerOptions options) { + this.partitionManagerOptions = options; + } + + /** + * Register class for event processor and start processing. + *

    + * This overload uses the default event processor factory, which simply creates new instances of + * the registered event processor class, and uses all the default options. + *

    + * The returned CompletableFuture completes when host initialization is finished. Initialization failures are + * reported by completing the future with an exception, so it is important to call get() on the future and handle + * any exceptions thrown. + *

    +     * class MyEventProcessor implements IEventProcessor { ... }
    +     * EventProcessorHost host = new EventProcessorHost(...);
    +     * {@literal CompletableFuture} foo = host.registerEventProcessor(MyEventProcessor.class);
    +     * foo.get();
    +     * 
    + * + * @param Not actually a parameter. Represents the type of your class that implements IEventProcessor. + * @param eventProcessorType Class that implements IEventProcessor. + * @return Future that completes when initialization is finished. + */ + public CompletableFuture registerEventProcessor(Class eventProcessorType) { + DefaultEventProcessorFactory defaultFactory = new DefaultEventProcessorFactory(); + defaultFactory.setEventProcessorClass(eventProcessorType); + return registerEventProcessorFactory(defaultFactory, EventProcessorOptions.getDefaultOptions()); + } + + /** + * Register class for event processor and start processing. + *

    + * This overload uses the default event processor factory, which simply creates new instances of + * the registered event processor class, but takes user-specified options. + *

    + * The returned CompletableFuture completes when host initialization is finished. Initialization failures are + * reported by completing the future with an exception, so it is important to call get() on the future and handle + * any exceptions thrown. + * + * @param Not actually a parameter. Represents the type of your class that implements IEventProcessor. + * @param eventProcessorType Class that implements IEventProcessor. + * @param processorOptions Options for the processor host and event processor(s). + * @return Future that completes when initialization is finished. + */ + public CompletableFuture registerEventProcessor(Class eventProcessorType, EventProcessorOptions processorOptions) { + DefaultEventProcessorFactory defaultFactory = new DefaultEventProcessorFactory(); + defaultFactory.setEventProcessorClass(eventProcessorType); + return registerEventProcessorFactory(defaultFactory, processorOptions); + } + + /** + * Register a user-supplied event processor factory and start processing. + *

    + * If creating a new event processor requires more work than just new'ing an object, the user must + * create an object that implements IEventProcessorFactory and pass it to this method, instead of calling + * registerEventProcessor. + *

    + * This overload uses default options for the processor host and event processor(s). + *

    + * The returned CompletableFuture completes when host initialization is finished. Initialization failures are + * reported by completing the future with an exception, so it is important to call get() on the future and handle + * any exceptions thrown. + * + * @param factory User-supplied event processor factory object. + * @return Future that completes when initialization is finished. + */ + public CompletableFuture registerEventProcessorFactory(IEventProcessorFactory factory) { + return registerEventProcessorFactory(factory, EventProcessorOptions.getDefaultOptions()); + } + + /** + * Register user-supplied event processor factory and start processing. + *

    + * This overload takes user-specified options. + *

    + * The returned CompletableFuture completes when host initialization is finished. Initialization failures are + * reported by completing the future with an exception, so it is important to call get() on the future and handle + * any exceptions thrown. + * + * @param factory User-supplied event processor factory object. + * @param processorOptions Options for the processor host and event processor(s). + * @return Future that completes when initialization is finished. + */ + public CompletableFuture registerEventProcessorFactory(IEventProcessorFactory factory, EventProcessorOptions processorOptions) { + if (this.unregistered != null) { + throw new IllegalStateException("Register cannot be called on an EventProcessorHost after unregister. Please create a new EventProcessorHost instance."); + } + if (this.hostContext.getEventProcessorFactory() != null) { + throw new IllegalStateException("Register has already been called on this EventProcessorHost"); + } + + this.hostContext.setEventProcessorFactory(factory); + this.hostContext.setEventProcessorOptions(processorOptions); + + if (this.executorService.isShutdown() || this.executorService.isTerminated()) { + TRACE_LOGGER.warn(this.hostContext.withHost("Calling registerEventProcessor/Factory after executor service has been shut down.")); + throw new RejectedExecutionException("EventProcessorHost executor service has been shut down"); + } + + if (this.initializeLeaseManager) { + try { + ((AzureStorageCheckpointLeaseManager) this.hostContext.getLeaseManager()).initialize(this.hostContext); + } catch (InvalidKeyException | URISyntaxException | StorageException e) { + TRACE_LOGGER.error(this.hostContext.withHost("Failure initializing default lease and checkpoint manager.")); + throw new RuntimeException("Failure initializing Storage lease manager", e); + } + } + + TRACE_LOGGER.info(this.hostContext.withHost("Starting event processing.")); + + return this.partitionManager.initialize(); + } + + /** + * Stop processing 
events and shut down this host instance. + * + * @return A CompletableFuture that completes when shutdown is finished. + */ + public CompletableFuture unregisterEventProcessor() { + TRACE_LOGGER.info(this.hostContext.withHost("Stopping event processing")); + + if (this.unregistered == null) { + // PartitionManager is created in constructor. If this object exists, then + // this.partitionManager is not null. + this.unregistered = this.partitionManager.stopPartitions(); + + // If we own the executor, stop it also. + // Owned executor is also created in constructor. + if (this.weOwnExecutor) { + this.unregistered = this.unregistered.thenRunAsync(() -> + { + // IMPORTANT: run this last stage in the default threadpool! + // If a task running in a threadpool waits for that threadpool to terminate, it's going to wait a long time... + + // It is OK to call shutdown() here even if threads are still running. + // Shutdown() causes the executor to stop accepting new tasks, but existing tasks will + // run to completion. The pool will terminate when all existing tasks finish. + // By this point all new tasks generated by the shutdown have been submitted. 
+ this.executorService.shutdown(); + + try { + this.executorService.awaitTermination(10, TimeUnit.MINUTES); + } catch (InterruptedException e) { + throw new CompletionException(e); + } + }, ForkJoinPool.commonPool()); + } + } + + return this.unregistered; + } + + static class EventProcessorHostThreadPoolFactory implements ThreadFactory { + private static final AtomicInteger poolNumber = new AtomicInteger(1); + private final AtomicInteger threadNumber = new AtomicInteger(1); + private final ThreadGroup group; + private final String namePrefix; + private final String hostName; + private final String entityName; + private final String consumerGroupName; + + public EventProcessorHostThreadPoolFactory( + String hostName, + String entityName, + String consumerGroupName) { + this.hostName = hostName; + this.entityName = entityName; + this.consumerGroupName = consumerGroupName; + this.namePrefix = this.getNamePrefix(); + SecurityManager s = System.getSecurityManager(); + this.group = (s != null) ? s.getThreadGroup() : Thread.currentThread().getThreadGroup(); + } + + @Override + public Thread newThread(Runnable r) { + Thread t = new Thread(this.group, r, this.namePrefix + this.threadNumber.getAndIncrement(), 0); + t.setDaemon(false); + t.setPriority(Thread.NORM_PRIORITY); + t.setUncaughtExceptionHandler(new ThreadUncaughtExceptionHandler()); + return t; + } + + private String getNamePrefix() { + return String.format("[%s|%s|%s]-%s-", + this.entityName, this.consumerGroupName, this.hostName, poolNumber.getAndIncrement()); + } + + static class ThreadUncaughtExceptionHandler implements Thread.UncaughtExceptionHandler { + @Override + public void uncaughtException(Thread t, Throwable e) { + TRACE_LOGGER.warn("Uncaught exception occurred. 
Thread " + t.getName(), e); + } + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/EventProcessorHostActionStrings.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/EventProcessorHostActionStrings.java new file mode 100644 index 0000000000000..7d1335d5dfe4e --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/EventProcessorHostActionStrings.java @@ -0,0 +1,36 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ + +package com.microsoft.azure.eventprocessorhost; + +/*** + * The action string of ExceptionReceivedEventArgs will be one of these. + * They describe what activity was taking place when the exception occurred. + */ +public final class EventProcessorHostActionStrings { + public final static String ACQUIRING_LEASE = "Acquiring Lease"; + public final static String CHECKING_CHECKPOINT_STORE = "Checking Checpoint Store Existence"; + public final static String CHECKING_LEASES = "Checking Leases"; + public final static String CHECKING_LEASE_STORE = "Checking Lease Store Existence"; + public final static String CLOSING_EVENT_PROCESSOR = "Closing Event Processor"; + public final static String CREATING_CHECKPOINTS = "Creating Checkpoint Holders"; + public final static String CREATING_CHECKPOINT_STORE = "Creating Checkpoint Store"; + public final static String CREATING_EVENT_HUB_CLIENT = "Creating Event Hub Client"; + public final static String CREATING_EVENT_PROCESSOR = "Creating Event Processor"; + public final static String CREATING_LEASES = "Creating Leases"; + public final static String CREATING_LEASE_STORE = "Creating Lease Store"; + public final static String DELETING_LEASE = "Deleting Lease"; + public final static String GETTING_CHECKPOINT = "Getting Checkpoint Details"; + 
public final static String GETTING_LEASE = "Getting Lease Details"; + public final static String INITIALIZING_STORES = "Initializing Stores"; + public final static String OPENING_EVENT_PROCESSOR = "Opening Event Processor"; + public final static String PARTITION_MANAGER_CLEANUP = "Partition Manager Cleanup"; + public final static String PARTITION_MANAGER_MAIN_LOOP = "Partition Manager Main Loop"; + public final static String RELEASING_LEASE = "Releasing Lease"; + public final static String RENEWING_LEASE = "Renewing Lease"; + public final static String STEALING_LEASE = "Stealing Lease"; + public final static String UPDATING_CHECKPOINT = "Updating Checkpoint"; + public final static String UPDATING_LEASE = "Updating Lease"; +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/EventProcessorOptions.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/EventProcessorOptions.java new file mode 100644 index 0000000000000..e5d5ac8cce5e7 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/EventProcessorOptions.java @@ -0,0 +1,242 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ + +package com.microsoft.azure.eventprocessorhost; + +import com.microsoft.azure.eventhubs.EventPosition; +import com.microsoft.azure.eventhubs.PartitionReceiver; + +import java.time.Duration; +import java.util.Locale; +import java.util.function.Consumer; +import java.util.function.Function; + +/*** + * Options affecting the behavior of the event processor host instance in general. 
+ */ +public final class EventProcessorOptions { + private Consumer exceptionNotificationHandler = null; + private Boolean invokeProcessorAfterReceiveTimeout = false; + private boolean receiverRuntimeMetricEnabled = false; + private int maxBatchSize = 10; + private int prefetchCount = 300; + private Duration receiveTimeOut = Duration.ofMinutes(1); + private Function initialPositionProvider = (partitionId) -> { + return EventPosition.fromStartOfStream(); + }; + + public EventProcessorOptions() { + } + + /*** + * Returns an EventProcessorOptions instance with all options set to the default values. + * + * The default values are: + *

    +     * MaxBatchSize: 10
    +     * ReceiveTimeOut: 1 minute
    +     * PrefetchCount: 300
    +     * InitialPositionProvider: uses the last checkpoint, or START_OF_STREAM
    +     * InvokeProcessorAfterReceiveTimeout: false
    +     * ReceiverRuntimeMetricEnabled: false
    +     * 
    + * + * @return an EventProcessorOptions instance with all options set to the default values + */ + public static EventProcessorOptions getDefaultOptions() { + return new EventProcessorOptions(); + } + + /** + * Sets a handler which receives notification of general exceptions. + *

    + * Exceptions which occur while processing events from a particular Event Hub partition are delivered + * to the onError method of the event processor for that partition. This handler is called on occasions + * when there is no event processor associated with the throwing activity, or the event processor could + * not be created. + *

    + * The handler is not expected to do anything about the exception. If it is possible to recover, the + * event processor host instance will recover automatically. + * + * @param notificationHandler Handler which is called when an exception occurs. Set to null to stop handling. + */ + public void setExceptionNotification(Consumer notificationHandler) { + this.exceptionNotificationHandler = notificationHandler; + } + + /** + * Returns the maximum number of events that will be passed to one call to IEventProcessor.onEvents + * + * @return the maximum number of events that will be passed to one call to IEventProcessor.onEvents + */ + public int getMaxBatchSize() { + return this.maxBatchSize; + } + + /** + * Sets the maximum number of events that will be passed to one call to IEventProcessor.onEvents + * + * @param maxBatchSize the maximum number of events that will be passed to one call to IEventProcessor.onEvents + */ + public void setMaxBatchSize(int maxBatchSize) { + this.maxBatchSize = maxBatchSize; + } + + /** + * Returns the timeout for receive operations. + * + * @return the timeout for receive operations + */ + public Duration getReceiveTimeOut() { + return this.receiveTimeOut; + } + + /** + * Sets the timeout for receive operations. + * + * @param receiveTimeOut new timeout for receive operations + */ + public void setReceiveTimeOut(Duration receiveTimeOut) { + this.receiveTimeOut = receiveTimeOut; + } + + /*** + * Returns the current prefetch count for the underlying event hub client. + * + * @return the current prefetch count for the underlying client + */ + public int getPrefetchCount() { + return this.prefetchCount; + } + + /*** + * Sets the prefetch count for the underlying event hub client. + * + * The default is 300. This controls how many events are received in advance. + * + * @param prefetchCount The new prefetch count. 
+ */ + public void setPrefetchCount(int prefetchCount) { + if (prefetchCount < PartitionReceiver.MINIMUM_PREFETCH_COUNT) { + throw new IllegalArgumentException(String.format(Locale.US, + "PrefetchCount has to be above %s", PartitionReceiver.MINIMUM_PREFETCH_COUNT)); + } + + if (prefetchCount > PartitionReceiver.MAXIMUM_PREFETCH_COUNT) { + throw new IllegalArgumentException(String.format(Locale.US, + "PrefetchCount has to be below %s", PartitionReceiver.MAXIMUM_PREFETCH_COUNT)); + } + + this.prefetchCount = prefetchCount; + } + + /*** + * If there is no checkpoint for a partition, the initialPositionProvider function is used to determine + * the position at which to start receiving events for that partition. + * + * @return the current initial position provider function + */ + public Function getInitialPositionProvider() { + return this.initialPositionProvider; + } + + /*** + * Sets the function used to determine the position at which to start receiving events for a + * partition if there is no checkpoint for that partition. + * + * The provider function takes one argument, the partition id (a String), and returns the desired position. + * + * @param initialPositionProvider The new provider function. + */ + public void setInitialPositionProvider(Function initialPositionProvider) { + this.initialPositionProvider = initialPositionProvider; + } + + /*** + * Returns whether the EventProcessorHost will call IEventProcessor.onEvents() with an empty iterable + * when a receive timeout occurs (true) or not (false). + * + * Defaults to false. + * + * @return true if EventProcessorHost will call IEventProcessor.OnEvents on receive timeout, false otherwise + */ + public Boolean getInvokeProcessorAfterReceiveTimeout() { + return this.invokeProcessorAfterReceiveTimeout; + } + + /** + * Changes whether the EventProcessorHost will call IEventProcessor.onEvents() with an empty iterable + * when a receive timeout occurs (true) or not (false). + *

    + * The default is false (no call). + * + * @param invokeProcessorAfterReceiveTimeout the new value for what to do + */ + public void setInvokeProcessorAfterReceiveTimeout(Boolean invokeProcessorAfterReceiveTimeout) { + this.invokeProcessorAfterReceiveTimeout = invokeProcessorAfterReceiveTimeout; + } + + /** + * Knob to enable/disable runtime metric of the receiver. If this is set to true, + * the first parameter {@link com.microsoft.azure.eventprocessorhost.PartitionContext#runtimeInformation} of + * {@link IEventProcessor#onEvents(com.microsoft.azure.eventprocessorhost.PartitionContext, java.lang.Iterable)} will be populated. + *

    + * Enabling this knob will add 3 additional properties to all raw AMQP events received. + * + * @return the {@link boolean} indicating, whether, the runtime metric of the receiver was enabled + */ + public boolean getReceiverRuntimeMetricEnabled() { + return this.receiverRuntimeMetricEnabled; + } + + /** + * Knob to enable/disable runtime metric of the receiver. If this is set to true, + * the first parameter {@link com.microsoft.azure.eventprocessorhost.PartitionContext#runtimeInformation} of + * {@link IEventProcessor#onEvents(com.microsoft.azure.eventprocessorhost.PartitionContext, java.lang.Iterable)} will be populated. + *

    + * Enabling this knob will add 3 additional properties to all raw AMQP events received. + * + * @param value the {@link boolean} to indicate, whether, the runtime metric of the receiver should be enabled + */ + public void setReceiverRuntimeMetricEnabled(boolean value) { + this.receiverRuntimeMetricEnabled = value; + } + + void notifyOfException(String hostname, Exception exception, String action) { + notifyOfException(hostname, exception, action, ExceptionReceivedEventArgs.NO_ASSOCIATED_PARTITION); + } + + void notifyOfException(String hostname, Exception exception, String action, String partitionId) { + // Capture handler so it doesn't get set to null between test and use + Consumer handler = this.exceptionNotificationHandler; + if (handler != null) { + handler.accept(new ExceptionReceivedEventArgs(hostname, exception, action, partitionId)); + } + } + + /*** + * A prefab initial position provider that starts from the first event available. + * + * How to use this initial position provider: setInitialPositionProvider(new EventProcessorOptions.StartOfStreamInitialPositionProvider()); + */ + public class StartOfStreamInitialPositionProvider implements Function { + @Override + public EventPosition apply(String t) { + return EventPosition.fromStartOfStream(); + } + } + + /*** + * A prefab initial position provider that starts from the next event that becomes available. 
+ * + * How to use this initial position provider: setInitialPositionProvider(new EventProcessorOptions.EndOfStreamInitialPositionProvider()); + */ + public class EndOfStreamInitialPositionProvider implements Function { + @Override + public EventPosition apply(String t) { + return EventPosition.fromEndOfStream(); + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ExceptionReceivedEventArgs.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ExceptionReceivedEventArgs.java new file mode 100644 index 0000000000000..78eb7311864a0 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ExceptionReceivedEventArgs.java @@ -0,0 +1,68 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ + +package com.microsoft.azure.eventprocessorhost; + +/** + * Passed as an argument to the general exception handler that can be set via EventProcessorOptions. 
+ */ +public final class ExceptionReceivedEventArgs { + public static final String NO_ASSOCIATED_PARTITION = "N/A"; + private final String hostname; + private final Exception exception; + private final String action; + private final String partitionId; + + ExceptionReceivedEventArgs(String hostname, Exception exception, String action) { + this(hostname, exception, action, ExceptionReceivedEventArgs.NO_ASSOCIATED_PARTITION); + } + + ExceptionReceivedEventArgs(String hostname, Exception exception, String action, String partitionId) { + this.hostname = hostname; + this.exception = exception; + this.action = action; + if ((partitionId == null) || partitionId.isEmpty()) { + throw new IllegalArgumentException("PartitionId must not be null or empty"); + } + this.partitionId = partitionId; + } + + /** + * Allows distinguishing the error source if multiple hosts in a single process. + * + * @return The name of the host that experienced the exception. + */ + public String getHostname() { + return this.hostname; + } + + /** + * Returns the exception that was thrown. + * + * @return The exception. + */ + public Exception getException() { + return this.exception; + } + + /** + * See EventProcessorHostActionString for a list of possible values. + * + * @return A short string that indicates what general activity threw the exception. + */ + public String getAction() { + return this.action; + } + + /** + * If the error is associated with a particular partition (for example, failed to open the event processor + * for the partition), the id of the partition. Otherwise, NO_ASSOCIATED_PARTITION. + * + * @return A partition id. 
+ */ + public String getPartitionId() { + return this.partitionId; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ExceptionWithAction.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ExceptionWithAction.java new file mode 100644 index 0000000000000..22b8e406f0ef2 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ExceptionWithAction.java @@ -0,0 +1,27 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ + +package com.microsoft.azure.eventprocessorhost; + +// This class is never thrown into user code, so it can be package private. +class ExceptionWithAction extends Exception { + private static final long serialVersionUID = 7480590197418857145L; + + private final String action; + + ExceptionWithAction(Throwable e, String action) { + super(e); + this.action = action; + } + + ExceptionWithAction(Throwable e, String message, String action) { + super(message, e); + this.action = action; + } + + String getAction() { + return this.action; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/HostContext.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/HostContext.java new file mode 100644 index 0000000000000..d18a18317f771 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/HostContext.java @@ -0,0 +1,130 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ + +package com.microsoft.azure.eventprocessorhost; + +import com.microsoft.azure.eventhubs.RetryPolicy; + +import java.util.concurrent.ScheduledExecutorService; + +final class HostContext { + final private ScheduledExecutorService executor; + + // Ideally we wouldn't need the host, but there are certain things which can be dynamically changed + // by the user via APIs on the host and which need to be exposed on the HostContext. Passing the + // call through is easier and safer than trying to keep two copies in sync. + final private EventProcessorHost host; + final private String hostName; + + final private String eventHubPath; + final private String consumerGroupName; + final private String eventHubConnectionString; + final private RetryPolicy retryPolicy; + + final private ILeaseManager leaseManager; + final private ICheckpointManager checkpointManager; + + // Cannot be final because it is not available at HostContext construction time. + private EventProcessorOptions eventProcessorOptions = null; + + // Cannot be final because it is not available at HostContext construction time. 
+ private IEventProcessorFactory processorFactory = null; + + + HostContext(ScheduledExecutorService executor, + EventProcessorHost host, String hostName, + String eventHubPath, String consumerGroupName, String eventHubConnectionString, RetryPolicy retryPolicy, + ILeaseManager leaseManager, ICheckpointManager checkpointManager) { + this.executor = executor; + + this.host = host; + this.hostName = hostName; + + this.eventHubPath = eventHubPath; + this.consumerGroupName = consumerGroupName; + this.eventHubConnectionString = eventHubConnectionString; + this.retryPolicy = retryPolicy; + + this.leaseManager = leaseManager; + this.checkpointManager = checkpointManager; + } + + ScheduledExecutorService getExecutor() { + return this.executor; + } + + String getHostName() { + return this.hostName; + } + + String getEventHubPath() { + return this.eventHubPath; + } + + String getConsumerGroupName() { + return this.consumerGroupName; + } + + String getEventHubConnectionString() { + return this.eventHubConnectionString; + } + + RetryPolicy getRetryPolicy() { + return this.retryPolicy; + } + + ILeaseManager getLeaseManager() { + return this.leaseManager; + } + + ICheckpointManager getCheckpointManager() { + return this.checkpointManager; + } + + PartitionManagerOptions getPartitionManagerOptions() { + return this.host.getPartitionManagerOptions(); + } + + // May be null if called too early! Not set until register time. + // In particular, store initialization happens before this is set. + EventProcessorOptions getEventProcessorOptions() { + return this.eventProcessorOptions; + } + + void setEventProcessorOptions(EventProcessorOptions epo) { + this.eventProcessorOptions = epo; + } + + // May be null if called too early! Not set until register time. + // In particular, store initialization happens before this is set. 
+ IEventProcessorFactory getEventProcessorFactory() { + return this.processorFactory; + } + + void setEventProcessorFactory(IEventProcessorFactory pf) { + this.processorFactory = pf; + } + + // + // Logging utility functions. They are here rather than on LoggingUtils because they + // make use of this.hostName. + // + + String withHost(String logMessage) { + return "host " + this.hostName + ": " + logMessage; + } + + String withHostAndPartition(String partitionId, String logMessage) { + return withHost(partitionId + ": " + logMessage); + } + + String withHostAndPartition(PartitionContext context, String logMessage) { + return withHostAndPartition(context.getPartitionId(), logMessage); + } + + String withHostAndPartition(BaseLease lease, String logMessage) { + return withHostAndPartition(lease.getPartitionId(), logMessage); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ICheckpointManager.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ICheckpointManager.java new file mode 100644 index 0000000000000..687d547901085 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ICheckpointManager.java @@ -0,0 +1,97 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ + +package com.microsoft.azure.eventprocessorhost; + +import java.util.List; +import java.util.concurrent.CompletableFuture; + +/*** + * If you wish to have EventProcessorHost store checkpoints somewhere other than Azure Storage, + * you can write your own checkpoint manager using this interface. + * + * The Azure Storage managers use the same storage for both lease and checkpoints, so both + * interfaces are implemented by the same class. You are free to do the same thing if you have + * a unified store for both types of data. 
+ * + * This interface does not specify initialization methods because we have no way of knowing what + * information your implementation will require. If your implementation needs initialization, you + * will have to initialize the instance before passing it to the EventProcessorHost constructor. + */ +public interface ICheckpointManager { + /*** + * Does the checkpoint store exist? + * + * The returned CompletableFuture completes with true if the checkpoint store exists or false if it + * does not. It completes exceptionally on error. + * + * @return CompletableFuture {@literal ->} true if it exists, false if not + */ + public CompletableFuture checkpointStoreExists(); + + /*** + * Create the checkpoint store if it doesn't exist. Do nothing if it does exist. + * + * @return CompletableFuture {@literal ->} null on success, completes exceptionally on error. + */ + public CompletableFuture createCheckpointStoreIfNotExists(); + + /** + * Deletes the checkpoint store. + * + * @return CompletableFuture {@literal ->} null on success, completes exceptionally on error. + */ + public CompletableFuture deleteCheckpointStore(); + + /*** + * Get the checkpoint data associated with the given partition. Could return null if no checkpoint has + * been created for that partition. + * + * @param partitionId Id of partition to get checkpoint info for. + * + * @return CompletableFuture {@literal ->} checkpoint info, or null. Completes exceptionally on error. + */ + public CompletableFuture getCheckpoint(String partitionId); + + /*** + * Creates the checkpoint HOLDERs for the given partitions. Does nothing for any checkpoint HOLDERs + * that already exist. + * + * The semantics of this are complicated because it is possible to use the same store for both + * leases and checkpoints (the Azure Storage implementation does so) and it is required to + * have a lease for every partition but it is not required to have a checkpoint for a partition. 
+ * It is a valid scenario to never use checkpoints at all, so it is important for the store to + * distinguish between creating the structure(s) that will hold a checkpoint and actually creating + * a checkpoint (storing an offset/sequence number pair in the structure). + * + * @param partitionIds List of partitions to create checkpoint HOLDERs for. + * @return CompletableFuture {@literal ->} null on success, completes exceptionally on error. + */ + public CompletableFuture createAllCheckpointsIfNotExists(List partitionIds); + + /*** + * Update the checkpoint in the store with the offset/sequenceNumber in the provided checkpoint. + * + * The lease argument is necessary to make the Azure Storage implementation work correctly: the + * Azure Storage implementation stores the checkpoint as part of the lease and we cannot completely + * hide the connection between the two. If your implementation does not have this limitation, you are + * free to ignore the lease argument. + * + * @param lease lease for the partition to be checkpointed. + * @param checkpoint offset/sequenceNumber and partition id to update the store with. + * @return CompletableFuture {@literal ->} null on success. Completes exceptionally on error. + */ + public CompletableFuture updateCheckpoint(CompleteLease lease, Checkpoint checkpoint); + + /*** + * Delete the stored checkpoint data for the given partition. If there is no stored checkpoint for the + * given partition, that is treated as success. Deleting the checkpoint HOLDER is allowed but not required; + * your implementation is free to do whichever is more convenient. + * + * @param partitionId id of partition to delete checkpoint from store + * @return CompletableFuture {@literal ->} null on success. Completes exceptionally on error. 
+ */ + public CompletableFuture deleteCheckpoint(String partitionId); +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/IEventProcessor.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/IEventProcessor.java new file mode 100644 index 0000000000000..4d52f262c6d36 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/IEventProcessor.java @@ -0,0 +1,74 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ + +package com.microsoft.azure.eventprocessorhost; + +import com.microsoft.azure.eventhubs.EventData; + + +/** + * Interface that must be implemented by event processor classes. + *

    + * Any given instance of an event processor class will only process events from one partition + * of one Event Hub. A PartitionContext is provided with each call to the event processor because + * some parameters could change, but it will always be the same partition. + *

    + * Although EventProcessorHost is multithreaded, calls to a given instance of an event processor + * class are serialized, except for onError(). onOpen() is called first, then onEvents() will be called zero or more + * times. When the event processor needs to be shut down, whether because there was a failure + * somewhere, or the lease for the partition has been lost, or because the entire processor host + * is being shut down, onClose() is called after the last onEvents() call returns. + *

    + * onError() could be called while onEvents() or onClose() is executing. No synchronization is attempted + * in order to avoid possibly deadlocking. + */ +public interface IEventProcessor { + /** + * Called by processor host to initialize the event processor. + *

    + * If onOpen fails, this event processor host instance will give up ownership of the partition. + * + * @param context Information about the partition that this event processor will process events from. + * @throws Exception to indicate failure. + */ + public void onOpen(PartitionContext context) throws Exception; + + /** + * Called by processor host to indicate that the event processor is being stopped. + *

    + * If onClose fails, the exception is reported to the general exception notification handler set via + * EventProcessorOptions, if any, but is otherwise ignored. + * + * @param context Information about the partition. + * @param reason Reason why the event processor is being stopped. + * @throws Exception to indicate failure. + */ + public void onClose(PartitionContext context, CloseReason reason) throws Exception; + + /** + * Called by the processor host when a batch of events has arrived. + *

    + * This is where the real work of the event processor is done. It is normally called when one + * or more events have arrived. If the EventProcessorHost instance was set up with an EventProcessorOptions + * on which setInvokeProcessorAfterReceiveTimeout(true) has been called, then when a receive times out, + * onEvents will be called with an empty iterable. By default this option is false and receive timeouts do not + * cause a call to this method. + * + * @param context Information about the partition. + * @param events The events to be processed. May be empty. + * @throws Exception to indicate failure. + */ + public void onEvents(PartitionContext context, Iterable events) throws Exception; + + /** + * Called when the underlying client experiences an error while receiving. EventProcessorHost will take + * care of recovering from the error and continuing to pump events, so no action is required from + * your code. This method is provided for informational purposes. + * + * @param context Information about the partition. + * @param error The error that occurred. + */ + public void onError(PartitionContext context, Throwable error); +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/IEventProcessorFactory.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/IEventProcessorFactory.java new file mode 100644 index 0000000000000..2a5204a7f6dd0 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/IEventProcessorFactory.java @@ -0,0 +1,28 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ + +package com.microsoft.azure.eventprocessorhost; + + +/** + * Interface that must be implemented by an event processor factory class. + *

    + * User-provided factories are needed if creating an event processor object requires more work than + * just a new with a parameterless constructor. + * + * @param The type of event processor objects produced by this factory, which must implement IEventProcessor + */ +public interface IEventProcessorFactory { + /** + * Called to create an event processor for the given partition. + *

    + * If it throws an exception, that causes this event processor host instance to give up ownership of the partition. + * + * @param context Information about the partition that the event processor will handle events from. + * @throws Exception to indicate failure. + * @return The event processor object. + */ + public T createEventProcessor(PartitionContext context) throws Exception; +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ILeaseManager.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ILeaseManager.java new file mode 100644 index 0000000000000..ac6c8831033a5 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/ILeaseManager.java @@ -0,0 +1,145 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ + +package com.microsoft.azure.eventprocessorhost; + +import java.util.List; +import java.util.concurrent.CompletableFuture; + +/*** + * If you wish to have EventProcessorHost store leases somewhere other than Azure Storage, + * you can write your own lease manager using this interface. + * + * The Azure Storage managers use the same storage for both lease and checkpoints, so both + * interfaces are implemented by the same class. You are free to do the same thing if you have + * a unified store for both types of data. + * + * This interface does not specify initialization methods because we have no way of knowing what + * information your implementation will require. If your implementation needs initialization, you + * will have to initialize the instance before passing it to the EventProcessorHost constructor. 
+ */ +public interface ILeaseManager { + /** + * The lease duration is mostly internal to the lease manager implementation but may be needed + * by other parts of the event processor host. + * + * @return Duration of a lease before it expires unless renewed, specified in milliseconds. + */ + public int getLeaseDurationInMilliseconds(); + + /** + * Does the lease store exist? + *

    + * The returned CompletableFuture completes with true if the lease store exists or false if it + * does not. It completes exceptionally on error. + * + * @return CompletableFuture {@literal ->} true if it exists, false if not + */ + public CompletableFuture leaseStoreExists(); + + /** + * Create the lease store if it does not exist, do nothing if it does exist. + * + * @return CompletableFuture {@literal ->} null on success, completes exceptionally on error. + */ + public CompletableFuture createLeaseStoreIfNotExists(); + + /** + * Deletes the lease store. + * + * @return CompletableFuture {@literal ->} null on success, completes exceptionally on error. + */ + public CompletableFuture deleteLeaseStore(); + + /** + * Returns the lease info for the given partition. + * + * @param partitionId Get the lease info for this partition. + * @return CompletableFuture {@literal ->} Lease, completes exceptionally on error. + */ + public CompletableFuture getLease(String partitionId); + + /** + * Returns lightweight BaseLease for all leases, which includes name of owning host and whether lease + * is expired. An implementation is free to return CompleteLease or its own class derived from CompleteLease, + * but it is important that getAllLeases run as fast as possible. If it is faster to obtain only the + * information required for a BaseLease, we heavily recommend doing that. + * + * @return CompletableFuture {@literal ->} list of BaseLease, completes exceptionally on error. + */ + public CompletableFuture> getAllLeases(); + + + /** + * Create in the store a lease for each of the given partitions, if it does not exist. Do nothing for any + * lease which exists in the store already. 
+ * + * @param partitionIds ids of partitions to create lease info for + * @return CompletableFuture {@literal ->} null on success, completes exceptionally on error + */ + public CompletableFuture createAllLeasesIfNotExists(List partitionIds); + + /** + * Delete the lease info for a partition from the store. If there is no stored lease for the given partition, + * that is treated as success. + * + * @param lease the currently existing lease info for the partition + * @return CompletableFuture {@literal ->} null on success, completes exceptionally on error. + */ + public CompletableFuture deleteLease(CompleteLease lease); + + /** + * Acquire the lease on the desired partition for this EventProcessorHost. + *

    + * Note that it is legal to acquire a lease that is currently owned by another host, which is called "stealing". + * Lease-stealing is how partitions are redistributed when additional hosts are started. + *

    + * The existing Azure Storage implementation can experience races between two host instances attempting to acquire or steal + * the lease at the same time. To avoid situations where two host instances both believe that they own the lease, acquisition + * can fail non-exceptionally by returning false and should do so when there is any doubt -- the worst that can happen is that + * no host instance owns the lease for a short time. This is qualitatively different from, for example, the underlying store + * throwing an access exception, which is an error and should complete exceptionally. + * + * @param lease Lease info for the desired partition + * @return CompletableFuture {@literal ->} true if the lease was acquired, false if not, completes exceptionally on error. + */ + public CompletableFuture acquireLease(CompleteLease lease); + + /** + * Renew a lease currently held by this host instance. + *

    + * If the lease has been taken by another host instance (either stolen or after expiration) or explicitly released, + * renewLease must return false. With the Azure Storage-based implementation, it IS possible to renew an expired lease + * that has not been taken by another host, so your implementation can allow that or not, whichever is convenient. If + * it does not, renewLease should return false. + * + * @param lease Lease to be renewed + * @return true if the lease was renewed, false as described above, completes exceptionally on error. + */ + public CompletableFuture renewLease(CompleteLease lease); + + /** + * Give up a lease currently held by this host. + *

    + * If the lease has expired or been taken by another host, releasing it is unnecessary but will succeed since the intent + * has been fulfilled. + * + * @param lease Lease to be given up + * @return CompletableFuture {@literal ->} null on success, completes exceptionally on error. + */ + public CompletableFuture releaseLease(CompleteLease lease); + + /** + * Update the store with the information in the provided lease. + *

    + * It is necessary to currently hold a lease in order to update it. If the lease has been stolen, or expired, or + * released, it cannot be updated. Lease manager implementations should renew the lease before performing the update to avoid lease + * expiration during the process. + * + * @param lease New lease info to be stored + * @return true if the update was successful, false if lease was lost and could not be updated, completes exceptionally on error. + */ + public CompletableFuture updateLease(CompleteLease lease); +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/InMemoryCheckpointManager.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/InMemoryCheckpointManager.java new file mode 100644 index 0000000000000..29ba81a38e5cc --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/InMemoryCheckpointManager.java @@ -0,0 +1,164 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ + +package com.microsoft.azure.eventprocessorhost; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; + +/*** + * An ICheckpointManager implementation based on an in-memory store. + * + * THIS CLASS IS PROVIDED AS A CONVENIENCE FOR TESTING ONLY. All data stored via this class is in memory + * only and not persisted in any way. In addition, it is only visible within the same process: multiple + * instances of EventProcessorHost in the same process will share the same in-memory store and checkpoints + * created by one will be visible to the others, but that is not true across processes. 
+ * + * With an ordinary store, there is a clear and distinct line between the values that are persisted + * and the values that are live in memory. With an in-memory store, that line gets blurry. If we + * accidentally hand out a reference to the in-store object, then the calling code is operating on + * the "persisted" values without going through the manager and behavior will be very different. + * Hence, the implementation takes pains to distinguish between references to "live" and "persisted" + * checkpoints. + * + * To use this class, create a new instance and pass it to the EventProcessorHost constructor that takes + * ICheckpointManager as an argument. After the EventProcessorHost instance is constructed, be sure to + * call initialize() on this object before starting processing with EventProcessorHost.registerEventProcessor() + * or EventProcessorHost.registerEventProcessorFactory(). + */ +public class InMemoryCheckpointManager implements ICheckpointManager { + private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(InMemoryCheckpointManager.class); + private HostContext hostContext; + + public InMemoryCheckpointManager() { + } + + // This object is constructed before the EventProcessorHost and passed as an argument to + // EventProcessorHost's constructor. So it has to get context info later. 
+ public void initialize(HostContext hostContext) { + this.hostContext = hostContext; + } + + @Override + public CompletableFuture checkpointStoreExists() { + boolean exists = InMemoryCheckpointStore.singleton.existsMap(); + TRACE_LOGGER.debug(this.hostContext.withHost("checkpointStoreExists() " + exists)); + return CompletableFuture.completedFuture(exists); + } + + @Override + public CompletableFuture createCheckpointStoreIfNotExists() { + TRACE_LOGGER.debug(this.hostContext.withHost("createCheckpointStoreIfNotExists()")); + InMemoryCheckpointStore.singleton.initializeMap(); + return CompletableFuture.completedFuture(null); + } + + @Override + public CompletableFuture deleteCheckpointStore() { + TRACE_LOGGER.debug(this.hostContext.withHost("deleteCheckpointStore()")); + InMemoryCheckpointStore.singleton.deleteMap(); + return CompletableFuture.completedFuture(null); + } + + @Override + public CompletableFuture getCheckpoint(String partitionId) { + Checkpoint returnCheckpoint = null; + Checkpoint checkpointInStore = InMemoryCheckpointStore.singleton.getCheckpoint(partitionId); + if (checkpointInStore == null) { + TRACE_LOGGER.warn(this.hostContext.withHostAndPartition(partitionId, + "getCheckpoint() no existing Checkpoint")); + returnCheckpoint = null; + } else if (checkpointInStore.getSequenceNumber() == -1) { + // Uninitialized, so return null. 
+ TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(partitionId, "getCheckpoint() uninitalized")); + returnCheckpoint = null; + } else { + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(partitionId, + "getCheckpoint() found " + checkpointInStore.getOffset() + "//" + checkpointInStore.getSequenceNumber())); + returnCheckpoint = new Checkpoint(checkpointInStore); + } + return CompletableFuture.completedFuture(returnCheckpoint); + } + + @Override + public CompletableFuture createAllCheckpointsIfNotExists(List partitionIds) { + for (String id : partitionIds) { + Checkpoint checkpointInStore = InMemoryCheckpointStore.singleton.getCheckpoint(id); + if (checkpointInStore != null) { + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(id, + "createCheckpointIfNotExists() found existing checkpoint, OK")); + } else { + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(id, + "createCheckpointIfNotExists() creating new checkpoint")); + Checkpoint newStoreCheckpoint = new Checkpoint(id); + // This API actually creates the holder, not the checkpoint itself. In this implementation, we do create a Checkpoint object + // and put it in the store, but the values are set to indicate that it is not initialized. 
+ newStoreCheckpoint.setOffset(null); + newStoreCheckpoint.setSequenceNumber(-1); + InMemoryCheckpointStore.singleton.setOrReplaceCheckpoint(newStoreCheckpoint); + } + } + return CompletableFuture.completedFuture(null); + } + + @Override + public CompletableFuture updateCheckpoint(CompleteLease lease, Checkpoint checkpoint) { + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(checkpoint.getPartitionId(), + "updateCheckpoint() " + checkpoint.getOffset() + "//" + checkpoint.getSequenceNumber())); + Checkpoint checkpointInStore = InMemoryCheckpointStore.singleton.getCheckpoint(checkpoint.getPartitionId()); + if (checkpointInStore != null) { + checkpointInStore.setOffset(checkpoint.getOffset()); + checkpointInStore.setSequenceNumber(checkpoint.getSequenceNumber()); + } else { + TRACE_LOGGER.warn(this.hostContext.withHostAndPartition(checkpoint.getPartitionId(), + "updateCheckpoint() can't find checkpoint")); + } + return CompletableFuture.completedFuture(null); + } + + @Override + public CompletableFuture deleteCheckpoint(String partitionId) { + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(partitionId, "deleteCheckpoint()")); + InMemoryCheckpointStore.singleton.removeCheckpoint(partitionId); + return CompletableFuture.completedFuture(null); + } + + + private static class InMemoryCheckpointStore { + final static InMemoryCheckpointStore singleton = new InMemoryCheckpointStore(); + + private ConcurrentHashMap inMemoryCheckpointsPrivate = null; + + synchronized boolean existsMap() { + return (this.inMemoryCheckpointsPrivate != null); + } + + synchronized void initializeMap() { + if (this.inMemoryCheckpointsPrivate == null) { + this.inMemoryCheckpointsPrivate = new ConcurrentHashMap(); + } + } + + synchronized void deleteMap() { + this.inMemoryCheckpointsPrivate = null; + } + + synchronized Checkpoint getCheckpoint(String partitionId) { + return this.inMemoryCheckpointsPrivate.get(partitionId); + } + + synchronized void setOrReplaceCheckpoint(Checkpoint 
newCheckpoint) { + this.inMemoryCheckpointsPrivate.put(newCheckpoint.getPartitionId(), newCheckpoint); + } + + synchronized void removeCheckpoint(String partitionId) { + this.inMemoryCheckpointsPrivate.remove(partitionId); + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/InMemoryLeaseManager.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/InMemoryLeaseManager.java new file mode 100644 index 0000000000000..3e2a0332f6d74 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/InMemoryLeaseManager.java @@ -0,0 +1,416 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ + +package com.microsoft.azure.eventprocessorhost; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.*; + +/*** + * An ILeaseManager implementation based on an in-memory store. + * + * THIS CLASS IS PROVIDED AS A CONVENIENCE FOR TESTING ONLY. All data stored via this class is in memory + * only and not persisted in any way. In addition, it is only visible within the same process: multiple + * instances of EventProcessorHost in the same process will share the same in-memory store and leases + * created by one will be visible to the others, but that is not true across processes. + * + * With an ordinary store, there is a clear and distinct line between the values that are persisted + * and the values that are live in memory. With an in-memory store, that line gets blurry. If we + * accidentally hand out a reference to the in-store object, then the calling code is operating on + * the "persisted" values without going through the manager and behavior will be very different. 
+ * Hence, the implementation takes pains to distinguish between references to "live" and "persisted" + * checkpoints. + * + * To use this class, create a new instance and pass it to the EventProcessorHost constructor that takes + * ILeaseManager as an argument. After the EventProcessorHost instance is constructed, be sure to + * call initialize() on this object before starting processing with EventProcessorHost.registerEventProcessor() + * or EventProcessorHost.registerEventProcessorFactory(). + */ +public class InMemoryLeaseManager implements ILeaseManager { + private final static Logger TRACE_LOGGER = LoggerFactory.getLogger(InMemoryLeaseManager.class); + private HostContext hostContext; + private long millisecondsLatency = 0; + + public InMemoryLeaseManager() { + } + + // This object is constructed before the EventProcessorHost and passed as an argument to + // EventProcessorHost's constructor. So it has to get context info later. + public void initialize(HostContext hostContext) { + this.hostContext = hostContext; + } + + public void setLatency(long milliseconds) { + this.millisecondsLatency = milliseconds; + } + + private void latency(String caller) { + if (this.millisecondsLatency > 0) { + try { + //TRACE_LOGGER.info("sleep " + caller); + Thread.sleep(this.millisecondsLatency); + } catch (InterruptedException e) { + // Don't care + TRACE_LOGGER.info("sleepFAIL " + caller); + } + } + } + + @Override + public int getLeaseDurationInMilliseconds() { + return this.hostContext.getPartitionManagerOptions().getLeaseDurationInSeconds() * 1000; + } + + @Override + public CompletableFuture leaseStoreExists() { + boolean exists = InMemoryLeaseStore.singleton.existsMap(); + latency("leaseStoreExists"); + TRACE_LOGGER.debug(this.hostContext.withHost("leaseStoreExists() " + exists)); + return CompletableFuture.completedFuture(exists); + } + + @Override + public CompletableFuture createLeaseStoreIfNotExists() { + 
TRACE_LOGGER.debug(this.hostContext.withHost("createLeaseStoreIfNotExists()")); + InMemoryLeaseStore.singleton.initializeMap(getLeaseDurationInMilliseconds()); + latency("createLeaseStoreIfNotExists"); + return CompletableFuture.completedFuture(null); + } + + @Override + public CompletableFuture deleteLeaseStore() { + TRACE_LOGGER.debug(this.hostContext.withHost("deleteLeaseStore()")); + InMemoryLeaseStore.singleton.deleteMap(); + latency("deleteLeaseStore"); + return CompletableFuture.completedFuture(null); + } + + @Override + public CompletableFuture getLease(String partitionId) { + TRACE_LOGGER.debug(this.hostContext.withHost("getLease()")); + latency("getLease"); + InMemoryLease leaseInStore = InMemoryLeaseStore.singleton.getLease(partitionId); + return CompletableFuture.completedFuture(new InMemoryLease(leaseInStore)); + } + + @Override + public CompletableFuture> getAllLeases() { + ArrayList infos = new ArrayList(); + for (String id : InMemoryLeaseStore.singleton.getPartitionIds()) { + InMemoryLease leaseInStore = InMemoryLeaseStore.singleton.getLease(id); + infos.add(new BaseLease(id, leaseInStore.getOwner(), !leaseInStore.isExpiredSync())); + } + latency("getAllLeasesStateInfo"); + return CompletableFuture.completedFuture(infos); + } + + @Override + public CompletableFuture createAllLeasesIfNotExists(List partitionIds) { + ArrayList> createFutures = new ArrayList>(); + + // Implemented like this to provide an experience more similar to lease creation in the Storage-based manager. 
+ for (String id : partitionIds) { + final String workingId = id; + CompletableFuture oneCreate = CompletableFuture.supplyAsync(() -> { + InMemoryLease leaseInStore = InMemoryLeaseStore.singleton.getLease(workingId); + InMemoryLease returnLease = null; + if (leaseInStore != null) { + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(workingId, + "createLeaseIfNotExists() found existing lease, OK")); + returnLease = new InMemoryLease(leaseInStore); + } else { + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(workingId, + "createLeaseIfNotExists() creating new lease")); + InMemoryLease newStoreLease = new InMemoryLease(workingId); + InMemoryLeaseStore.singleton.setOrReplaceLease(newStoreLease); + returnLease = new InMemoryLease(newStoreLease); + } + latency("createLeaseIfNotExists " + workingId); + return returnLease; + }, this.hostContext.getExecutor()); + createFutures.add(oneCreate); + } + + CompletableFuture dummy[] = new CompletableFuture[createFutures.size()]; + return CompletableFuture.allOf(createFutures.toArray(dummy)); + } + + @Override + public CompletableFuture deleteLease(CompleteLease lease) { + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(lease, "deleteLease()")); + InMemoryLeaseStore.singleton.removeLease((InMemoryLease) lease); + latency("deleteLease " + lease.getPartitionId()); + return CompletableFuture.completedFuture(null); + } + + @Override + public CompletableFuture acquireLease(CompleteLease lease) { + InMemoryLease leaseToAcquire = (InMemoryLease) lease; + + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(leaseToAcquire, "acquireLease()")); + + boolean retval = true; + InMemoryLease leaseInStore = InMemoryLeaseStore.singleton.getLease(leaseToAcquire.getPartitionId()); + if (leaseInStore != null) { + InMemoryLease wasUnowned = InMemoryLeaseStore.singleton.atomicAquireUnowned(leaseToAcquire.getPartitionId(), this.hostContext.getHostName()); + if (wasUnowned != null) { + // atomicAcquireUnowned already set 
ownership of the persisted lease, just update the live lease. + leaseToAcquire.setOwner(this.hostContext.getHostName()); + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(leaseToAcquire, + "acquireLease() acquired lease")); + leaseInStore = wasUnowned; + leaseToAcquire.setExpirationTime(leaseInStore.getExpirationTime()); + } else { + if (leaseInStore.isOwnedBy(this.hostContext.getHostName())) { + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(leaseToAcquire, + "acquireLease() already hold lease")); + } else { + String oldOwner = leaseInStore.getOwner(); + // Make change in both persisted lease and live lease! + InMemoryLeaseStore.singleton.stealLease(leaseInStore, this.hostContext.getHostName()); + leaseToAcquire.setOwner(this.hostContext.getHostName()); + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(leaseToAcquire, + "acquireLease() stole lease from " + oldOwner)); + } + long newExpiration = System.currentTimeMillis() + getLeaseDurationInMilliseconds(); + // Make change in both persisted lease and live lease! + leaseInStore.setExpirationTime(newExpiration); + leaseToAcquire.setExpirationTime(newExpiration); + } + } else { + TRACE_LOGGER.warn(this.hostContext.withHostAndPartition(leaseToAcquire, + "acquireLease() can't find lease")); + retval = false; + } + + latency("acquireLease " + lease.getPartitionId()); + return CompletableFuture.completedFuture(retval); + } + + // Real partition pumps get "notified" when another host has stolen their lease because the receiver throws + // a ReceiverDisconnectedException. It doesn't matter how many hosts try to steal the lease at the same time, + // only one will end up with it and that one will kick the others off via the exclusivity of epoch receivers. + // This mechanism simulates that for dummy partition pumps used in testing. 
If expectedOwner does not currently + // own the lease for the given partition, then notifier is called immediately, otherwise it is called whenever + // ownership of the lease changes. + public void notifyOnSteal(String expectedOwner, String partitionId, Callable notifier) { + InMemoryLeaseStore.singleton.notifyOnSteal(expectedOwner, partitionId, notifier); + } + + @Override + public CompletableFuture renewLease(CompleteLease lease) { + InMemoryLease leaseToRenew = (InMemoryLease) lease; + + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(leaseToRenew, "renewLease()")); + + boolean retval = true; + InMemoryLease leaseInStore = InMemoryLeaseStore.singleton.getLease(leaseToRenew.getPartitionId()); + if (leaseInStore != null) { + // MATCH BEHAVIOR OF AzureStorageCheckpointLeaseManager: + // Renewing a lease that has expired succeeds unless some other host has grabbed it already. + // So don't check expiration, just ownership. + if (leaseInStore.isOwnedBy(this.hostContext.getHostName())) { + long newExpiration = System.currentTimeMillis() + getLeaseDurationInMilliseconds(); + // Make change in both persisted lease and live lease! 
+ leaseInStore.setExpirationTime(newExpiration); + leaseToRenew.setExpirationTime(newExpiration); + } else { + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(leaseToRenew, + "renewLease() not renewed because we don't own lease")); + retval = false; + } + } else { + TRACE_LOGGER.warn(this.hostContext.withHostAndPartition(leaseToRenew, + "renewLease() can't find lease")); + retval = false; + } + + latency("renewLease " + lease.getPartitionId()); + return CompletableFuture.completedFuture(retval); + } + + @Override + public CompletableFuture releaseLease(CompleteLease lease) { + InMemoryLease leaseToRelease = (InMemoryLease) lease; + + CompletableFuture retval = CompletableFuture.completedFuture(null); + + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(leaseToRelease, "releaseLease()")); + + InMemoryLease leaseInStore = InMemoryLeaseStore.singleton.getLease(leaseToRelease.getPartitionId()); + if (leaseInStore != null) { + if (!leaseInStore.isExpiredSync() && leaseInStore.isOwnedBy(this.hostContext.getHostName())) { + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(leaseToRelease, "releaseLease() released OK")); + // Make change in both persisted lease and live lease! + leaseInStore.setOwner(""); + leaseToRelease.setOwner(""); + leaseInStore.setExpirationTime(0); + leaseToRelease.setExpirationTime(0); + } else { + // Lease was lost, intent achieved. 
+ } + } else { + TRACE_LOGGER.warn(this.hostContext.withHostAndPartition(leaseToRelease, "releaseLease() can't find lease in store")); + retval = new CompletableFuture(); + retval.completeExceptionally(new CompletionException(new RuntimeException("releaseLease can't find lease in store for " + leaseToRelease.getPartitionId()))); + } + latency("releaseLease " + lease.getPartitionId()); + return retval; + } + + @Override + public CompletableFuture updateLease(CompleteLease lease) { + InMemoryLease leaseToUpdate = (InMemoryLease) lease; + + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(leaseToUpdate, "updateLease()")); + + // Renew lease first so it doesn't expire in the middle. + return renewLease(leaseToUpdate).thenApply((retval) -> + { + if (retval) { + InMemoryLease leaseInStore = InMemoryLeaseStore.singleton.getLease(leaseToUpdate.getPartitionId()); + if (leaseInStore != null) { + if (!leaseInStore.isExpiredSync() && leaseInStore.isOwnedBy(this.hostContext.getHostName())) { + // We are updating with values already in the live lease, so only need to set on the persisted lease. 
+ leaseInStore.setEpoch(leaseToUpdate.getEpoch()); + // Don't copy expiration time, that is managed directly by Acquire/Renew/Release + } else { + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(leaseToUpdate, + "updateLease() not updated because we don't own lease")); + retval = false; + } + } else { + TRACE_LOGGER.warn(this.hostContext.withHostAndPartition(leaseToUpdate, + "updateLease() can't find lease")); + retval = false; + } + } + latency("updateLease " + lease.getPartitionId()); + return retval; + }); + } + + + private static class InMemoryLeaseStore { + final static InMemoryLeaseStore singleton = new InMemoryLeaseStore(); + private static int leaseDurationInMilliseconds; + + private ConcurrentHashMap inMemoryLeasesPrivate = null; + private ConcurrentHashMap> notifiers = new ConcurrentHashMap>(); + + synchronized boolean existsMap() { + return (this.inMemoryLeasesPrivate != null); + } + + synchronized void initializeMap(int leaseDurationInMilliseconds) { + if (this.inMemoryLeasesPrivate == null) { + this.inMemoryLeasesPrivate = new ConcurrentHashMap(); + } + InMemoryLeaseStore.leaseDurationInMilliseconds = leaseDurationInMilliseconds; + } + + synchronized void deleteMap() { + this.inMemoryLeasesPrivate = null; + } + + synchronized InMemoryLease getLease(String partitionId) { + return this.inMemoryLeasesPrivate.get(partitionId); + } + + synchronized List getPartitionIds() { + ArrayList ids = new ArrayList(); + this.inMemoryLeasesPrivate.keySet().forEach((key) -> + { + ids.add(key); + }); + return ids; + } + + synchronized InMemoryLease atomicAquireUnowned(String partitionId, String newOwner) { + InMemoryLease leaseInStore = getLease(partitionId); + if (leaseInStore.isExpiredSync() || (leaseInStore.getOwner() == null) || leaseInStore.getOwner().isEmpty()) { + leaseInStore.setOwner(newOwner); + leaseInStore.setExpirationTime(System.currentTimeMillis() + InMemoryLeaseStore.leaseDurationInMilliseconds); + } else { + // Return null if it was already 
owned + leaseInStore = null; + } + return leaseInStore; + } + + synchronized void notifyOnSteal(String expectedOwner, String partitionId, Callable notifier) { + InMemoryLease leaseInStore = getLease(partitionId); + if (!leaseInStore.isOwnedBy(expectedOwner)) { + // Already stolen. + try { + notifier.call(); + } catch (Exception e) { + } + } else { + this.notifiers.put(partitionId, notifier); + } + } + + synchronized void stealLease(InMemoryLease stealee, String newOwner) { + stealee.setOwner(newOwner); + Callable notifier = this.notifiers.get(stealee.getPartitionId()); + if (notifier != null) { + try { + notifier.call(); + } catch (Exception e) { + } + } + } + + synchronized void setOrReplaceLease(InMemoryLease newLease) { + this.inMemoryLeasesPrivate.put(newLease.getPartitionId(), newLease); + } + + synchronized void removeLease(InMemoryLease goneLease) { + this.inMemoryLeasesPrivate.remove(goneLease.getPartitionId()); + } + } + + + private static class InMemoryLease extends CompleteLease { + private final static Logger TRACE_LOGGER = LoggerFactory.getLogger(InMemoryLease.class); + private long expirationTimeMillis = 0; + + InMemoryLease(String partitionId) { + super(partitionId); + this.epoch = 0; + } + + InMemoryLease(InMemoryLease source) { + super(source); + this.expirationTimeMillis = source.expirationTimeMillis; + this.epoch = source.epoch; + } + + long getExpirationTime() { + return this.expirationTimeMillis; + } + + void setExpirationTime(long expireAtMillis) { + this.expirationTimeMillis = expireAtMillis; + } + + public boolean isExpiredSync() { + boolean hasExpired = (System.currentTimeMillis() >= this.expirationTimeMillis); + if (hasExpired) { + // CHANGE TO MATCH BEHAVIOR OF AzureStorageCheckpointLeaseManager + // An expired lease can be renewed by the previous owner. In order to implement that behavior for + // InMemory, the owner field has to remain unchanged. 
+ //setOwner(""); + } + TRACE_LOGGER.debug("isExpired(" + this.getPartitionId() + (hasExpired ? ") expired " : ") leased ") + (this.expirationTimeMillis - System.currentTimeMillis())); + return hasExpired; + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/LeaseLostException.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/LeaseLostException.java new file mode 100644 index 0000000000000..ed9482db73592 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/LeaseLostException.java @@ -0,0 +1,27 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ + +package com.microsoft.azure.eventprocessorhost; + +public class LeaseLostException extends Exception { + private static final long serialVersionUID = -4625001822439809869L; + + private final BaseLease lease; + + LeaseLostException(BaseLease lease, Throwable cause) { + super(null, cause); + this.lease = lease; + } + + LeaseLostException(BaseLease lease, String message) { + super(message, null); + this.lease = lease; + } + + // We don't want to expose Lease to the public. + public String getPartitionId() { + return this.lease.getPartitionId(); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/LoggingUtils.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/LoggingUtils.java new file mode 100644 index 0000000000000..e26d9e8778bd1 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/LoggingUtils.java @@ -0,0 +1,75 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ + +package com.microsoft.azure.eventprocessorhost; + +import java.util.concurrent.CompletionException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ThreadPoolExecutor; + +/** + * Centralize log message generation + */ +public final class LoggingUtils { + static CompletionException wrapException(Throwable e, String action) { + return new CompletionException(new ExceptionWithAction(e, action)); + } + + static CompletionException wrapExceptionWithMessage(Throwable e, String message, String action) { + return new CompletionException(new ExceptionWithAction(e, message, action)); + } + + // outAction can be null if you don't care about any action string + static Throwable unwrapException(Throwable wrapped, StringBuilder outAction) { + Throwable unwrapped = wrapped; + + while ((unwrapped instanceof ExecutionException) || (unwrapped instanceof CompletionException) || + (unwrapped instanceof ExceptionWithAction)) { + if ((unwrapped instanceof ExceptionWithAction) && (outAction != null)) { + // Save the action string from an ExceptionWithAction, if desired. 
+ outAction.append(((ExceptionWithAction) unwrapped).getAction()); + } + + if ((unwrapped.getCause() != null) && (unwrapped.getCause() instanceof Exception)) { + unwrapped = (Exception) unwrapped.getCause(); + } else { + break; + } + } + + return unwrapped; + } + + static String threadPoolStatusReport(String hostName, ScheduledExecutorService threadPool) { + String report = ""; + + if (threadPool instanceof ThreadPoolExecutor) { + ThreadPoolExecutor pool = (ThreadPoolExecutor) threadPool; + + StringBuilder builder = new StringBuilder(); + builder.append("Thread pool settings: core: "); + builder.append(pool.getCorePoolSize()); + builder.append(" active: "); + builder.append(pool.getActiveCount()); + builder.append(" current: "); + builder.append(pool.getPoolSize()); + builder.append(" largest: "); + builder.append(pool.getLargestPoolSize()); + builder.append(" max: "); + builder.append(pool.getMaximumPoolSize()); + builder.append(" policy: "); + builder.append(pool.getRejectedExecutionHandler().getClass().toString()); + builder.append(" queue avail: "); + builder.append(pool.getQueue().remainingCapacity()); + + report = builder.toString(); + } else { + report = "Cannot report on thread pool of type " + threadPool.getClass().toString(); + } + + return report; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionContext.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionContext.java new file mode 100644 index 0000000000000..601baab2d469e --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionContext.java @@ -0,0 +1,202 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ + +package com.microsoft.azure.eventprocessorhost; + +import com.microsoft.azure.eventhubs.EventData; +import com.microsoft.azure.eventhubs.EventPosition; +import com.microsoft.azure.eventhubs.ReceiverRuntimeInformation; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.concurrent.CompletableFuture; +import java.util.function.Function; + +/*** + * PartitionContext is used to provide partition-related information to the methods of IEventProcessor, + * particularly onEvents where the user's event-processing logic lives. It also allows the user to + * persist checkpoints for the partition, which determine where event processing will begin if the + * event processor for that partition must be restarted, such as if ownership of the partition moves + * from one event processor host instance to another. + */ +public class PartitionContext { + private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(PartitionContext.class); + private final HostContext hostContext; + private final String partitionId; + private CompleteLease lease; + private String offset = null; + private long sequenceNumber = 0; + private ReceiverRuntimeInformation runtimeInformation; + + PartitionContext(HostContext hostContext, String partitionId) { + this.hostContext = hostContext; + this.partitionId = partitionId; + + this.runtimeInformation = new ReceiverRuntimeInformation(partitionId); + } + + /*** + * Get the name of the consumer group that is being received from. + * + * @return consumer group name + */ + public String getConsumerGroupName() { + return this.hostContext.getConsumerGroupName(); + } + + /*** + * Get the path of the event hub that is being received from. + * + * @return event hub path + */ + public String getEventHubPath() { + return this.hostContext.getEventHubPath(); + } + + /*** + * Get the name of the event processor host instance. 
+ * + * @return event processor host instance name + */ + public String getOwner() { + return this.lease.getOwner(); + } + + /*** + * If receiver runtime metrics have been enabled in EventProcessorHost, this method + * gets the metrics as they come in. + * + * @return See ReceiverRuntimeInformation. + */ + public ReceiverRuntimeInformation getRuntimeInformation() { + return this.runtimeInformation; + } + + void setRuntimeInformation(ReceiverRuntimeInformation value) { + this.runtimeInformation = value; + } + + CompleteLease getLease() { + return this.lease; + } + + // Unlike other properties which are immutable after creation, the lease is updated dynamically and needs a setter. + void setLease(CompleteLease lease) { + this.lease = lease; + } + + void setOffsetAndSequenceNumber(EventData event) { + if (event.getSystemProperties().getSequenceNumber() >= this.sequenceNumber) { + this.offset = event.getSystemProperties().getOffset(); + this.sequenceNumber = event.getSystemProperties().getSequenceNumber(); + } else { + TRACE_LOGGER.info(this.hostContext.withHostAndPartition(this.partitionId, + "setOffsetAndSequenceNumber(" + event.getSystemProperties().getOffset() + "//" + + event.getSystemProperties().getSequenceNumber() + ") would move backwards, ignoring")); + } + } + + /*** + * Get the id of the partition being received from. + * + * @return partition id + */ + public String getPartitionId() { + return this.partitionId; + } + + // Returns a String (offset) or Instant (timestamp). + CompletableFuture getInitialOffset() { + return this.hostContext.getCheckpointManager().getCheckpoint(this.partitionId) + .thenApply((startingCheckpoint) -> + { + return checkpointToOffset(startingCheckpoint); + }); + } + + EventPosition checkpointToOffset(Checkpoint startingCheckpoint) { + EventPosition startAt = null; + if (startingCheckpoint == null) { + // No checkpoint was ever stored. Use the initialOffsetProvider instead. 
+ Function initialPositionProvider = this.hostContext.getEventProcessorOptions().getInitialPositionProvider(); + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(this.partitionId, "Calling user-provided initial position provider")); + startAt = initialPositionProvider.apply(this.partitionId); + // Leave this.offset as null. The initialPositionProvider cannot provide enough information to write a valid checkpoint: + // at most it will give one of offset or sequence number, and if it is a starting time then it doesn't have either. + TRACE_LOGGER.info(this.hostContext.withHostAndPartition(this.partitionId, "Initial position provided: " + startAt)); + } else { + // Checkpoint is valid, use it. + this.offset = startingCheckpoint.getOffset(); + startAt = EventPosition.fromOffset(this.offset); + this.sequenceNumber = startingCheckpoint.getSequenceNumber(); + TRACE_LOGGER.info(this.hostContext.withHostAndPartition(this.partitionId, "Retrieved starting offset " + this.offset + "//" + this.sequenceNumber)); + } + + return startAt; + } + + /** + * Writes the position of the last event in the current batch to the checkpoint store via the checkpoint manager. + *

    + * It is important to check the result in order to detect failures. + *

    + * If receiving started from a user-provided EventPosition and no events have been received yet, + * then this will fail. (This scenario is possible when invoke-after-receive-timeout has been set + * in EventProcessorOptions.) + * + * @return CompletableFuture {@literal ->} null when the checkpoint has been persisted successfully, completes exceptionally on error. + */ + public CompletableFuture checkpoint() { + CompletableFuture result = null; + if (this.offset == null) { + result = new CompletableFuture(); + result.completeExceptionally(new RuntimeException("Cannot checkpoint until at least one event has been received on this partition")); + } else { + Checkpoint capturedCheckpoint = new Checkpoint(this.partitionId, this.offset, this.sequenceNumber); + result = checkpoint(capturedCheckpoint); + } + return result; + } + + /** + * Writes the position of the provided EventData instance to the checkpoint store via the checkpoint manager. + *

    + * It is important to check the result in order to detect failures. + * + * @param event A received EventData + * @return CompletableFuture {@literal ->} null when the checkpoint has been persisted successfully, completes exceptionally on error. + */ + public CompletableFuture checkpoint(EventData event) { + CompletableFuture result = null; + if (event == null) { + result = new CompletableFuture(); + result.completeExceptionally(new IllegalArgumentException("Cannot checkpoint with null EventData")); + } else { + result = checkpoint(new Checkpoint(this.partitionId, event.getSystemProperties().getOffset(), event.getSystemProperties().getSequenceNumber())); + } + return result; + } + + /** + * Writes the position of the provided Checkpoint instance to the checkpoint store via the checkpoint manager. + * + * It is important to check the result in order to detect failures. + * + * @param checkpoint a checkpoint + * @return CompletableFuture {@literal ->} null when the checkpoint has been persisted successfully, completes exceptionally on error. 
+ */ + public CompletableFuture checkpoint(Checkpoint checkpoint) { + CompletableFuture result = null; + if (checkpoint == null) { + result = new CompletableFuture(); + result.completeExceptionally(new IllegalArgumentException("Cannot checkpoint with null Checkpoint")); + } else { + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(checkpoint.getPartitionId(), + "Saving checkpoint: " + checkpoint.getOffset() + "//" + checkpoint.getSequenceNumber())); + result = this.hostContext.getCheckpointManager().updateCheckpoint(this.lease, checkpoint); + } + return result; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionManager.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionManager.java new file mode 100644 index 0000000000000..ffad0a9de9305 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionManager.java @@ -0,0 +1,334 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ + +package com.microsoft.azure.eventprocessorhost; + +import com.microsoft.azure.eventhubs.EventHubClient; +import com.microsoft.azure.eventhubs.EventHubException; +import com.microsoft.azure.eventhubs.EventHubRuntimeInformation; +import com.microsoft.azure.eventhubs.IllegalEntityException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Arrays; +import java.util.concurrent.*; + +class PartitionManager extends Closable { + private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(PartitionManager.class); + // Protected instead of private for testability + protected final HostContext hostContext; + final private Object scanFutureSynchronizer = new Object(); + private final int retryMax = 5; + protected PumpManager pumpManager = null; + protected volatile String partitionIds[] = null; + private ScheduledFuture scanFuture = null; + + PartitionManager(HostContext hostContext) { + super(null); + this.hostContext = hostContext; + } + + CompletableFuture cachePartitionIds() { + CompletableFuture retval = null; + + if (this.partitionIds != null) { + retval = CompletableFuture.completedFuture(null); + } else { + // This try-catch is necessary because EventHubClient.create can directly throw + // EventHubException or IOException, in addition to whatever failures may occur when the result of + // the CompletableFuture is evaluated. 
+ try { + final CompletableFuture cleanupFuture = new CompletableFuture(); + + // Stage 0A: get EventHubClient for the event hub + retval = EventHubClient.create(this.hostContext.getEventHubConnectionString(), this.hostContext.getRetryPolicy(), this.hostContext.getExecutor()) + // Stage 0B: set up a way to close the EventHubClient when we're done + .thenApplyAsync((ehClient) -> + { + final EventHubClient saveForCleanupClient = ehClient; + cleanupFuture.thenComposeAsync((empty) -> saveForCleanupClient.close(), this.hostContext.getExecutor()); + return ehClient; + }, this.hostContext.getExecutor()) + // Stage 1: use the client to get runtime info for the event hub + .thenComposeAsync((ehClient) -> ehClient.getRuntimeInformation(), this.hostContext.getExecutor()) + // Stage 2: extract the partition ids from the runtime info or throw on null (timeout) + .thenAcceptAsync((EventHubRuntimeInformation ehInfo) -> + { + if (ehInfo != null) { + this.partitionIds = ehInfo.getPartitionIds(); + + TRACE_LOGGER.info(this.hostContext.withHost("Eventhub " + this.hostContext.getEventHubPath() + " count of partitions: " + ehInfo.getPartitionCount())); + for (String id : this.partitionIds) { + TRACE_LOGGER.info(this.hostContext.withHost("Found partition with id: " + id)); + } + } else { + throw new CompletionException(new TimeoutException("getRuntimeInformation returned null")); + } + }, this.hostContext.getExecutor()) + // Stage 3: RUN REGARDLESS OF EXCEPTIONS -- if there was an error, wrap it in IllegalEntityException and throw + .handleAsync((empty, e) -> + { + cleanupFuture.complete(null); // trigger client cleanup + if (e != null) { + Throwable notifyWith = e; + if (e instanceof CompletionException) { + notifyWith = e.getCause(); + } + throw new CompletionException(new IllegalEntityException("Failure getting partition ids for event hub", notifyWith)); + } + return null; + }, this.hostContext.getExecutor()); + } catch (EventHubException | IOException e) { + retval = new 
CompletableFuture(); + retval.completeExceptionally(new IllegalEntityException("Failure getting partition ids for event hub", e)); + } + } + + return retval; + } + + // Testability hook: allows a test subclass to insert dummy pump. + PumpManager createPumpTestHook() { + return new PumpManager(this.hostContext, this); + } + + // Testability hook: called after stores are initialized. + void onInitializeCompleteTestHook() { + } + + // Testability hook: called at the end of the main loop after all partition checks/stealing is complete. + void onPartitionCheckCompleteTestHook() { + } + + CompletableFuture stopPartitions() { + setClosing(); + + // If the lease scanner is between runs, cancel so it doesn't run again. + synchronized (this.scanFutureSynchronizer) { + if (this.scanFuture != null) { + this.scanFuture.cancel(true); + } + } + + // Stop any partition pumps that are running. + CompletableFuture stopping = CompletableFuture.completedFuture(null); + + if (this.pumpManager != null) { + TRACE_LOGGER.info(this.hostContext.withHost("Shutting down all pumps")); + stopping = this.pumpManager.removeAllPumps(CloseReason.Shutdown) + .whenCompleteAsync((empty, e) -> { + if (e != null) { + Throwable notifyWith = LoggingUtils.unwrapException(e, null); + TRACE_LOGGER.warn(this.hostContext.withHost("Failure during shutdown"), notifyWith); + if (notifyWith instanceof Exception) { + this.hostContext.getEventProcessorOptions().notifyOfException(this.hostContext.getHostName(), (Exception) notifyWith, + EventProcessorHostActionStrings.PARTITION_MANAGER_CLEANUP); + + } + } + }, this.hostContext.getExecutor()); + } + // else no pumps to shut down + + stopping = stopping.whenCompleteAsync((empty, e) -> { + TRACE_LOGGER.info(this.hostContext.withHost("Partition manager exiting")); + setClosed(); + }, this.hostContext.getExecutor()); + + return stopping; + } + + public CompletableFuture initialize() { + this.pumpManager = createPumpTestHook(); + + // Stage 0: get partition ids and cache + 
return cachePartitionIds() + // Stage 1: initialize stores, if stage 0 succeeded + .thenComposeAsync((unused) -> initializeStores(), this.hostContext.getExecutor()) + // Stage 2: RUN REGARDLESS OF EXCEPTIONS -- trace errors + .whenCompleteAsync((empty, e) -> + { + if (e != null) { + StringBuilder outAction = new StringBuilder(); + Throwable notifyWith = LoggingUtils.unwrapException(e, outAction); + if (outAction.length() > 0) { + TRACE_LOGGER.error(this.hostContext.withHost( + "Exception while initializing stores (" + outAction.toString() + "), not starting partition manager"), notifyWith); + } else { + TRACE_LOGGER.error(this.hostContext.withHost("Exception while initializing stores, not starting partition manager"), notifyWith); + } + } + }, this.hostContext.getExecutor()) + // Stage 3: schedule scan, which will find partitions and start pumps, if previous stages succeeded + .thenRunAsync(() -> + { + // Schedule the first scan immediately. + synchronized (this.scanFutureSynchronizer) { + TRACE_LOGGER.debug(this.hostContext.withHost("Scheduling lease scanner first pass")); + this.scanFuture = this.hostContext.getExecutor().schedule(() -> scan(true), 0, TimeUnit.SECONDS); + } + + onInitializeCompleteTestHook(); + }, this.hostContext.getExecutor()); + } + + private CompletableFuture initializeStores() { + ILeaseManager leaseManager = this.hostContext.getLeaseManager(); + ICheckpointManager checkpointManager = this.hostContext.getCheckpointManager(); + + // let R = this.retryMax + // Stages 0 to R: create lease store if it doesn't exist + CompletableFuture initializeStoresFuture = buildRetries(CompletableFuture.completedFuture(null), + () -> leaseManager.createLeaseStoreIfNotExists(), "Failure creating lease store for this Event Hub, retrying", + "Out of retries creating lease store for this Event Hub", EventProcessorHostActionStrings.CREATING_LEASE_STORE, this.retryMax); + + // Stages R+1 to 2R: create checkpoint store if it doesn't exist + initializeStoresFuture = 
buildRetries(initializeStoresFuture, () -> checkpointManager.createCheckpointStoreIfNotExists(), + "Failure creating checkpoint store for this Event Hub, retrying", "Out of retries creating checkpoint store for this Event Hub", + EventProcessorHostActionStrings.CREATING_CHECKPOINT_STORE, this.retryMax); + + // Stages 2R+1 to 3R: create leases if they don't exist + initializeStoresFuture = buildRetries(initializeStoresFuture, () -> leaseManager.createAllLeasesIfNotExists(Arrays.asList(this.partitionIds)), + "Failure creating leases, retrying", "Out of retries creating leases", EventProcessorHostActionStrings.CREATING_LEASES, this.retryMax); + + // Stages 3R+1 to 4R: create checkpoint holders if they don't exist + initializeStoresFuture = buildRetries(initializeStoresFuture, () -> checkpointManager.createAllCheckpointsIfNotExists(Arrays.asList(this.partitionIds)), + "Failure creating checkpoint holders, retrying", "Out of retries creating checkpoint holders", + EventProcessorHostActionStrings.CREATING_CHECKPOINTS, this.retryMax); + + initializeStoresFuture.whenCompleteAsync((r, e) -> + { + // If an exception has propagated this far, it should be a FinalException, which is guaranteed to contain a CompletionException. + // Unwrap it so we don't leak a private type. + if ((e != null) && (e instanceof FinalException)) { + throw ((FinalException) e).getInner(); + } + + // Otherwise, allow the existing result to pass to the caller. + }, this.hostContext.getExecutor()); + + return initializeStoresFuture; + } + + // CompletableFuture will be completed exceptionally if it runs out of retries. + // If the lambda succeeds, then it will not be invoked again by following stages. 
+ private CompletableFuture buildRetries(CompletableFuture buildOnto, Callable> lambda, String retryMessage, + String finalFailureMessage, String action, int maxRetries) { + // Stage 0: first attempt + CompletableFuture retryChain = buildOnto.thenComposeAsync((unused) -> + { + CompletableFuture newresult = CompletableFuture.completedFuture(null); + try { + newresult = lambda.call(); + } catch (Exception e1) { + throw new CompletionException(e1); + } + return newresult; + }, this.hostContext.getExecutor()); + + for (int i = 1; i < maxRetries; i++) { + retryChain = retryChain + // Stages 1, 3, 5, etc: trace errors but stop normal exception propagation in order to keep going. + // Either return null if we don't have a valid result, or pass the result along to the next stage. + // FinalExceptions are passed along also so that fatal error earlier in the chain aren't lost. + .handleAsync((r, e) -> + { + Object effectiveResult = r; + if (e != null) { + if (e instanceof FinalException) { + // Propagate FinalException up to the end + throw (FinalException) e; + } else { + TRACE_LOGGER.warn(this.hostContext.withHost(retryMessage), LoggingUtils.unwrapException(e, null)); + } + } else { + // Some lambdas return null on success. Change to TRUE to skip retrying. + if (r == null) { + effectiveResult = true; + } + } + return (e == null) ? effectiveResult : null; // stop propagation of other exceptions so we can retry + }, this.hostContext.getExecutor()) + // Stages 2, 4, 6, etc: if we already have a valid result, pass it along. Otherwise, make another attempt. + // Once we have a valid result there will be no more attempts or exceptions. 
+ .thenComposeAsync((oldresult) -> + { + CompletableFuture newresult = CompletableFuture.completedFuture(oldresult); + if (oldresult == null) { + try { + newresult = lambda.call(); + } catch (Exception e1) { + throw new CompletionException(e1); + } + } + return newresult; + }, this.hostContext.getExecutor()); + } + // Stage final: trace the exception with the final message, or pass along the valid result. + retryChain = retryChain.handleAsync((r, e) -> + { + if (e != null) { + if (e instanceof FinalException) { + throw (FinalException) e; + } else { + TRACE_LOGGER.warn(this.hostContext.withHost(finalFailureMessage)); + throw new FinalException(LoggingUtils.wrapExceptionWithMessage(LoggingUtils.unwrapException(e, null), finalFailureMessage, action)); + } + } + return (e == null) ? r : null; + }, this.hostContext.getExecutor()); + + return retryChain; + } + + // Return Void so it can be called from a lambda. + // throwOnFailure is true + private Void scan(boolean isFirst) { + TRACE_LOGGER.debug(this.hostContext.withHost("Starting lease scan")); + long start = System.currentTimeMillis(); + + (new PartitionScanner(this.hostContext, (lease) -> this.pumpManager.addPump(lease), this)).scan(isFirst) + .whenCompleteAsync((didSteal, e) -> + { + TRACE_LOGGER.debug(this.hostContext.withHost("Scanning took " + (System.currentTimeMillis() - start))); + + onPartitionCheckCompleteTestHook(); + + // Schedule the next scan unless we are shutting down. + if (!this.getIsClosingOrClosed()) { + int seconds = didSteal ? 
this.hostContext.getPartitionManagerOptions().getFastScanIntervalInSeconds() :
+ this.hostContext.getPartitionManagerOptions().getSlowScanIntervalInSeconds();
+ if (isFirst) {
+ seconds = this.hostContext.getPartitionManagerOptions().getStartupScanDelayInSeconds();
+ }
+ synchronized (this.scanFutureSynchronizer) {
+ this.scanFuture = this.hostContext.getExecutor().schedule(() -> scan(false), seconds, TimeUnit.SECONDS);
+ }
+ TRACE_LOGGER.debug(this.hostContext.withHost("Scheduling lease scanner in " + seconds));
+ } else {
+ TRACE_LOGGER.debug(this.hostContext.withHost("Not scheduling lease scanner due to shutdown"));
+ }
+ }, this.hostContext.getExecutor());
+
+ return null;
+ }
+
+ // Exception wrapper that buildRetries() uses to indicate that a fatal error has occurred. The chain
+ // built by buildRetries() normally swallows exceptions via odd-numbered stages so that the retries in
+ // even-numbered stages will execute. If multiple chains are concatenated, FinalException short-circuits
+ // the exception swallowing and allows fatal errors in earlier chains to be propagated all the way to the end.
+ class FinalException extends CompletionException {
+ private static final long serialVersionUID = -4600271981700687166L;
+
+ FinalException(CompletionException e) {
+ super(e);
+ }
+
+ CompletionException getInner() {
+ return (CompletionException) this.getCause();
+ }
+ }
+}
diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionManagerOptions.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionManagerOptions.java
new file mode 100644
index 0000000000000..ac20a6d61b396
--- /dev/null
+++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionManagerOptions.java
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) Microsoft. All rights reserved.
+ * Licensed under the MIT license. 
See LICENSE file in the project root for full license information. + */ + +package com.microsoft.azure.eventprocessorhost; + +/*** + * Options affecting the operation of the partition manager within the event processor host. + * This class is broken out separately because many of these options also affect the operation + * of the ILeaseManager and ICheckpointManager implementations, and different implementations + * may need to subclass and provide different options or defaults. + */ +public class PartitionManagerOptions { + /** + * The default duration after which a partition lease will expire unless renewed. + */ + public final static int DefaultLeaseDurationInSeconds = 30; + + /** + * The default duration between lease renewals. + */ + public final static int DefaultLeaseRenewIntervalInSeconds = 10; + + /** + * The default timeout for checkpoint operations. + */ + public final static int DefaultCheckpointTimeoutInSeconds = 120; + + public final static int DefaultStartupScanDelayInSeconds = 30; + public final static int DefaultFastScanIntervalInSeconds = 3; + public final static int DefaultSlowScanIntervalInSeconds = 5; + + protected int leaseDurationInSeconds = PartitionManagerOptions.DefaultLeaseDurationInSeconds; + protected int leaseRenewIntervalInSeconds = PartitionManagerOptions.DefaultLeaseRenewIntervalInSeconds; + protected int checkpointTimeoutInSeconds = PartitionManagerOptions.DefaultCheckpointTimeoutInSeconds; + + protected int startupScanDelayInSeconds = PartitionManagerOptions.DefaultStartupScanDelayInSeconds; + protected int fastScanIntervalInSeconds = PartitionManagerOptions.DefaultFastScanIntervalInSeconds; + protected int slowScanIntervalInSeconds = PartitionManagerOptions.DefaultSlowScanIntervalInSeconds; + + /*** + * The base class automatically sets members to the static defaults. + */ + public PartitionManagerOptions() { + } + + /** + * Gets the duration after which a partition lease will expire unless renewed. 
+ * Defaults to DefaultLeaseDurationInSeconds. + * + * @return lease duration + */ + public int getLeaseDurationInSeconds() { + return this.leaseDurationInSeconds; + } + + /** + * Sets the duration after which a partition lease will expire unless renewed. + * Must be greater than 0 and should not be less than the renew interval. When using the + * default, Azure Storage-based ILeaseManager, the duration cannot be greater than 60. + * + * @param duration new value for lease duration + */ + public void setLeaseDurationInSeconds(int duration) { + if (duration <= 0) { + throw new IllegalArgumentException("Lease duration must be greater than 0"); + } + this.leaseDurationInSeconds = duration; + } + + /** + * Gets the duration between lease renewals. Defaults to DefaultLeaseRenewIntervalInSeconds. + * + * @return how often leases are renewed + */ + public int getLeaseRenewIntervalInSeconds() { + return this.leaseRenewIntervalInSeconds; + } + + /** + * Sets the duration between lease renewals. Must be greater than 0 and less than the current lease duration. + * + * @param interval new value for how often leases are renewed + */ + public void setLeaseRenewIntervalInSeconds(int interval) { + if ((interval <= 0) || (interval > this.leaseDurationInSeconds)) { + throw new IllegalArgumentException("Lease renew interval must be greater than 0 and not more than lease duration"); + } + this.leaseRenewIntervalInSeconds = interval; + } + + /** + * Gets the timeout for checkpoint operations. Defaults to DefaultCheckpointTimeoutInSeconds. + * + * @return timeout for checkpoint operations + */ + public int getCheckpointTimeoutInSeconds() { + return this.checkpointTimeoutInSeconds; + } + + /** + * Sets the timeout for checkpoint operations. Must be greater than 0. 
+ * + * @param timeout new value for checkpoint timeout + */ + public void setCheckpointTimeoutInSeconds(int timeout) { + if (timeout <= 0) { + throw new IllegalArgumentException("Checkpoint timeout must be greater than 0"); + } + this.checkpointTimeoutInSeconds = timeout; + } + + /** + * Gets the delay time between the first scan for available partitions and the second. This is + * part of a startup optimization which allows individual hosts to become visible to other + * hosts, and thereby get a more accurate count of the number of hosts in the system, before + * they try to estimate how many partitions they should own. + * + * Defaults to DefaultStartupScanDelayInSeconds. + * + * @return delay time in seconds + */ + public int getStartupScanDelayInSeconds() { + return this.startupScanDelayInSeconds; + } + + /** + * Sets the delay time in seconds between the first scan and the second. + * + * @param delay new delay time in seconds + */ + public void setStartupScanDelayInSeconds(int delay) { + if (delay <= 0) { + throw new IllegalArgumentException("Startup scan delay must be greater than 0"); + } + this.startupScanDelayInSeconds = delay; + } + + /** + * There are two possible interval times between scans for available partitions, fast and slow. + * The fast (short) interval is used after a scan in which lease stealing has occurred, to + * promote quicker rebalancing. + * + * Defaults to DefaultFastScanIntervalInSeconds. + * + * @return interval time in seconds + */ + public int getFastScanIntervalInSeconds() { + return this.fastScanIntervalInSeconds; + } + + /** + * Sets the time for fast interval. 
+ * + * @param interval new fast interval in seconds + */ + public void setFastScanIntervalInSeconds(int interval) { + if (interval <= 0) { + throw new IllegalArgumentException("Fast scan interval must be greater than 0"); + } + this.fastScanIntervalInSeconds = interval; + } + + /** + * The slow (long) interval is used after a scan in which lease stealing did not occur, to + * reduce unnecessary scanning when the system is in steady state. + * + * Defaults to DefaultSlowScanIntervalInSeconds. + * + * @return interval time in seconds + */ + public int getSlowScanIntervalInSeconds() { + return this.slowScanIntervalInSeconds; + } + + /** + * Sets the time for slow interval. + * + * @param interval new slow interval in seconds + */ + public void setSlowScanIntervalInSeconds(int interval) { + if (interval <= 0) { + throw new IllegalArgumentException("Slow scan interval must be greater than 0"); + } + this.slowScanIntervalInSeconds = interval; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionPump.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionPump.java new file mode 100644 index 0000000000000..fcdbe9abd8537 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionPump.java @@ -0,0 +1,528 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ + +package com.microsoft.azure.eventprocessorhost; + +import com.microsoft.azure.eventhubs.*; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; + +class PartitionPump extends Closable implements PartitionReceiveHandler { + private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(PartitionPump.class); + protected final HostContext hostContext; + protected final CompleteLease lease; // protected for testability + final private CompletableFuture shutdownTriggerFuture; + final private CompletableFuture shutdownFinishedFuture; + private final Object processingSynchronizer; + private final Consumer pumpManagerCallback; + private EventHubClient eventHubClient = null; + private PartitionReceiver partitionReceiver = null; + private CloseReason shutdownReason; + private volatile CompletableFuture internalOperationFuture = null; + private IEventProcessor processor = null; + private PartitionContext partitionContext = null; + private ScheduledFuture leaseRenewerFuture = null; + + PartitionPump(HostContext hostContext, CompleteLease lease, Closable parent, Consumer pumpManagerCallback) { + super(parent); + + this.hostContext = hostContext; + this.lease = lease; + this.pumpManagerCallback = pumpManagerCallback; + this.processingSynchronizer = new Object(); + + this.partitionContext = new PartitionContext(this.hostContext, this.lease.getPartitionId()); + this.partitionContext.setLease(this.lease); + + // Set up the shutdown futures. The shutdown process can be triggered just by completing this.shutdownFuture. 
+ this.shutdownTriggerFuture = new CompletableFuture(); + this.shutdownFinishedFuture = this.shutdownTriggerFuture + .handleAsync((r, e) -> { + this.pumpManagerCallback.accept(this.lease.getPartitionId()); + return cancelPendingOperations(); + }, this.hostContext.getExecutor()) + .thenComposeAsync((empty) -> cleanUpAll(this.shutdownReason), this.hostContext.getExecutor()) + .thenComposeAsync((empty) -> releaseLeaseOnShutdown(), this.hostContext.getExecutor()) + .whenCompleteAsync((empty, e) -> { + setClosed(); + }, this.hostContext.getExecutor()); + } + + // The CompletableFuture returned by startPump remains uncompleted as long as the pump is running. + // If startup fails, or an error occurs while running, it will complete exceptionally. + // If clean shutdown due to unregister call, it completes normally. + CompletableFuture startPump() { + // Do the slow startup stuff asynchronously. + // Use whenComplete to trigger cleanup on exception. + CompletableFuture.runAsync(() -> openProcessor(), this.hostContext.getExecutor()) + .thenComposeAsync((empty) -> openClientsRetryWrapper(), this.hostContext.getExecutor()) + .thenRunAsync(() -> scheduleLeaseRenewer(), this.hostContext.getExecutor()) + .whenCompleteAsync((r, e) -> + { + if (e != null) { + // If startup failed, trigger shutdown to clean up. 
+ internalShutdown(CloseReason.Shutdown, e); + } + }, this.hostContext.getExecutor()); + + return shutdownFinishedFuture; + } + + private void openProcessor() { + TRACE_LOGGER.info(this.hostContext.withHostAndPartition(this.partitionContext, "Creating and opening event processor instance")); + + String action = EventProcessorHostActionStrings.CREATING_EVENT_PROCESSOR; + try { + this.processor = this.hostContext.getEventProcessorFactory().createEventProcessor(this.partitionContext); + action = EventProcessorHostActionStrings.OPENING_EVENT_PROCESSOR; + this.processor.onOpen(this.partitionContext); + } catch (Exception e) { + // If the processor won't create or open, only thing we can do here is pass the buck. + // Null it out so we don't try to operate on it further. + this.processor = null; + TRACE_LOGGER.warn(this.hostContext.withHostAndPartition(this.partitionContext, "Failed " + action), e); + this.hostContext.getEventProcessorOptions().notifyOfException(this.hostContext.getHostName(), e, action, this.lease.getPartitionId()); + throw new CompletionException(e); + } + } + + private CompletableFuture openClientsRetryWrapper() { + // Stage 0: first attempt + CompletableFuture retryResult = openClients(); + + for (int i = 1; i < 5; i++) { + retryResult = retryResult + // Stages 1, 3, 5, etc: trace errors but stop exception propagation in order to keep going + // UNLESS it's ReceiverDisconnectedException. + .handleAsync((r, e) -> + { + if (e != null) { + Exception notifyWith = (Exception) LoggingUtils.unwrapException(e, null); + if (notifyWith instanceof ReceiverDisconnectedException) { + // TODO Assuming this is due to a receiver with a higher epoch. + // Is there a way to be sure without checking the exception text? + // DO NOT trace here because then we could get multiple traces for the same exception. + // If it's a bad epoch, then retrying isn't going to help. + // Rethrow to keep propagating error to the end and prevent any more attempts. 
+ throw new CompletionException(notifyWith); + } else { + TRACE_LOGGER.warn(this.hostContext.withHostAndPartition(this.partitionContext, + "Failure creating client or receiver, retrying"), e); + } + } + // If we have a valid result, pass it along to prevent further attempts. + return (e == null) ? r : false; + }, this.hostContext.getExecutor()) + // Stages 2, 4, 6, etc: make another attempt if needed. + .thenComposeAsync((done) -> + { + return done ? CompletableFuture.completedFuture(done) : openClients(); + }, this.hostContext.getExecutor()); + } + // Stage final: on success, hook up the user's event handler to start receiving events. On error, + // trace exceptions from the final attempt, or ReceiverDisconnectedException. + return retryResult.handleAsync((r, e) -> + { + if (e == null) { + // IEventProcessor.onOpen is called from the base PartitionPump and must have returned in order for execution to reach here, + // meaning it is safe to set the handler and start calling IEventProcessor.onEvents. + this.partitionReceiver.setReceiveHandler(this, this.hostContext.getEventProcessorOptions().getInvokeProcessorAfterReceiveTimeout()); + } else { + Exception notifyWith = (Exception) LoggingUtils.unwrapException(e, null); + if (notifyWith instanceof ReceiverDisconnectedException) { + // TODO Assuming this is due to a receiver with a higher epoch. + // Is there a way to be sure without checking the exception text? + TRACE_LOGGER.warn(this.hostContext.withHostAndPartition(this.partitionContext, + "Receiver disconnected on create, bad epoch?"), notifyWith); + } else { + TRACE_LOGGER.warn(this.hostContext.withHostAndPartition(this.partitionContext, + "Failure creating client or receiver, out of retries"), e); + } + + // IEventProcessor.onOpen is called from the base PartitionPump and must have returned in order for execution to reach here, + // so we can report this error to it instead of the general error handler. 
+ this.processor.onError(this.partitionContext, new ExceptionWithAction(notifyWith, EventProcessorHostActionStrings.CREATING_EVENT_HUB_CLIENT)); + + // Rethrow so caller will see failure + throw LoggingUtils.wrapException(notifyWith, EventProcessorHostActionStrings.CREATING_EVENT_HUB_CLIENT); + } + return null; + }, this.hostContext.getExecutor()); + } + + protected void scheduleLeaseRenewer() { + if (!getIsClosingOrClosed()) { + int seconds = this.hostContext.getPartitionManagerOptions().getLeaseRenewIntervalInSeconds(); + this.leaseRenewerFuture = this.hostContext.getExecutor().schedule(() -> leaseRenewer(), seconds, TimeUnit.SECONDS); + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(this.lease, "scheduling leaseRenewer in " + seconds)); + } + } + + private CompletableFuture openClients() { + // Create new client + TRACE_LOGGER.info(this.hostContext.withHostAndPartition(this.partitionContext, "Opening EH client")); + + CompletableFuture startOpeningFuture = null; + try { + startOpeningFuture = EventHubClient.create(this.hostContext.getEventHubConnectionString(), + this.hostContext.getRetryPolicy(), this.hostContext.getExecutor()); + } catch (EventHubException | IOException e2) { + // Marking startOpeningFuture as completed exceptionally will cause all the + // following stages to fall through except stage 1 which will report the error. + startOpeningFuture = new CompletableFuture(); + startOpeningFuture.completeExceptionally(e2); + } + this.internalOperationFuture = startOpeningFuture; + + // Stage 0: get EventHubClient + return startOpeningFuture + // Stage 1: save EventHubClient on success, trace on error + .whenCompleteAsync((ehclient, e) -> + { + if ((ehclient != null) && (e == null)) { + this.eventHubClient = ehclient; + } else { + TRACE_LOGGER.error(this.hostContext.withHostAndPartition(this.partitionContext, "EventHubClient creation failed"), e); + } + // this.internalOperationFuture allows canceling startup if it gets stuck. 
Null out now that EventHubClient creation has completed. + this.internalOperationFuture = null; + }, this.hostContext.getExecutor()) + // Stage 2: get initial offset for receiver + .thenComposeAsync((empty) -> this.partitionContext.getInitialOffset(), this.hostContext.getExecutor()) + // Stage 3: set up other receiver options, create receiver if initial offset is valid + .thenComposeAsync((startAt) -> + { + long epoch = this.lease.getEpoch(); + + TRACE_LOGGER.info(this.hostContext.withHostAndPartition(this.partitionContext, + "Opening EH receiver with epoch " + epoch + " at location " + startAt)); + + CompletableFuture receiverFuture = null; + + try { + ReceiverOptions options = new ReceiverOptions(); + options.setReceiverRuntimeMetricEnabled(this.hostContext.getEventProcessorOptions().getReceiverRuntimeMetricEnabled()); + options.setPrefetchCount(this.hostContext.getEventProcessorOptions().getPrefetchCount()); + + receiverFuture = this.eventHubClient.createEpochReceiver(this.partitionContext.getConsumerGroupName(), + this.partitionContext.getPartitionId(), startAt, epoch, options); + this.internalOperationFuture = receiverFuture; + } catch (EventHubException e) { + TRACE_LOGGER.error(this.hostContext.withHostAndPartition(this.partitionContext, "Opening EH receiver failed with an error "), e); + receiverFuture = new CompletableFuture(); + receiverFuture.completeExceptionally(e); + } + + return receiverFuture; + }, this.hostContext.getExecutor()) + // Stage 4: save PartitionReceiver on success, trace on error + .whenCompleteAsync((receiver, e) -> + { + if ((receiver != null) && (e == null)) { + this.partitionReceiver = receiver; + } else if (this.eventHubClient != null) { + if (e instanceof ReceiverDisconnectedException) { + TRACE_LOGGER.info(this.hostContext.withHostAndPartition(this.partitionContext, "PartitionReceiver disconnected during startup")); + } else { + TRACE_LOGGER.error(this.hostContext.withHostAndPartition(this.partitionContext, "PartitionReceiver 
creation failed"), e); + } + } + // else if this.eventHubClient is null then we failed in stage 0 and already traced in stage 1 + + // this.internalOperationFuture allows canceling startup if it gets stuck. Null out now that PartitionReceiver creation has completed. + this.internalOperationFuture = null; + }, this.hostContext.getExecutor()) + // Stage 5: on success, set up the receiver + .thenApplyAsync((receiver) -> + { + this.partitionReceiver.setReceiveTimeout(this.hostContext.getEventProcessorOptions().getReceiveTimeOut()); + + TRACE_LOGGER.info(this.hostContext.withHostAndPartition(this.partitionContext, + "EH client and receiver creation finished")); + + return true; + }, this.hostContext.getExecutor()); + } + + private CompletableFuture cleanUpAll(CloseReason reason) // swallows all exceptions + { + return cleanUpClients() + .thenRunAsync(() -> + { + if (this.processor != null) { + try { + synchronized (this.processingSynchronizer) { + // When we take the lock, any existing onEvents call has finished. + // Because the client has been closed, there will not be any more + // calls to onEvents in the future. Therefore we can safely call onClose. + this.processor.onClose(this.partitionContext, reason); + } + } catch (Exception e) { + TRACE_LOGGER.warn(this.hostContext.withHostAndPartition(this.partitionContext, + "Failure closing processor"), e); + // If closing the processor has failed, the state of the processor is suspect. + // Report the failure to the general error handler instead. + this.hostContext.getEventProcessorOptions().notifyOfException(this.hostContext.getHostName(), e, EventProcessorHostActionStrings.CLOSING_EVENT_PROCESSOR, + this.lease.getPartitionId()); + } + } + }, this.hostContext.getExecutor()); + } + + private CompletableFuture cleanUpClients() // swallows all exceptions + { + CompletableFuture cleanupFuture = null; + if (this.partitionReceiver != null) { + // Disconnect the processor from the receiver we're about to close. 
+ // Fortunately this is idempotent -- setting the handler to null when it's already been + // nulled by code elsewhere is harmless! + // Setting to null also waits for the in-progress calls to complete + TRACE_LOGGER.info(this.hostContext.withHostAndPartition(this.partitionContext, "Setting receive handler to null")); + cleanupFuture = this.partitionReceiver.setReceiveHandler(null); + } else { + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(this.partitionContext, "partitionReceiver is null in cleanup")); + cleanupFuture = CompletableFuture.completedFuture(null); + } + cleanupFuture = cleanupFuture.handleAsync((empty, e) -> + { + if (e != null) { + TRACE_LOGGER.warn(this.hostContext.withHostAndPartition(this.partitionContext, + "Got exception when ReceiveHandler is set to null."), LoggingUtils.unwrapException(e, null)); + } + return null; // stop propagation of exceptions + }, this.hostContext.getExecutor()) + .thenApplyAsync((empty) -> + { + TRACE_LOGGER.info(this.hostContext.withHostAndPartition(this.partitionContext, "Closing EH receiver")); + PartitionReceiver partitionReceiverTemp = this.partitionReceiver; + this.partitionReceiver = null; + return partitionReceiverTemp; + }, this.hostContext.getExecutor()) + .thenComposeAsync((partitionReceiverTemp) -> + { + return (partitionReceiverTemp != null) ? 
partitionReceiverTemp.close() : CompletableFuture.completedFuture(null); + }, this.hostContext.getExecutor()) + .handleAsync((empty, e) -> + { + if (e != null) { + TRACE_LOGGER.warn(this.hostContext.withHostAndPartition(this.partitionContext, + "Closing EH receiver failed."), LoggingUtils.unwrapException(e, null)); + } + return null; // stop propagation of exceptions + }, this.hostContext.getExecutor()) + .thenApplyAsync((empty) -> + { + TRACE_LOGGER.info(this.hostContext.withHostAndPartition(this.partitionContext, "Closing EH client")); + final EventHubClient eventHubClientTemp = this.eventHubClient; + this.eventHubClient = null; + if (eventHubClientTemp == null) { + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(this.partitionContext, + "eventHubClient is null in cleanup")); + } + return eventHubClientTemp; + }, this.hostContext.getExecutor()) + .thenComposeAsync((eventHubClientTemp) -> + { + return (eventHubClientTemp != null) ? eventHubClientTemp.close() : CompletableFuture.completedFuture(null); + }, this.hostContext.getExecutor()) + .handleAsync((empty, e) -> + { + if (e != null) { + TRACE_LOGGER.warn(this.hostContext.withHostAndPartition(this.partitionContext, "Closing EH client failed."), + LoggingUtils.unwrapException(e, null)); + } + return null; // stop propagation of exceptions + }, this.hostContext.getExecutor()); + + return cleanupFuture; + } + + protected Void cancelPendingOperations() { + // If an open operation is stuck, this lets us shut down anyway. 
+ CompletableFuture captured = this.internalOperationFuture; + if (captured != null) { + captured.cancel(true); + } + + ScheduledFuture capturedLeaseRenewer = this.leaseRenewerFuture; + if (capturedLeaseRenewer != null) { + capturedLeaseRenewer.cancel(true); + } + return null; + } + + private CompletableFuture releaseLeaseOnShutdown() // swallows all exceptions + { + CompletableFuture result = CompletableFuture.completedFuture(null); + + if (this.shutdownReason != CloseReason.LeaseLost) { + // Since this pump is dead, release the lease. Don't care about any errors that may occur. Worst case is + // that the lease eventually expires, since the lease renewer has been cancelled. + result = PartitionPump.this.hostContext.getLeaseManager().releaseLease(this.lease) + .handleAsync((empty, e) -> + { + if (e != null) { + TRACE_LOGGER.warn(this.hostContext.withHostAndPartition(this.partitionContext, + "Failure releasing lease on pump shutdown"), LoggingUtils.unwrapException(e, null)); + } + return null; // stop propagation of exceptions + }, this.hostContext.getExecutor()); + } + // else we already lost the lease, releasing is unnecessary and would fail if we try + + return result; + } + + protected void internalShutdown(CloseReason reason, Throwable e) { + setClosing(); + + this.shutdownReason = reason; + if (e == null) { + this.shutdownTriggerFuture.complete(null); + } else { + this.shutdownTriggerFuture.completeExceptionally(e); + } + } + + CompletableFuture shutdown(CloseReason reason) { + TRACE_LOGGER.info(this.hostContext.withHostAndPartition(this.partitionContext, + "pump shutdown for reason " + reason.toString())); + internalShutdown(reason, null); + return this.shutdownFinishedFuture; + } + + private void leaseRenewer() { + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(this.lease, "leaseRenewer()")); + + // Theoretically, if the future is cancelled then this method should never fire, but + // there's no harm in being sure. 
+ if (this.leaseRenewerFuture.isCancelled()) { + return; + } + if (getIsClosingOrClosed()) { + return; + } + + // Stage 0: renew the lease + this.hostContext.getLeaseManager().renewLease(this.lease) + // Stage 1: check result of renewing + .thenApplyAsync((renewed) -> + { + Boolean scheduleNext = true; + if (!renewed) { + // False return from renewLease means that lease was lost. + // Start pump shutdown process and do not schedule another call to leaseRenewer. + TRACE_LOGGER.info(this.hostContext.withHostAndPartition(this.lease, "Lease lost, shutting down pump")); + internalShutdown(CloseReason.LeaseLost, null); + scheduleNext = false; + } + return scheduleNext; + }, this.hostContext.getExecutor()) + // Stage 2: RUN REGARDLESS OF EXCEPTIONS -- trace exceptions, schedule next iteration + .whenCompleteAsync((scheduleNext, e) -> + { + if (e != null) { + // Failure renewing lease due to storage exception or whatever. + // Trace error and leave scheduleNext as true to schedule another try. + Exception notifyWith = (Exception) LoggingUtils.unwrapException(e, null); + TRACE_LOGGER.info(this.hostContext.withHostAndPartition(this.lease, "Transient failure renewing lease"), notifyWith); + // Notify the general error handler rather than calling this.processor.onError so we can provide context (RENEWING_LEASE) + this.hostContext.getEventProcessorOptions().notifyOfException(this.hostContext.getHostName(), notifyWith, EventProcessorHostActionStrings.RENEWING_LEASE, + this.lease.getPartitionId()); + } + + if ((scheduleNext != null) && scheduleNext.booleanValue() && !this.leaseRenewerFuture.isCancelled() && !getIsClosingOrClosed()) { + scheduleLeaseRenewer(); + } + }, this.hostContext.getExecutor()); + } + + @Override + public int getMaxEventCount() { + return this.hostContext.getEventProcessorOptions().getMaxBatchSize(); + } + + @Override + public void onReceive(Iterable events) { + if (this.hostContext.getEventProcessorOptions().getReceiverRuntimeMetricEnabled()) { + 
this.partitionContext.setRuntimeInformation(this.partitionReceiver.getRuntimeInformation()); + } + + // This method is called on the thread that the Java EH client uses to run the pump. + // There is one pump per EventHubClient. Since each PartitionPump creates a new EventHubClient, + // using that thread to call onEvents does no harm. Even if onEvents is slow, the pump will + // get control back each time onEvents returns, and be able to receive a new batch of events + // with which to make the next onEvents call. The pump gains nothing by running faster than onEvents. + + // The underlying client returns null if there are no events, but the contract for IEventProcessor + // is different and is expecting an empty iterable if there are no events (and invoke processor after + // receive timeout is turned on). + + Iterable effectiveEvents = events; + if (effectiveEvents == null) { + effectiveEvents = new ArrayList(); + } + + // Update offset and sequence number in the PartitionContext to support argument-less overload of PartitionContext.checkpoint() + Iterator iter = effectiveEvents.iterator(); + EventData last = null; + while (iter.hasNext()) { + last = iter.next(); + } + if (last != null) { + this.partitionContext.setOffsetAndSequenceNumber(last); + } + + try { + // Synchronize to serialize calls to the processor. + // The handler is not installed until after onOpen returns, so onEvents cannot overlap with onOpen. + // onEvents and onClose are synchronized via this.processingSynchronizer to prevent calls to onClose + // while an onEvents call is still in progress. + synchronized (this.processingSynchronizer) { + this.processor.onEvents(this.partitionContext, effectiveEvents); + } + } catch (Exception e) { + // TODO -- do we pass errors from IEventProcessor.onEvents to IEventProcessor.onError? 
+ // Depending on how you look at it, that's either pointless (if the user's code throws, the user's code should already know about it) or + // a convenient way of centralizing error handling. + // In the meantime, just trace it. + TRACE_LOGGER.warn(this.hostContext.withHostAndPartition(this.partitionContext, + "Got exception from onEvents"), e); + } + } + + @Override + public void onError(Throwable error) { + if (error == null) { + error = new Throwable("No error info supplied by EventHub client"); + } + if (error instanceof ReceiverDisconnectedException) { + TRACE_LOGGER.info(this.hostContext.withHostAndPartition(this.partitionContext, + "EventHub client disconnected, probably another host took the partition")); + } else { + TRACE_LOGGER.warn(this.hostContext.withHostAndPartition(this.partitionContext, "EventHub client error: " + error.toString())); + if (error instanceof Exception) { + TRACE_LOGGER.warn(this.hostContext.withHostAndPartition(this.partitionContext, "EventHub client error continued"), (Exception) error); + } + } + + // It is vital to perform the rest of cleanup in a separate thread and not block this one. This thread is the client's + // receive pump thread, and blocking it means that the receive pump never completes its CompletableFuture, which in turn + // blocks other client calls that we would like to make during cleanup. Specifically, this issue was found when + // PartitionReceiver.setReceiveHandler(null).get() was called and never returned. 
+ final Throwable capturedError = error; + CompletableFuture.runAsync(() -> PartitionPump.this.processor.onError(PartitionPump.this.partitionContext, capturedError), this.hostContext.getExecutor()) + .thenRunAsync(() -> internalShutdown(CloseReason.Shutdown, capturedError), this.hostContext.getExecutor()); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionScanner.java b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionScanner.java new file mode 100644 index 0000000000000..4bd0a3386909c --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PartitionScanner.java @@ -0,0 +1,323 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ + +package com.microsoft.azure.eventprocessorhost; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.*; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Consumer; + +class PartitionScanner extends Closable { + private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(PartitionScanner.class); + private static final Random randomizer = new Random(); + private final HostContext hostContext; + private final Consumer addPump; + + // Populated by getAllLeaseStates() + private List allLeaseStates = null; + + // Values populated by sortLeasesAndCalculateDesiredCount + private int desiredCount; + private int unownedCount; // updated by acquireExpiredInChunksParallel + final private ConcurrentHashMap leasesOwnedByOthers; // updated by acquireExpiredInChunksParallel + + PartitionScanner(HostContext hostContext, Consumer addPump, Closable parent) { + super(parent); + + this.hostContext = 
hostContext; + this.addPump = addPump; + + this.desiredCount = 0; + this.unownedCount = 0; + this.leasesOwnedByOthers = new ConcurrentHashMap(); + } + + public CompletableFuture scan(boolean isFirst) { + return getAllLeaseStates() + .thenComposeAsync((unused) -> { + throwIfClosingOrClosed("PartitionScanner is shutting down"); + int ourLeasesCount = sortLeasesAndCalculateDesiredCount(isFirst); + return acquireExpiredInChunksParallel(0, this.desiredCount - ourLeasesCount); + }, this.hostContext.getExecutor()) + .thenApplyAsync((remainingNeeded) -> { + throwIfClosingOrClosed("PartitionScanner is shutting down"); + ArrayList stealThese = new ArrayList(); + if (remainingNeeded > 0) { + TRACE_LOGGER.debug(this.hostContext.withHost("Looking to steal: " + remainingNeeded)); + stealThese = findLeasesToSteal(remainingNeeded); + } + return stealThese; + }, this.hostContext.getExecutor()) + .thenComposeAsync((stealThese) -> { + throwIfClosingOrClosed("PartitionScanner is shutting down"); + return stealLeases(stealThese); + }, this.hostContext.getExecutor()) + .handleAsync((didSteal, e) -> { + if ((e != null) && !(e instanceof ClosingException)) { + StringBuilder outAction = new StringBuilder(); + Exception notifyWith = (Exception) LoggingUtils.unwrapException(e, outAction); + TRACE_LOGGER.warn(this.hostContext.withHost("Exception scanning leases"), notifyWith); + this.hostContext.getEventProcessorOptions().notifyOfException(this.hostContext.getHostName(), notifyWith, outAction.toString(), + ExceptionReceivedEventArgs.NO_ASSOCIATED_PARTITION); + didSteal = false; + } + return didSteal; + }, this.hostContext.getExecutor()); + } + + private CompletableFuture getAllLeaseStates() { + throwIfClosingOrClosed("PartitionScanner is shutting down"); + return this.hostContext.getLeaseManager().getAllLeases() + .thenAcceptAsync((states) -> { + throwIfClosingOrClosed("PartitionScanner is shutting down"); + this.allLeaseStates = states; + Collections.sort(this.allLeaseStates); + }, 
this.hostContext.getExecutor()); + } + + // NONBLOCKING + private int sortLeasesAndCalculateDesiredCount(boolean isFirst) { + TRACE_LOGGER.debug(this.hostContext.withHost("Accounting input: allLeaseStates size is " + this.allLeaseStates.size())); + + HashSet uniqueOwners = new HashSet(); + uniqueOwners.add(this.hostContext.getHostName()); + int ourLeasesCount = 0; + this.unownedCount = 0; + for (BaseLease info : this.allLeaseStates) { + boolean ownedByUs = info.getIsOwned() && info.getOwner() != null && (info.getOwner().compareTo(this.hostContext.getHostName()) == 0); + if (info.getIsOwned() && info.getOwner() != null) { + uniqueOwners.add(info.getOwner()); + } else { + this.unownedCount++; + } + if (ownedByUs) { + ourLeasesCount++; + } else if (info.getIsOwned()) { + this.leasesOwnedByOthers.put(info.getPartitionId(), info); + } + } + int hostCount = uniqueOwners.size(); + int countPerHost = this.allLeaseStates.size() / hostCount; + this.desiredCount = isFirst ? 1 : countPerHost; + if (!isFirst && (this.unownedCount > 0) && (this.unownedCount < hostCount) && ((this.allLeaseStates.size() % hostCount) != 0)) { + // Distribute leftovers. + this.desiredCount++; + } + + ArrayList sortedHosts = new ArrayList(uniqueOwners); + Collections.sort(sortedHosts); + int hostOrdinal = -1; + int startingPoint = 0; + if (isFirst) { + // If the entire system is starting up, the list of hosts is probably not complete and we can't really + // compute a meaningful hostOrdinal. But we only want hostOrdinal to calculate startingPoint. Instead, + // just randomly select a startingPoint. 
+ startingPoint = PartitionScanner.randomizer.nextInt(this.allLeaseStates.size()); + } else { + for (hostOrdinal = 0; hostOrdinal < sortedHosts.size(); hostOrdinal++) { + if (sortedHosts.get(hostOrdinal).compareTo(this.hostContext.getHostName()) == 0) { + break; + } + } + startingPoint = countPerHost * hostOrdinal; + } + // Rotate allLeaseStates + TRACE_LOGGER.debug(this.hostContext.withHost("Host ordinal: " + hostOrdinal + " Rotating leases to start at " + startingPoint)); + if (startingPoint != 0) { + ArrayList rotatedList = new ArrayList(this.allLeaseStates.size()); + for (int j = 0; j < this.allLeaseStates.size(); j++) { + rotatedList.add(this.allLeaseStates.get((j + startingPoint) % this.allLeaseStates.size())); + } + this.allLeaseStates = rotatedList; + } + + TRACE_LOGGER.debug(this.hostContext.withHost("Host count is " + hostCount + " Desired owned count is " + this.desiredCount)); + TRACE_LOGGER.debug(this.hostContext.withHost("ourLeasesCount " + ourLeasesCount + " leasesOwnedByOthers " + this.leasesOwnedByOthers.size() + + " unowned " + unownedCount)); + + return ourLeasesCount; + } + + // NONBLOCKING + // Returns a CompletableFuture as a convenience for the caller + private CompletableFuture> findExpiredLeases(int startAt, int endAt) { + final ArrayList expiredLeases = new ArrayList(); + TRACE_LOGGER.debug(this.hostContext.withHost("Finding expired leases from '" + this.allLeaseStates.get(startAt).getPartitionId() + "'[" + startAt + "] up to '" + + ((endAt < this.allLeaseStates.size()) ? 
this.allLeaseStates.get(endAt).getPartitionId() : "end") + "'[" + endAt + "]")); + + for (BaseLease info : this.allLeaseStates.subList(startAt, endAt)) { + if (!info.getIsOwned()) { + expiredLeases.add(info); + } + } + + TRACE_LOGGER.debug(this.hostContext.withHost("Found in range: " + expiredLeases.size())); + return CompletableFuture.completedFuture(expiredLeases); + } + + private CompletableFuture acquireExpiredInChunksParallel(int startAt, int needed) { + throwIfClosingOrClosed("PartitionScanner is shutting down"); + + CompletableFuture resultFuture = CompletableFuture.completedFuture(needed); + if (startAt < this.allLeaseStates.size()) { + TRACE_LOGGER.debug(this.hostContext.withHost("Examining chunk at '" + this.allLeaseStates.get(startAt).getPartitionId() + "'[" + startAt + "] need " + needed)); + } else { + TRACE_LOGGER.debug(this.hostContext.withHost("Examining chunk skipping, startAt is off end: " + startAt)); + } + + if ((needed > 0) && (this.unownedCount > 0) && (startAt < this.allLeaseStates.size())) { + final AtomicInteger runningNeeded = new AtomicInteger(needed); + final int endAt = Math.min(startAt + needed, this.allLeaseStates.size()); + + resultFuture = findExpiredLeases(startAt, endAt) + .thenComposeAsync((getThese) -> { + throwIfClosingOrClosed("PartitionScanner is shutting down"); + CompletableFuture acquireFuture = CompletableFuture.completedFuture(null); + if (getThese.size() > 0) { + ArrayList> getFutures = new ArrayList>(); + for (BaseLease info : getThese) { + throwIfClosingOrClosed("PartitionScanner is shutting down"); + final AcquisitionHolder holder = new AcquisitionHolder(); + CompletableFuture getOneFuture = this.hostContext.getLeaseManager().getLease(info.getPartitionId()) + .thenComposeAsync((lease) -> { + throwIfClosingOrClosed("PartitionScanner is shutting down"); + holder.setAcquiredLease(lease); + return this.hostContext.getLeaseManager().acquireLease(lease); + }, this.hostContext.getExecutor()) + .thenAcceptAsync((acquired) -> 
{ + throwIfClosingOrClosed("PartitionScanner is shutting down"); + if (acquired) { + runningNeeded.decrementAndGet(); + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(holder.getAcquiredLease().getPartitionId(), "Acquired unowned/expired")); + if (this.leasesOwnedByOthers.containsKey(holder.getAcquiredLease().getPartitionId())) { + this.leasesOwnedByOthers.remove(holder.getAcquiredLease().getPartitionId()); + this.unownedCount--; + } + this.addPump.accept(holder.getAcquiredLease()); + } else { + this.leasesOwnedByOthers.put(holder.getAcquiredLease().getPartitionId(), holder.getAcquiredLease()); + } + }, this.hostContext.getExecutor()); + getFutures.add(getOneFuture); + } + CompletableFuture[] dummy = new CompletableFuture[getFutures.size()]; + acquireFuture = CompletableFuture.allOf(getFutures.toArray(dummy)); + } + return acquireFuture; + }, this.hostContext.getExecutor()) + .handleAsync((empty, e) -> { + // log/notify if exception occurred, then swallow exception and continue with next chunk + if ((e != null) && !(e instanceof ClosingException)) { + Exception notifyWith = (Exception) LoggingUtils.unwrapException(e, null); + TRACE_LOGGER.warn(this.hostContext.withHost("Failure getting/acquiring lease, continuing"), notifyWith); + this.hostContext.getEventProcessorOptions().notifyOfException(this.hostContext.getHostName(), notifyWith, + EventProcessorHostActionStrings.CHECKING_LEASES, ExceptionReceivedEventArgs.NO_ASSOCIATED_PARTITION); + } + return null; + }, this.hostContext.getExecutor()) + .thenComposeAsync((unused) -> acquireExpiredInChunksParallel(endAt, runningNeeded.get()), this.hostContext.getExecutor()); + } else { + TRACE_LOGGER.debug(this.hostContext.withHost("Short circuit: needed is 0, unowned is 0, or off end")); + } + + return resultFuture; + } + + // NONBLOCKING + private ArrayList findLeasesToSteal(int stealAsk) { + // Generate a map of hostnames and owned counts. 
+ HashMap hostOwns = new HashMap(); + for (BaseLease info : this.leasesOwnedByOthers.values()) { + if (hostOwns.containsKey(info.getOwner())) { + int newCount = hostOwns.get(info.getOwner()) + 1; + hostOwns.put(info.getOwner(), newCount); + } else { + hostOwns.put(info.getOwner(), 1); + } + } + + // Extract hosts which own more than the desired count + ArrayList bigOwners = new ArrayList(); + for (Map.Entry pair : hostOwns.entrySet()) { + if (pair.getValue() > this.desiredCount) { + bigOwners.add(pair.getKey()); + TRACE_LOGGER.debug(this.hostContext.withHost("Big owner " + pair.getKey() + " has " + pair.getValue())); + } + } + + ArrayList stealInfos = new ArrayList(); + + if (bigOwners.size() > 0) { + // Randomly pick one of the big owners + String bigVictim = bigOwners.get(PartitionScanner.randomizer.nextInt(bigOwners.size())); + int victimExtra = hostOwns.get(bigVictim) - this.desiredCount - 1; + int stealCount = Math.min(victimExtra, stealAsk); + TRACE_LOGGER.debug(this.hostContext.withHost("Stealing " + stealCount + " from " + bigVictim)); + + // Grab stealCount partitions owned by bigVictim and return the infos. 
+ for (BaseLease candidate : this.allLeaseStates) { + if (candidate.getOwner() != null && candidate.getOwner().compareTo(bigVictim) == 0) { + stealInfos.add(candidate); + if (stealInfos.size() >= stealCount) { + break; + } + } + } + } else { + TRACE_LOGGER.debug(this.hostContext.withHost("No big owners found, skipping steal")); + } + + return stealInfos; + } + + private CompletableFuture stealLeases(List stealThese) { + CompletableFuture allSteals = CompletableFuture.completedFuture(false); + + if (stealThese.size() > 0) { + ArrayList> steals = new ArrayList>(); + for (BaseLease info : stealThese) { + throwIfClosingOrClosed("PartitionScanner is shutting down"); + + final AcquisitionHolder holder = new AcquisitionHolder(); + CompletableFuture oneSteal = this.hostContext.getLeaseManager().getLease(info.getPartitionId()) + .thenComposeAsync((lease) -> { + throwIfClosingOrClosed("PartitionScanner is shutting down"); + holder.setAcquiredLease(lease); + return this.hostContext.getLeaseManager().acquireLease(lease); + }, this.hostContext.getExecutor()) + .thenAcceptAsync((acquired) -> { + throwIfClosingOrClosed("PartitionScanner is shutting down"); + if (acquired) { + TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(holder.getAcquiredLease().getPartitionId(), "Stole lease")); + this.addPump.accept(holder.getAcquiredLease()); + } + }, this.hostContext.getExecutor()); + steals.add(oneSteal); + } + + CompletableFuture dummy[] = new CompletableFuture[steals.size()]; + allSteals = CompletableFuture.allOf(steals.toArray(dummy)).thenApplyAsync((empty) -> true, this.hostContext.getExecutor()); + } + + return allSteals; + } + + private class AcquisitionHolder { + private CompleteLease acquiredLease; + + void setAcquiredLease(CompleteLease l) { + this.acquiredLease = l; + } + + CompleteLease getAcquiredLease() { + return this.acquiredLease; + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PumpManager.java 
b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PumpManager.java new file mode 100644 index 0000000000000..fbd4c7279d587 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/main/java/com/microsoft/azure/eventprocessorhost/PumpManager.java @@ -0,0 +1,96 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ + +package com.microsoft.azure.eventprocessorhost; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Consumer; + + +class PumpManager extends Closable implements Consumer { + private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(PumpManager.class); + protected final HostContext hostContext; + protected ConcurrentHashMap pumpStates; // protected for testability + + public PumpManager(HostContext hostContext, Closable parent) { + super(parent); + + this.hostContext = hostContext; + + this.pumpStates = new ConcurrentHashMap(); + } + + public void addPump(CompleteLease lease) { + if (getIsClosingOrClosed()) { + TRACE_LOGGER.info(this.hostContext.withHostAndPartition(lease, "Shutting down, not creating new pump")); + return; + } + + PartitionPump capturedPump = this.pumpStates.get(lease.getPartitionId()); // CONCURRENTHASHTABLE + if (capturedPump != null) { + // There already is a pump. This should never happen and it's not harmless if it does. If we get here, + // it implies that the existing pump is a zombie which is not renewing its lease. + TRACE_LOGGER.error(this.hostContext.withHostAndPartition(lease, "throwing away zombie pump")); + // Shutdown should remove the pump from the hashmap, but we don't know what state this pump is in so + // remove it manually. ConcurrentHashMap specifies that removing an item that doesn't exist is a safe no-op. 
+ this.pumpStates.remove(lease.getPartitionId()); + // Call shutdown to try to clean up, but do not wait. + capturedPump.shutdown(CloseReason.Shutdown); + } + + TRACE_LOGGER.info(this.hostContext.withHostAndPartition(lease, "creating new pump")); + PartitionPump newPartitionPump = createNewPump(lease); + this.pumpStates.put(lease.getPartitionId(), newPartitionPump); + newPartitionPump.startPump(); + } + + // Callback used by pumps during pump shutdown. + @Override + public void accept(String partitionId) { + // These are fast, non-blocking actions. + this.pumpStates.remove(partitionId); + removingPumpTestHook(partitionId); + } + + // Separated out so that tests can override and substitute their own pump class. + protected PartitionPump createNewPump(CompleteLease lease) { + return new PartitionPump(this.hostContext, lease, this, this); + } + + public CompletableFuture removePump(String partitionId, final CloseReason reason) { + CompletableFuture retval = CompletableFuture.completedFuture(null); + PartitionPump capturedPump = this.pumpStates.get(partitionId); // CONCURRENTHASHTABLE + if (capturedPump != null) { + TRACE_LOGGER.info(this.hostContext.withHostAndPartition(partitionId, + "closing pump for reason " + reason.toString())); + retval = capturedPump.shutdown(reason); + } else { + // Shouldn't get here but not really harmful, so just trace. 
+ TRACE_LOGGER.debug(this.hostContext.withHostAndPartition(partitionId, + "no pump found to remove for partition " + partitionId)); + } + return retval; + } + + public CompletableFuture removeAllPumps(CloseReason reason) { + setClosing(); + + CompletableFuture[] futures = new CompletableFuture[this.pumpStates.size()]; + int i = 0; + for (String partitionId : this.pumpStates.keySet()) { + futures[i++] = removePump(partitionId, reason); + } + + return CompletableFuture.allOf(futures).whenCompleteAsync((empty, e) -> { setClosed(); }, this.hostContext.getExecutor()); + } + + protected void removingPumpTestHook(String partitionId) { + // For test use. MUST BE FAST, NON-BLOCKING. + } +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/CheckpointManagerTest.java b/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/CheckpointManagerTest.java new file mode 100644 index 0000000000000..d4d7455314e6d --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/CheckpointManagerTest.java @@ -0,0 +1,259 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ + +package com.microsoft.azure.eventprocessorhost; + +import com.microsoft.azure.eventhubs.EventHubClient; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.UUID; + +import static org.junit.Assert.*; + +public class CheckpointManagerTest extends TestBase { + private ILeaseManager[] leaseManagers; + private ICheckpointManager[] checkpointManagers; + private EventProcessorHost[] hosts; + + @Test + public void singleManangerInMemoryCheckpointSmokeTest() throws Exception { + singleManagerCheckpointSmokeTest(false, 8); + } + + @Test + public void twoManagerInMemoryCheckpointSmokeTest() throws Exception { + twoManagerCheckpointSmokeTest(false, 8); + } + + @Test + public void singleManagerAzureCheckpointSmokeTest() throws Exception { + singleManagerCheckpointSmokeTest(true, 8); + } + + @Test + public void twoManagerAzureCheckpointSmokeTest() throws Exception { + twoManagerCheckpointSmokeTest(true, 8); + } + + public void singleManagerCheckpointSmokeTest(boolean useAzureStorage, int partitionCount) throws Exception { + this.leaseManagers = new ILeaseManager[1]; + this.checkpointManagers = new ICheckpointManager[1]; + this.hosts = new EventProcessorHost[1]; + setupOneManager(useAzureStorage, 0, "0", generateContainerName("0")); + + TestBase.logInfo("Check whether checkpoint store exists before create"); + boolean boolret = this.checkpointManagers[0].checkpointStoreExists().get(); + assertFalse("checkpoint store should not exist yet", boolret); + + TestBase.logInfo("Create checkpoint store"); + if (useAzureStorage) { + // Storage implementation optimizes checkpoint store creation to a no-op. Have to call lease manager + // to actually create. 
+ this.leaseManagers[0].createLeaseStoreIfNotExists().get(); + } else { + this.checkpointManagers[0].createCheckpointStoreIfNotExists().get(); + } + + TestBase.logInfo("Check whether checkpoint store exists after create"); + boolret = this.checkpointManagers[0].checkpointStoreExists().get(); + assertTrue("checkpoint store should exist but does not", boolret); + + ArrayList partitionIds = new ArrayList(); + for (int i = 0; i < partitionCount; i++) { + partitionIds.add(String.valueOf(i)); + } + TestBase.logInfo("Create checkpoint holders for all partitions"); + if (useAzureStorage) { + // Storage implementation optimizes checkpoint creation to a no-op. Have to create the leases instead. + this.leaseManagers[0].createAllLeasesIfNotExists(partitionIds); + } else { + this.checkpointManagers[0].createAllCheckpointsIfNotExists(partitionIds); + } + + TestBase.logInfo("Trying to get checkpoints for all partitions"); + for (int i = 0; i < partitionCount; i++) { + Checkpoint blah = this.checkpointManagers[0].getCheckpoint(String.valueOf(i)).get(); + assertNull("unexpectedly successful retrieve checkpoint for " + i, blah); + } + + // AzureStorageCheckpointLeaseManager tries to pretend that checkpoints and leases are separate, but they really aren't. + // Because the checkpoint data is stored in the lease, updating the checkpoint means updating the lease, and it is + // necessary to hold the lease in order to update it. 
+ HashMap leases = new HashMap(); + if (useAzureStorage) { + for (int i = 0; i < partitionCount; i++) { + CompleteLease l = this.leaseManagers[0].getLease(partitionIds.get(i)).get(); + assertTrue("null lease for " + partitionIds.get(i), l != null); + leases.put(l.getPartitionId(), l); + boolret = this.leaseManagers[0].acquireLease(l).get(); + assertTrue("failed to acquire lease for " + l.getPartitionId(), boolret); + } + } + + Checkpoint[] checkpoints = new Checkpoint[partitionCount]; + TestBase.logInfo("Creating checkpoints for all partitions"); + for (int i = 0; i < partitionCount; i++) { + // Arbitrary values, just checking that they are persisted + checkpoints[i] = new Checkpoint(String.valueOf(i)); + checkpoints[i].setOffset(String.valueOf(i * 234)); + checkpoints[i].setSequenceNumber(i + 77); + this.checkpointManagers[0].updateCheckpoint(leases.get(String.valueOf(i)), checkpoints[i]).get(); + } + + TestBase.logInfo("Getting checkpoints for all partitions and verifying"); + for (int i = 0; i < partitionCount; i++) { + Checkpoint blah = this.checkpointManagers[0].getCheckpoint(String.valueOf(i)).get(); + assertNotNull("failed to retrieve checkpoint for " + i, blah); + assertEquals("retrieved offset does not match written offset", blah.getOffset(), checkpoints[i].getOffset()); + assertEquals("retrieved seqno does not match written seqno", blah.getSequenceNumber(), checkpoints[i].getSequenceNumber()); + } + + // Have to release the leases before we can delete the store. 
+ if (useAzureStorage) { + for (CompleteLease l : leases.values()) { + this.leaseManagers[0].releaseLease(l).get(); + } + } + + TestBase.logInfo("Cleaning up checkpoint store"); + this.checkpointManagers[0].deleteCheckpointStore().get(); + } + + public void twoManagerCheckpointSmokeTest(boolean useAzureStorage, int partitionCount) throws Exception { + this.leaseManagers = new ILeaseManager[2]; + this.checkpointManagers = new ICheckpointManager[2]; + this.hosts = new EventProcessorHost[2]; + String containerName = generateContainerName(null); + setupOneManager(useAzureStorage, 0, "twoCheckpoint", containerName); + setupOneManager(useAzureStorage, 1, "twoCheckpoint", containerName); + + TestBase.logInfo("Check whether checkpoint store exists before create"); + boolean boolret = this.checkpointManagers[0].checkpointStoreExists().get(); + assertFalse("checkpoint store should not exist yet", boolret); + + TestBase.logInfo("Second manager create checkpoint store"); + if (useAzureStorage) { + // Storage implementation optimizes checkpoint store creation to a no-op. Have to call lease manager + // to actually create. + this.leaseManagers[1].createLeaseStoreIfNotExists().get(); + } else { + this.checkpointManagers[1].createCheckpointStoreIfNotExists().get(); + } + + TestBase.logInfo("First mananger check whether checkpoint store exists after create"); + boolret = this.checkpointManagers[0].checkpointStoreExists().get(); + assertTrue("checkpoint store should exist but does not", boolret); + + ArrayList partitionIds = new ArrayList(); + for (int i = 0; i < partitionCount; i++) { + partitionIds.add(String.valueOf(i)); + } + TestBase.logInfo("Create checkpoint holders for all partitions"); + if (useAzureStorage) { + // Storage implementation optimizes checkpoint creation to a no-op. Have to create the leases instead. 
+ this.leaseManagers[0].createAllLeasesIfNotExists(partitionIds); + } else { + this.checkpointManagers[0].createAllCheckpointsIfNotExists(partitionIds); + } + + TestBase.logInfo("Try to get each others checkpoints for all partitions"); + for (int i = 0; i < partitionCount; i++) { + Checkpoint blah = this.checkpointManagers[(i + 1) % 2].getCheckpoint(String.valueOf(i)).get(); + assertNull("unexpected successful retrieve checkpoint for " + i, blah); + } + + // AzureStorageCheckpointLeaseManager tries to pretend that checkpoints and leases are separate, but they really aren't. + // Because the checkpoint data is stored in the lease, updating the checkpoint means updating the lease, and it is + // necessary to hold the lease in order to update it. + HashMap leases = new HashMap(); + if (useAzureStorage) { + for (int i = 0; i < partitionCount; i++) { + CompleteLease l = this.leaseManagers[1].getLease(partitionIds.get(i)).get(); + leases.put(l.getPartitionId(), l); + boolret = this.leaseManagers[1].acquireLease(l).get(); + assertTrue("failed to acquire lease for " + l.getPartitionId(), boolret); + } + } + + Checkpoint[] checkpoints = new Checkpoint[partitionCount]; + TestBase.logInfo("Second manager update checkpoints for all partitions"); + for (int i = 0; i < partitionCount; i++) { + // Arbitrary values, just checking that they are persisted + checkpoints[i] = new Checkpoint(String.valueOf(i)); + checkpoints[i].setOffset(String.valueOf(i * 234)); + checkpoints[i].setSequenceNumber(i + 77); + this.checkpointManagers[1].updateCheckpoint(leases.get(String.valueOf(i)), checkpoints[i]).get(); + } + + TestBase.logInfo("First manager get and verify checkpoints for all partitions"); + for (int i = 0; i < partitionCount; i++) { + Checkpoint blah = this.checkpointManagers[0].getCheckpoint(String.valueOf(i)).get(); + assertNotNull("failed to retrieve checkpoint for " + i, blah); + assertEquals("retrieved offset does not match written offset", blah.getOffset(), 
checkpoints[i].getOffset()); + assertEquals("retrieved seqno does not match written seqno", blah.getSequenceNumber(), checkpoints[i].getSequenceNumber()); + } + + // Have to release the leases before we can delete the store. + if (useAzureStorage) { + for (CompleteLease l : leases.values()) { + assertNotNull("failed to retrieve lease", l); + this.leaseManagers[1].releaseLease(l).get(); + } + } + + TestBase.logInfo("Clean up checkpoint store"); + this.checkpointManagers[0].deleteCheckpointStore().get(); + } + + private String generateContainerName(String infix) { + StringBuilder containerName = new StringBuilder(64); + containerName.append("ckptmgrtest-"); + if (infix != null) { + containerName.append(infix); + containerName.append('-'); + } + containerName.append(UUID.randomUUID().toString()); + return containerName.toString(); + } + + private void setupOneManager(boolean useAzureStorage, int index, String suffix, String containerName) throws Exception { + ILeaseManager leaseMgr = null; + ICheckpointManager checkpointMgr = null; + + if (!useAzureStorage) { + leaseMgr = new InMemoryLeaseManager(); + checkpointMgr = new InMemoryCheckpointManager(); + } else { + TestBase.logInfo("Container name: " + containerName); + String azureStorageConnectionString = TestUtilities.getStorageConnectionString(); + AzureStorageCheckpointLeaseManager azMgr = new AzureStorageCheckpointLeaseManager(azureStorageConnectionString, containerName); + leaseMgr = azMgr; + checkpointMgr = azMgr; + } + + // Host name needs to be unique per host so use index. Event hub should be the same for all hosts in a test, so use the supplied suffix. 
+ EventProcessorHost host = new EventProcessorHost("dummyHost" + String.valueOf(index), RealEventHubUtilities.syntacticallyCorrectDummyEventHubPath + suffix, + EventHubClient.DEFAULT_CONSUMER_GROUP_NAME, RealEventHubUtilities.syntacticallyCorrectDummyConnectionString + suffix, checkpointMgr, leaseMgr); + + + try { + if (!useAzureStorage) { + ((InMemoryLeaseManager) leaseMgr).initialize(host.getHostContext()); + ((InMemoryCheckpointManager) checkpointMgr).initialize(host.getHostContext()); + } else { + ((AzureStorageCheckpointLeaseManager) checkpointMgr).initialize(host.getHostContext()); + } + } catch (Exception e) { + TestBase.logError("Manager initializion failed"); + throw e; + } + + this.leaseManagers[index] = leaseMgr; + this.checkpointManagers[index] = checkpointMgr; + this.hosts[index] = host; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/DummyPump.java b/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/DummyPump.java new file mode 100644 index 0000000000000..543dda08cf01a --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/DummyPump.java @@ -0,0 +1,82 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ + +package com.microsoft.azure.eventprocessorhost; + +import com.microsoft.azure.eventhubs.EventData; + +import java.util.concurrent.Callable; +import java.util.concurrent.CompletableFuture; +import java.util.function.Consumer; + + +class DummyPump extends PumpManager { + public DummyPump(HostContext hostContext, Closable parent) { + super(hostContext, parent); + } + + Iterable getPumpsList() { + return this.pumpStates.keySet(); + } + + @Override + protected PartitionPump createNewPump(CompleteLease lease) { + return new DummyPartitionPump(this.hostContext, lease, this, this); + } + + @Override + protected void removingPumpTestHook(String partitionId) { + TestBase.logInfo("Steal detected, host " + this.hostContext.getHostName() + " removing " + partitionId); + } + + + private class DummyPartitionPump extends PartitionPump implements Callable { + CompletableFuture blah = null; + + DummyPartitionPump(HostContext hostContext, CompleteLease lease, Closable parent, Consumer pumpManagerCallback) { + super(hostContext, lease, parent, pumpManagerCallback); + } + + @Override + CompletableFuture startPump() { + this.blah = new CompletableFuture(); + ((InMemoryLeaseManager) this.hostContext.getLeaseManager()).notifyOnSteal(this.hostContext.getHostName(), this.lease.getPartitionId(), this); + super.scheduleLeaseRenewer(); + return this.blah; + } + + @Override + protected void internalShutdown(CloseReason reason, Throwable e) { + super.cancelPendingOperations(); + if (e != null) { + this.blah.completeExceptionally(e); + } else { + this.blah.complete(null); + } + } + + @Override + CompletableFuture shutdown(CloseReason reason) { + internalShutdown(reason, null); + return this.blah; + } + + @Override + public void onReceive(Iterable events) { + } + + @Override + public void onError(Throwable error) { + } + + @Override + public Void call() { + if (this.blah != null) { + this.blah.completeExceptionally(new LeaseLostException(this.lease, "lease stolen")); + } + return null; + } 
+ } +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/EPHConstructorTests.java b/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/EPHConstructorTests.java new file mode 100644 index 0000000000000..6d3bfd9521893 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/EPHConstructorTests.java @@ -0,0 +1,282 @@ +package com.microsoft.azure.eventprocessorhost; + +import org.junit.Test; + +import static org.junit.Assert.fail; + +public class EPHConstructorTests extends TestBase { + @Test + public void conflictingEventHubPathsTest() throws Exception { + PerTestSettings settings = new PerTestSettings("ConflictingEventHubPaths"); + settings.inHasSenders = false; + settings.inEventHubDoesNotExist = true; + settings.inoutEPHConstructorArgs.dummyStorageConnection(); + + settings.inoutEPHConstructorArgs.setEHPath("thisisdifferentfromtheconnectionstring", PerTestSettings.EPHConstructorArgs.EH_PATH_OVERRIDE); + + try { + settings = testSetup(settings); + fail("No exception occurred"); + } catch (IllegalArgumentException e) { + if ((e.getMessage() != null) && (e.getMessage().compareTo("Provided EventHub path in eventHubPath parameter conflicts with the path in provided EventHub connection string") == 0)) { + TestBase.logInfo("Got expected exception"); + } else { + throw e; + } + } finally { + testFinish(settings, NO_CHECKS); + } + } + + @Test + public void missingEventHubPathTest() throws Exception { + PerTestSettings settings = new PerTestSettings("MissingEventHubPath"); + settings.inHasSenders = false; + settings.inEventHubDoesNotExist = true; + settings.inoutEPHConstructorArgs.dummyStorageConnection(); + + settings.inoutEPHConstructorArgs.setEHPath("", PerTestSettings.EPHConstructorArgs.EH_PATH_OVERRIDE_AND_REPLACE); + + try { + settings = testSetup(settings); + fail("No exception occurred"); + } catch 
(IllegalArgumentException e) { + if ((e.getMessage() != null) && (e.getMessage().compareTo("Provide EventHub entity path in either eventHubPath argument or in eventHubConnectionString") == 0)) { + TestBase.logInfo("Got expected exception"); + } else { + throw e; + } + } finally { + testFinish(settings, NO_CHECKS); + } + } + + @Test + public void nullHostNameTest() throws Exception { + PerTestSettings settings = new PerTestSettings("NullHostName"); + settings.inHasSenders = false; + settings.inEventHubDoesNotExist = true; + settings.inoutEPHConstructorArgs.dummyStorageConnection(); + + settings.inoutEPHConstructorArgs.setHostName(null); + + try { + settings = testSetup(settings); + fail("No exception occurred"); + } catch (IllegalArgumentException e) { + TestBase.logInfo("Got expected exception"); + } finally { + testFinish(settings, NO_CHECKS); + } + } + + @Test + public void emptyHostNameTest() throws Exception { + PerTestSettings settings = new PerTestSettings("EmptyHostName"); + settings.inHasSenders = false; + settings.inEventHubDoesNotExist = true; + settings.inoutEPHConstructorArgs.dummyStorageConnection(); + + settings.inoutEPHConstructorArgs.setHostName(""); + + try { + settings = testSetup(settings); + fail("No exception occurred"); + } catch (IllegalArgumentException e) { + TestBase.logInfo("Got expected exception"); + } finally { + testFinish(settings, NO_CHECKS); + } + } + + @Test + public void nullConsumerGroupNameTest() throws Exception { + PerTestSettings settings = new PerTestSettings("NullConsumerGroupName"); + settings.inHasSenders = false; + settings.inEventHubDoesNotExist = true; + settings.inoutEPHConstructorArgs.dummyStorageConnection(); + + settings.inoutEPHConstructorArgs.setConsumerGroupName(null); + + try { + settings = testSetup(settings); + fail("No exception occurred"); + } catch (IllegalArgumentException e) { + TestBase.logInfo("Got expected exception"); + } finally { + testFinish(settings, NO_CHECKS); + } + } + + @Test + public void 
emptyConsumerGroupNameTest() throws Exception { + PerTestSettings settings = new PerTestSettings("EmptyConsumerGroupName"); + settings.inHasSenders = false; + settings.inEventHubDoesNotExist = true; + settings.inoutEPHConstructorArgs.dummyStorageConnection(); + + settings.inoutEPHConstructorArgs.setConsumerGroupName(""); + + try { + settings = testSetup(settings); + fail("No exception occurred"); + } catch (IllegalArgumentException e) { + TestBase.logInfo("Got expected exception"); + } finally { + testFinish(settings, NO_CHECKS); + } + } + + @Test + public void nullEHConnectionStringTest() throws Exception { + PerTestSettings settings = new PerTestSettings("NullEHConnectionString"); + settings.inHasSenders = false; + settings.inEventHubDoesNotExist = true; + settings.inoutEPHConstructorArgs.dummyStorageConnection(); + + settings.inoutEPHConstructorArgs.setEHConnection(null); + + try { + settings = testSetup(settings); + fail("No exception occurred"); + } catch (IllegalArgumentException e) { + TestBase.logInfo("Got expected exception"); + } finally { + testFinish(settings, NO_CHECKS); + } + } + + @Test + public void emptyEHConnectionStringTest() throws Exception { + PerTestSettings settings = new PerTestSettings("EmptyEHConnectionString"); + settings.inHasSenders = false; + settings.inEventHubDoesNotExist = true; + settings.inoutEPHConstructorArgs.dummyStorageConnection(); + + settings.inoutEPHConstructorArgs.setEHConnection(""); + + try { + settings = testSetup(settings); + fail("No exception occurred"); + } catch (IllegalArgumentException e) { + TestBase.logInfo("Got expected exception"); + } finally { + testFinish(settings, NO_CHECKS); + } + } + + @Test + public void ehPathOnlySeparateTest() throws Exception { + PerTestSettings settings = new PerTestSettings("EHPathOnlySeparate"); + settings.inHasSenders = false; + settings.inEventHubDoesNotExist = true; + settings.inoutEPHConstructorArgs.dummyStorageConnection(); + + 
settings.inoutEPHConstructorArgs.removePathFromEHConnection(); + + try { + settings = testSetup(settings); + } finally { + testFinish(settings, NO_CHECKS); + } + } + + @Test + public void ehPathOnlyInConnStringTest() throws Exception { + PerTestSettings settings = new PerTestSettings("EHPathOnlyInConnString"); + settings.inHasSenders = false; + settings.inEventHubDoesNotExist = true; + settings.inoutEPHConstructorArgs.dummyStorageConnection(); + + settings.inoutEPHConstructorArgs.setEHPath("", PerTestSettings.EPHConstructorArgs.EH_PATH_OVERRIDE); + + try { + settings = testSetup(settings); + } finally { + testFinish(settings, NO_CHECKS); + } + } + + @Test + public void nullCheckpointManagerTest() throws Exception { + PerTestSettings settings = new PerTestSettings("NullCheckpointManager"); + settings.inHasSenders = false; + settings.inEventHubDoesNotExist = true; + settings.inoutEPHConstructorArgs.dummyStorageConnection(); + + settings.inoutEPHConstructorArgs.setCheckpointManager(null); + + try { + settings = testSetup(settings); + fail("No exception occurred"); + } catch (IllegalArgumentException e) { + TestBase.logInfo("Got expected exception"); + } finally { + testFinish(settings, NO_CHECKS); + } + } + + @Test + public void nullLeaseManagerTest() throws Exception { + PerTestSettings settings = new PerTestSettings("NullLeaseManager"); + settings.inHasSenders = false; + settings.inEventHubDoesNotExist = true; + settings.inoutEPHConstructorArgs.dummyStorageConnection(); + + settings.inoutEPHConstructorArgs.setLeaseManager(null); + + try { + settings = testSetup(settings); + fail("No exception occurred"); + } catch (IllegalArgumentException e) { + TestBase.logInfo("Got expected exception"); + } finally { + testFinish(settings, NO_CHECKS); + } + } + + @Test + public void nullStorageConnectionStringTest() throws Exception { + PerTestSettings settings = new PerTestSettings("NullStorageConnectionString"); + settings.inHasSenders = false; + settings.inEventHubDoesNotExist 
= true; + + settings.inoutEPHConstructorArgs.setStorageConnection(null); + try { + settings = testSetup(settings); + fail("No exception occurred"); + } catch (IllegalArgumentException e) { + TestBase.logInfo("Got expected exception"); + } finally { + testFinish(settings, NO_CHECKS); + } + } + + @Test + public void emptyStorageConnectionStringTest() throws Exception { + PerTestSettings settings = new PerTestSettings("EmptyStorageConnectionString"); + settings.inHasSenders = false; + settings.inEventHubDoesNotExist = true; + + settings.inoutEPHConstructorArgs.setStorageConnection(""); + + try { + settings = testSetup(settings); + fail("No exception occurred"); + } catch (IllegalArgumentException e) { + TestBase.logInfo("Got expected exception"); + } finally { + testFinish(settings, NO_CHECKS); + } + } + + // TODO + // @Test + // public void verifyStorageContainerNameTest() throws Exception + // Uses Storage APIs to check that the expected container has been created. + + // TODO + // @Test + // public void verifyStorageBlobPrefixTest() throws Exception + // Uses Storage APIs to check that the blobs have the expected prefix in their names +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/LeaseManagerTest.java b/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/LeaseManagerTest.java new file mode 100644 index 0000000000000..00f4d06c3e353 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/LeaseManagerTest.java @@ -0,0 +1,307 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ + +package com.microsoft.azure.eventprocessorhost; + +import com.microsoft.azure.eventhubs.EventHubClient; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutionException; + +import static org.junit.Assert.*; + +public class LeaseManagerTest extends TestBase { + private ILeaseManager[] leaseManagers; + private EventProcessorHost[] hosts; + + @Test + public void singleManangerInMemoryLeaseSmokeTest() throws Exception { + singleManagerLeaseSmokeTest(false, 8); + } + + @Test + public void singleManagerAzureLeaseSmokeTest() throws Exception { + singleManagerLeaseSmokeTest(true, 8); + } + + @Test + public void twoManagerInMemoryLeaseStealingTest() throws Exception { + twoManagerLeaseStealingTest(false); + } + + @Test + public void twoManangerAzureLeaseStealingTest() throws Exception { + twoManagerLeaseStealingTest(true); + } + + void singleManagerLeaseSmokeTest(boolean useAzureStorage, int partitionCount) throws Exception { + this.leaseManagers = new ILeaseManager[1]; + this.hosts = new EventProcessorHost[1]; + setupOneManager(useAzureStorage, 0, "0", generateContainerName("0")); + + TestBase.logInfo("Check whether lease store exists before create"); + Boolean boolret = this.leaseManagers[0].leaseStoreExists().get(); + assertFalse("lease store should not exist yet", boolret); + + TestBase.logInfo("Creating lease store"); + this.leaseManagers[0].createLeaseStoreIfNotExists().get(); + + TestBase.logInfo("Checking whether lease store exists after create"); + boolret = this.leaseManagers[0].leaseStoreExists().get(); + assertTrue("lease store should exist but does not", boolret); + + ArrayList partitionIds = new ArrayList(); + for (int i = 0; i < partitionCount; i++) { + partitionIds.add(String.valueOf(i)); + } + TestBase.logInfo("Creating leases for all partitions"); + this.leaseManagers[0].createAllLeasesIfNotExists(partitionIds).get(); // throws on failure + + CompleteLease[] 
leases = new CompleteLease[partitionCount]; + TestBase.logInfo("Getting leases for all partitions"); + for (int i = 0; i < partitionIds.size(); i++) { + leases[i] = this.leaseManagers[0].getLease(partitionIds.get(i)).get(); + assertNotNull("getLease returned null", leases[i]); + } + + TestBase.logInfo("Acquiring leases for all partitions"); + for (int i = 0; i < partitionCount; i++) { + if (useAzureStorage) { + TestBase.logInfo("Partition " + i + " state before: " + leases[i].getStateDebug()); + } + boolret = this.leaseManagers[0].acquireLease(leases[i]).get(); + assertTrue("failed to acquire lease for " + i, boolret); + if (useAzureStorage) { + TestBase.logInfo("Partition " + i + " state after: " + leases[i].getStateDebug()); + } + } + + Thread.sleep(5000); + + TestBase.logInfo("Getting state for all leases"); + List states = this.leaseManagers[0].getAllLeases().get(); // throws on failure + for (BaseLease s : states) { + TestBase.logInfo("Partition " + s.getPartitionId() + " owned by " + s.getOwner() + " isowned: " + s.getIsOwned()); + } + + TestBase.logInfo("Renewing leases for all partitions"); + for (int i = 0; i < partitionCount; i++) { + if (useAzureStorage) { + TestBase.logInfo("Partition " + i + " state before: " + leases[i].getStateDebug()); + } + boolret = this.leaseManagers[0].renewLease(leases[i]).get(); + assertTrue("failed to renew lease for " + i, boolret); + if (useAzureStorage) { + TestBase.logInfo("Partition " + i + " state after: " + leases[i].getStateDebug()); + } + } + + int x = 1; + while (getOneState(leases[0].getPartitionId(), this.leaseManagers[0]).getIsOwned()) { + Thread.sleep(5000); + TestBase.logInfo("Still waiting for lease on 0 to expire: " + (5 * x)); + assertFalse("lease 0 expiration is overdue", (5000 * x) > (this.leaseManagers[0].getLeaseDurationInMilliseconds() + 10000)); + for (int i = 1; i < partitionCount; i++) { + boolret = this.leaseManagers[0].renewLease(leases[i]).get(); + assertTrue("failed to renew lease for " + i, 
boolret); + } + x++; + } + + TestBase.logInfo("Updating lease 1"); + leases[1].setEpoch(5); + boolret = this.leaseManagers[0].updateLease(leases[1]).get(); + assertTrue("failed to update lease for 1", boolret); + CompleteLease retrievedLease = this.leaseManagers[0].getLease("1").get(); + assertNotNull("failed to get lease for 1", retrievedLease); + assertEquals("epoch was not persisted, expected " + leases[1].getEpoch() + " got " + retrievedLease.getEpoch(), leases[1].getEpoch(), retrievedLease.getEpoch()); + + // Release for 0 should not throw even though lease has expired -- it just won't do anything + TestBase.logInfo("Trying to release expired lease 0"); + this.leaseManagers[0].releaseLease(leases[0]).get(); + + // Renew for 0 succeeds even though it has expired. + // This is the behavior of AzureStorageCheckpointLeaseManager, which is dictated by the behavior of Azure Storage leases. + TestBase.logInfo("Renewing expired lease 0"); + boolret = this.leaseManagers[0].renewLease(leases[0]).get(); + assertTrue("renew lease on 0 failed unexpectedly", boolret); + + TestBase.logInfo("Releasing leases for all partitions"); + for (int i = 0; i < partitionCount; i++) { + if (useAzureStorage) { + TestBase.logInfo("Partition " + i + " state before: " + leases[i].getStateDebug()); + } + this.leaseManagers[0].releaseLease(leases[i]).get(); + if (useAzureStorage) { + TestBase.logInfo("Partition " + i + " state after: " + leases[i].getStateDebug()); + } + } + + TestBase.logInfo("Trying to acquire released lease 0"); + boolret = this.leaseManagers[0].acquireLease(leases[0]).get(); + assertTrue("failed to acquire previously released 0", boolret); + + TestBase.logInfo("Trying to release lease 0"); + this.leaseManagers[0].releaseLease(leases[0]).get(); + + TestBase.logInfo("Cleaning up lease store"); + this.leaseManagers[0].deleteLeaseStore().get(); + } + + + void twoManagerLeaseStealingTest(boolean useAzureStorage) throws Exception { + this.leaseManagers = new ILeaseManager[2]; + 
this.hosts = new EventProcessorHost[2]; + String containerName = generateContainerName(null); + setupOneManager(useAzureStorage, 0, "StealTest", containerName); + setupOneManager(useAzureStorage, 1, "StealTest", containerName); + + TestBase.logInfo("Check whether lease store exists before create"); + Boolean boolret = this.leaseManagers[0].leaseStoreExists().get(); + assertFalse("lease store should not exist yet", boolret); + + TestBase.logInfo("Creating lease store"); + this.leaseManagers[0].createLeaseStoreIfNotExists().get(); + + TestBase.logInfo("Check whether lease store exists after create"); + boolret = this.leaseManagers[0].leaseStoreExists().get(); + assertTrue("lease store should exist but does not", boolret); + + TestBase.logInfo("Check whether second manager can see lease store"); + boolret = this.leaseManagers[1].leaseStoreExists().get(); + assertTrue("second manager cannot see lease store", boolret); + + TestBase.logInfo("First manager creating lease for partition 0"); + ArrayList partitionIds = new ArrayList(); + partitionIds.add("0"); + this.leaseManagers[0].createAllLeasesIfNotExists(partitionIds).get(); + + TestBase.logInfo("Checking whether second manager can see lease 0"); + CompleteLease mgr2Lease = this.leaseManagers[1].getLease("0").get(); + assertNotNull("second manager cannot see lease for 0", mgr2Lease); + + TestBase.logInfo("Checking whether first manager can see lease 0"); + CompleteLease mgr1Lease = this.leaseManagers[0].getLease("0").get(); + assertNotNull("second manager cannot see lease for 0", mgr1Lease); + + TestBase.logInfo("First manager acquiring lease 0"); + boolret = this.leaseManagers[0].acquireLease(mgr1Lease).get(); + assertTrue("first manager failed acquiring lease for 0", boolret); + if (useAzureStorage) { + TestBase.logInfo("Lease token is " + ((AzureBlobLease)mgr1Lease).getToken()); + } + + int x = 0; + while (getOneState("0", this.leaseManagers[0]).getIsOwned()) { + assertFalse("lease 0 expiration is overdue", (5000 * 
x) > (this.leaseManagers[0].getLeaseDurationInMilliseconds() + 10000)); + Thread.sleep(5000); + TestBase.logInfo("Still waiting for lease on 0 to expire: " + (5 * ++x)); + } + + TestBase.logInfo("Second manager acquiring lease 0"); + boolret = this.leaseManagers[1].acquireLease(mgr2Lease).get(); + assertTrue("second manager failed acquiring expired lease for 0", boolret); + if (useAzureStorage) { + TestBase.logInfo("Lease token is " + ((AzureBlobLease)mgr2Lease).getToken()); + } + + TestBase.logInfo("First manager trying to renew lease 0"); + boolret = this.leaseManagers[0].renewLease(mgr1Lease).get(); + assertFalse("first manager unexpected success renewing lease for 0", boolret); + + TestBase.logInfo("First manager getting lease 0"); + mgr1Lease = this.leaseManagers[0].getLease("0").get(); + assertNotNull("first manager cannot see lease for 0", mgr1Lease); + + TestBase.logInfo("First manager stealing lease 0"); + boolret = this.leaseManagers[0].acquireLease(mgr1Lease).get(); + assertTrue("first manager failed stealing lease 0", boolret); + if (useAzureStorage) { + TestBase.logInfo("Lease token is " + ((AzureBlobLease)mgr1Lease).getToken()); + } + + TestBase.logInfo("Second mananger getting lease 0"); + mgr2Lease = this.leaseManagers[1].getLease("0").get(); + assertNotNull("second manager cannot see lease for 0", mgr2Lease); + + TestBase.logInfo("Second mananger stealing lease 0"); + boolret = this.leaseManagers[1].acquireLease(mgr2Lease).get(); + assertTrue("second manager failed stealing lease 0", boolret); + if (useAzureStorage) { + TestBase.logInfo("Lease token is " + ((AzureBlobLease)mgr2Lease).getToken()); + } + + TestBase.logInfo("Second mananger releasing lease 0"); + this.leaseManagers[1].releaseLease(mgr2Lease).get(); + + // Won't do anything because first manager didn't own lease 0, but shouldn't throw either + TestBase.logInfo("First mananger tyring to release lease 0"); + this.leaseManagers[0].releaseLease(mgr1Lease).get(); + + 
TestBase.logInfo("Cleaning up lease store"); + this.leaseManagers[1].deleteLeaseStore().get(); + } + + private String generateContainerName(String infix) { + StringBuilder containerName = new StringBuilder(64); + containerName.append("leasemgrtest-"); + if (infix != null) { + containerName.append(infix); + containerName.append('-'); + } + containerName.append(UUID.randomUUID().toString()); + return containerName.toString(); + } + + private BaseLease getOneState(String partitionId, ILeaseManager leaseMgr) throws InterruptedException, ExecutionException { + List states = leaseMgr.getAllLeases().get(); + BaseLease returnState = null; + for (BaseLease s : states) { + if (s.getPartitionId().compareTo(partitionId) == 0) { + returnState = s; + break; + } + } + return returnState; + } + + private void setupOneManager(boolean useAzureStorage, int index, String suffix, String containerName) throws Exception { + ILeaseManager leaseMgr = null; + ICheckpointManager checkpointMgr = null; + + if (!useAzureStorage) { + leaseMgr = new InMemoryLeaseManager(); + checkpointMgr = new InMemoryCheckpointManager(); + } else { + TestBase.logInfo("Container name: " + containerName); + String azureStorageConnectionString = TestUtilities.getStorageConnectionString(); + AzureStorageCheckpointLeaseManager azMgr = new AzureStorageCheckpointLeaseManager(azureStorageConnectionString, containerName); + leaseMgr = azMgr; + checkpointMgr = azMgr; + } + + // Host name needs to be unique per host so use index. Event hub should be the same for all hosts in a test, so use the supplied suffix. 
+ EventProcessorHost host = new EventProcessorHost("dummyHost" + String.valueOf(index), RealEventHubUtilities.syntacticallyCorrectDummyEventHubPath + suffix, + EventHubClient.DEFAULT_CONSUMER_GROUP_NAME, RealEventHubUtilities.syntacticallyCorrectDummyConnectionString + suffix, checkpointMgr, leaseMgr); + + try { + if (!useAzureStorage) { + ((InMemoryLeaseManager) leaseMgr).initialize(host.getHostContext()); + ((InMemoryCheckpointManager) checkpointMgr).initialize(host.getHostContext()); + } else { + ((AzureStorageCheckpointLeaseManager) leaseMgr).initialize(host.getHostContext()); + } + } catch (Exception e) { + TestBase.logError("Manager initializion failed"); + throw e; + } + + this.leaseManagers[index] = leaseMgr; + this.hosts[index] = host; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/PartitionManagerTest.java b/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/PartitionManagerTest.java new file mode 100644 index 0000000000000..d604715e91e82 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/PartitionManagerTest.java @@ -0,0 +1,440 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ + +package com.microsoft.azure.eventprocessorhost; + +import com.microsoft.azure.eventhubs.EventHubClient; + +import java.util.ArrayList; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; + +import org.junit.Test; +import static org.junit.Assert.assertTrue; + +public class PartitionManagerTest extends TestBase { + private ILeaseManager[] leaseManagers; + private ICheckpointManager[] checkpointManagers; + private EventProcessorHost[] hosts; + private TestPartitionManager[] partitionManagers; + private int partitionCount; + private boolean[] running; + + private int countOfChecks; + private int desiredDistributionDetected; + + private boolean keepGoing; + private boolean expectEqualDistribution; + private int overrideHostCount = -1; + private int maxChecks; + private boolean shuttingDown; + + @Test + public void partitionBalancingExactMultipleTest() throws Exception { + setup(2, 4, 0, 0); // two hosts, four partitions, no latency, default threadpool + this.countOfChecks = 0; + this.desiredDistributionDetected = 0; + this.keepGoing = true; + this.expectEqualDistribution = true; + this.maxChecks = 20; + startManagers(); + + // Poll until checkPartitionDistribution() declares that it's time to stop. 
+ while (this.keepGoing) { + try { + Thread.sleep(15000); + } catch (InterruptedException e) { + TestBase.logError("Sleep interrupted, emergency bail"); + Thread.currentThread().interrupt(); + throw e; + } + } + + stopManagers(); + + assertTrue("Desired distribution never reached or was not stable", this.desiredDistributionDetected >= this.partitionManagers.length); + + this.leaseManagers[0].deleteLeaseStore().get(); + this.checkpointManagers[0].deleteCheckpointStore().get(); + } + + @Test + public void partitionBalancingUnevenTest() throws Exception { + setup(5, 16, 250, 0); // five hosts, sixteen partitions, 250ms latency, default threadpool + this.countOfChecks = 0; + this.desiredDistributionDetected = 0; + this.keepGoing = true; + this.expectEqualDistribution = false; + this.maxChecks = 35; + startManagers(); + + // Poll until checkPartitionDistribution() declares that it's time to stop. + while (this.keepGoing) { + try { + Thread.sleep(15000); + } catch (InterruptedException e) { + TestBase.logError("Sleep interrupted, emergency bail"); + Thread.currentThread().interrupt(); + throw e; + } + } + + stopManagers(); + + assertTrue("Desired distribution never reached or was not stable", this.desiredDistributionDetected >= this.partitionManagers.length); + + this.leaseManagers[0].deleteLeaseStore().get(); + this.checkpointManagers[0].deleteCheckpointStore().get(); + } + + + @Test + public void partitionBalancingHugeTest() throws Exception { + setup(10, 201, 250, 20); // ten hosts, 201 partitions, 250ms latency, threadpool with 20 threads + this.countOfChecks = 0; + this.desiredDistributionDetected = 0; + this.keepGoing = true; + this.expectEqualDistribution = false; + this.maxChecks = 99; + startManagers(); + + // Poll until checkPartitionDistribution() declares that it's time to stop. 
+ while (this.keepGoing) { + try { + Thread.sleep(15000); + } catch (InterruptedException e) { + TestBase.logError("Sleep interrupted, emergency bail"); + Thread.currentThread().interrupt(); + throw e; + } + } + + stopManagers(); + + assertTrue("Desired distribution never reached or was not stable", this.desiredDistributionDetected >= this.partitionManagers.length); + + this.leaseManagers[0].deleteLeaseStore().get(); + this.checkpointManagers[0].deleteCheckpointStore().get(); + } + + @Test + public void partitionRebalancingTest() throws Exception { + setup(3, 8, 0, 8); // three hosts, eight partitions, no latency, threadpool with 8 threads + + // + // Start two hosts of three, expect 4/4/0. + // + this.countOfChecks = 0; + this.desiredDistributionDetected = 0; + this.keepGoing = true; + this.expectEqualDistribution = true; // only going to start two of the three hosts + this.maxChecks = 20; + this.overrideHostCount = 2; + startManagers(2); + while (this.keepGoing) { + try { + Thread.sleep(15000); + } catch (InterruptedException e) { + TestBase.logError("Sleep interrupted, emergency bail"); + Thread.currentThread().interrupt(); + throw e; + } + } + assertTrue("Desired distribution 4/4/0 never reached or was not stable", this.desiredDistributionDetected >= this.partitionManagers.length); + + // + // Start up the third host and wait for rebalance + // + this.countOfChecks = 0; + this.desiredDistributionDetected = 0; + this.keepGoing = true; + this.expectEqualDistribution = false; + this.maxChecks = 30; + this.overrideHostCount = 3; + startSingleManager(2); + while (this.keepGoing) { + try { + Thread.sleep(15000); + } catch (InterruptedException e) { + TestBase.logError("Sleep interrupted, emergency bail"); + Thread.currentThread().interrupt(); + throw e; + } + } + assertTrue("Desired distribution never reached or was not stable", this.desiredDistributionDetected >= this.partitionManagers.length); + + // + // Now stop host 0 and wait for 0/4/4 + // + this.countOfChecks = 
0; + this.desiredDistributionDetected = 0; + this.keepGoing = true; + this.expectEqualDistribution = true; // only two of the three hosts running + this.maxChecks = 20; + this.overrideHostCount = 2; + stopSingleManager(0); + while (this.keepGoing) { + try { + Thread.sleep(15000); + } catch (InterruptedException e) { + TestBase.logError("Sleep interrupted, emergency bail"); + Thread.currentThread().interrupt(); + throw e; + } + } + assertTrue("Desired distribution 4/4/0 never reached or was not stable", this.desiredDistributionDetected >= this.partitionManagers.length); + + stopManagers(); + + this.leaseManagers[1].deleteLeaseStore().get(); + this.checkpointManagers[1].deleteCheckpointStore().get(); + } + + @Test + public void partitionBalancingTooManyHostsTest() throws Exception { + setup(10, 4, 0, 8); // ten hosts, four partitions + this.countOfChecks = 0; + this.desiredDistributionDetected = 0; + this.keepGoing = true; + this.expectEqualDistribution = false; + this.maxChecks = 20; + startManagers(); + + // Poll until checkPartitionDistribution() declares that it's time to stop. 
+ while (this.keepGoing) { + try { + Thread.sleep(15000); + } catch (InterruptedException e) { + TestBase.logError("Sleep interrupted, emergency bail"); + Thread.currentThread().interrupt(); + throw e; + } + } + + stopManagers(); + + assertTrue("Desired distribution never reached or was not stable", this.desiredDistributionDetected >= this.partitionManagers.length); + + this.leaseManagers[0].deleteLeaseStore().get(); + this.checkpointManagers[0].deleteCheckpointStore().get(); + } + + synchronized void checkPartitionDistribution() { + if (this.shuttingDown) { + return; + } + + TestBase.logInfo("Checking partition distribution"); + int[] countsPerHost = new int[this.partitionManagers.length]; + int totalCounts = 0; + int runningCount = 0; + for (int i = 0; i < this.partitionManagers.length; i++) { + StringBuilder blah = new StringBuilder(); + blah.append("\tHost "); + blah.append(this.hosts[i].getHostContext().getHostName()); + blah.append(" has "); + countsPerHost[i] = 0; + for (String id : this.partitionManagers[i].getOwnedPartitions()) { + blah.append(id); + blah.append(", "); + countsPerHost[i]++; + totalCounts++; + } + TestBase.logInfo(blah.toString()); + if (this.running[i]) { + runningCount++; + } + } + + if (totalCounts != this.partitionCount) { + TestBase.logInfo("Unowned leases, " + totalCounts + " owned versus " + this.partitionCount + " partitions, skipping checks"); + return; + } + if (this.overrideHostCount > 0) { + if (runningCount != this.overrideHostCount) { + TestBase.logInfo("Hosts not running, " + this.overrideHostCount + " expected versus " + runningCount + " found, skipping checks"); + return; + } + } else if (runningCount != this.partitionManagers.length) { + TestBase.logInfo("Hosts not running, " + this.partitionManagers.length + " expected versus " + runningCount + " found, skipping checks"); + return; + } + + boolean desired = true; + int highest = Integer.MIN_VALUE; + int lowest = Integer.MAX_VALUE; + for (int i = 0; i < 
countsPerHost.length; i++) { + if (!this.running[i]) { + // Skip + } else { + highest = Integer.max(highest, countsPerHost[i]); + lowest = Integer.min(lowest, countsPerHost[i]); + } + } + TestBase.logInfo("Check " + this.countOfChecks + " Highest " + highest + " Lowest " + lowest + " Descnt " + this.desiredDistributionDetected); + if (this.expectEqualDistribution) { + // All hosts should have exactly equal counts, so highest == lowest + desired = (highest == lowest); + } else { + // An equal distribution isn't possible, but the maximum difference between counts should be 1. + // Max(counts[]) - Min(counts[]) == 1 + desired = ((highest - lowest) == 1); + } + if (desired) { + TestBase.logInfo("Evenest distribution detected"); + this.desiredDistributionDetected++; + if (this.desiredDistributionDetected > this.partitionManagers.length) { + // Every partition manager has looked at the current distribution and + // it has not changed. The algorithm is stable once it reaches the desired state. + // No need to keep iterating. + TestBase.logInfo("Desired distribution is stable"); + this.keepGoing = false; + } + } else { + if ((this.desiredDistributionDetected > 0) && !this.shuttingDown) { + // If we have detected the desired distribution on previous iterations + // but not on this one, then the algorithm is unstable. Bail and fail. + TestBase.logInfo("Desired distribution was not stable"); + this.keepGoing = false; + } + } + + this.countOfChecks++; + if (this.countOfChecks > this.maxChecks) { + // Ran out of iterations without reaching the desired distribution. Bail and fail. + this.keepGoing = false; + } + } + + private void setup(int hostCount, int partitionCount, long latency, int threads) { + // PartitionManager tests are all long. Skip if running automated (maven, appveyor, etc.) 
+ skipIfAutomated(); + + this.leaseManagers = new ILeaseManager[hostCount]; + this.checkpointManagers = new ICheckpointManager[hostCount]; + this.hosts = new EventProcessorHost[hostCount]; + this.partitionManagers = new TestPartitionManager[hostCount]; + this.partitionCount = partitionCount; + this.running = new boolean[hostCount]; + + for (int i = 0; i < hostCount; i++) { + InMemoryLeaseManager lm = new InMemoryLeaseManager(); + InMemoryCheckpointManager cm = new InMemoryCheckpointManager(); + + // In order to test hosts competing for partitions, each host must have a unique name, but they must share the + // target eventhub/consumer group. + ScheduledExecutorService threadpool = null; + if (threads > 0) { + threadpool = Executors.newScheduledThreadPool(threads); + } + this.hosts[i] = new EventProcessorHost("dummyHost" + String.valueOf(i), "NOTREAL", EventHubClient.DEFAULT_CONSUMER_GROUP_NAME, + RealEventHubUtilities.syntacticallyCorrectDummyConnectionString, cm, lm, threadpool, null); + + lm.initialize(this.hosts[i].getHostContext()); + lm.setLatency(latency); + this.leaseManagers[i] = lm; + cm.initialize(this.hosts[i].getHostContext()); + this.checkpointManagers[i] = cm; + this.running[i] = false; + + this.partitionManagers[i] = new TestPartitionManager(this.hosts[i].getHostContext(), partitionCount); + this.hosts[i].setPartitionManager(this.partitionManagers[i]); + this.hosts[i].getHostContext().setEventProcessorOptions(EventProcessorOptions.getDefaultOptions()); + // Quick lease expiration helps with some tests. Because we're using InMemoryLeaseManager, don't + // have to worry about storage latency, all lease operations are guaranteed to be fast. 
+ PartitionManagerOptions opts = new PartitionManagerOptions(); + opts.setLeaseDurationInSeconds(15); + //opts.setStartupScanDelayInSeconds(17); + //opts.setSlowScanIntervalInSeconds(15); + this.hosts[i].setPartitionManagerOptions(opts); + } + } + + private void startManagers() throws Exception { + startManagers(this.partitionManagers.length); + } + + private void startManagers(int maxIndex) throws Exception { + this.shuttingDown = false; + for (int i = 0; i < maxIndex; i++) { + startSingleManager(i); + } + } + + private void startSingleManager(int index) throws Exception { + try { + this.partitionManagers[index].initialize().get(); + this.running[index] = true; + } catch (Exception e) { + TestBase.logError("TASK START FAILED " + e.toString() + " " + e.getMessage()); + throw e; + } + } + + private void stopManagers() throws InterruptedException, ExecutionException { + TestBase.logInfo("SHUTTING DOWN"); + this.shuttingDown = true; + for (int i = 0; i < this.partitionManagers.length; i++) { + if (this.running[i]) { + this.partitionManagers[i].stopPartitions().get(); + TestBase.logInfo("Host " + i + " stopped"); + } + } + } + + private void stopSingleManager(int index) throws InterruptedException, ExecutionException { + if (this.running[index]) { + this.partitionManagers[index].stopPartitions().get(); + TestBase.logInfo("Host " + index + " stopped"); + this.running[index] = false; + } + } + + private class TestPartitionManager extends PartitionManager { + private int partitionCount; + + TestPartitionManager(HostContext hostContext, int partitionCount) { + super(hostContext); + this.partitionCount = partitionCount; + } + + Iterable getOwnedPartitions() { + Iterable retval = null; + if (this.pumpManager != null) { + retval = ((DummyPump) this.pumpManager).getPumpsList(); + } else { + // If the manager isn't started, return an empty list. 
package com.microsoft.azure.eventprocessorhost;

import java.util.ArrayList;
import java.util.concurrent.ScheduledExecutorService;

/**
 * Per-test configuration bundle for EPH tests.
 *
 * Naming convention:
 *   in*    — inputs: set before test setup; defaults come from the constructor.
 *   inout* — may be set before setup and then changed by setup.
 *   out*   — outputs: populated by test setup; any value set beforehand is ignored.
 */
public class PerTestSettings {
    // In-out properties: may be set before test setup and then changed by setup.
    final EPHConstructorArgs inoutEPHConstructorArgs;

    // Output properties: any value set before test setup is ignored. The real value is
    // established during test setup.
    RealEventHubUtilities outUtils;
    String outTelltale;
    ArrayList<String> outPartitionIds;
    PrefabGeneralErrorHandler outGeneralErrorHandler;
    PrefabProcessorFactory outProcessorFactory;
    EventProcessorHost outHost;

    // Properties which are inputs to test setup. Constructor sets up defaults, except for hostName.
    private String inDefaultHostName;
    EventProcessorOptions inOptions; // can be null
    PrefabEventProcessor.CheckpointChoices inDoCheckpoint;
    boolean inEventHubDoesNotExist; // Prevents test code from doing certain checks that would fail on nonexistence before reaching product code.
    boolean inSkipIfNoEventHubConnectionString; // Requires valid connection string even though event hub may not exist.
    boolean inTelltaleOnTimeout; // Generates an empty telltale string, which causes PrefabEventProcessor to trigger telltale on timeout.
    boolean inHasSenders;

    /**
     * Creates settings with sensible defaults: default processor options, no checkpointing,
     * real event hub expected, telltale on receive (not timeout), senders enabled.
     *
     * @param defaultHostName base host name for the test; setup may decorate it for uniqueness.
     */
    PerTestSettings(String defaultHostName) {
        this.inDefaultHostName = defaultHostName;
        this.inOptions = EventProcessorOptions.getDefaultOptions();
        this.inDoCheckpoint = PrefabEventProcessor.CheckpointChoices.CKP_NONE;
        this.inEventHubDoesNotExist = false;
        this.inSkipIfNoEventHubConnectionString = false;
        this.inTelltaleOnTimeout = false;
        this.inHasSenders = true;

        this.inoutEPHConstructorArgs = new EPHConstructorArgs();
    }

    String getDefaultHostName() {
        return this.inDefaultHostName;
    }

    /**
     * Overrides for the arguments passed to the EventProcessorHost constructor during setup.
     * Each setter records its value AND sets the corresponding bit in {@code flags} so setup
     * can tell "explicitly overridden" apart from "left at default".
     */
    class EPHConstructorArgs {
        static final int HOST_OVERRIDE = 0x0001;
        static final int EH_PATH_OVERRIDE = 0x0002;
        static final int EH_PATH_REPLACE_IN_CONNECTION = 0x0004;
        static final int EH_PATH_OVERRIDE_AND_REPLACE = EH_PATH_OVERRIDE | EH_PATH_REPLACE_IN_CONNECTION;
        static final int CONSUMER_GROUP_OVERRIDE = 0x0008;
        static final int EH_CONNECTION_OVERRIDE = 0x0010;
        static final int EH_CONNECTION_REMOVE_PATH = 0x0020;
        static final int STORAGE_CONNECTION_OVERRIDE = 0x0040;
        static final int STORAGE_CONTAINER_OVERRIDE = 0x0080;
        static final int STORAGE_BLOB_PREFIX_OVERRIDE = 0x0100;
        static final int EXECUTOR_OVERRIDE = 0x0200;
        static final int CHECKPOINT_MANAGER_OVERRIDE = 0x0400;
        static final int LEASE_MANAGER_OVERRIDE = 0x0800;
        static final int EXPLICIT_MANAGER = CHECKPOINT_MANAGER_OVERRIDE | LEASE_MANAGER_OVERRIDE;
        static final int TELLTALE_ON_TIMEOUT = 0x1000;

        private int flags;

        private String hostName;
        private String ehPath;
        private String consumerGroupName;
        private String ehConnection;
        private String storageConnection;
        private String storageContainerName;
        private String storageBlobPrefix;
        private ScheduledExecutorService executor;
        private ICheckpointManager checkpointManager;
        private ILeaseManager leaseManager;

        EPHConstructorArgs() {
            this.flags = 0;

            this.hostName = null;
            this.ehPath = null;
            this.consumerGroupName = null;
            this.ehConnection = null;
            this.storageConnection = null;
            this.storageContainerName = null;
            this.storageBlobPrefix = null;
            this.executor = null;
            this.checkpointManager = null;
            this.leaseManager = null;
        }

        int getFlags() {
            return this.flags;
        }

        boolean isFlagSet(int testFlag) {
            return ((this.flags & testFlag) != 0);
        }

        String getHostName() {
            return this.hostName;
        }

        void setHostName(String hostName) {
            this.hostName = hostName;
            this.flags |= HOST_OVERRIDE;
        }

        // Caller chooses whether the path only overrides the constructor arg, also replaces
        // the path inside the connection string, or both; only those two bits are honored.
        void setEHPath(String ehPath, int flags) {
            this.ehPath = ehPath;
            this.flags |= (flags & EH_PATH_OVERRIDE_AND_REPLACE);
        }

        String getEHPath() {
            return this.ehPath;
        }

        String getConsumerGroupName() {
            return this.consumerGroupName;
        }

        void setConsumerGroupName(String consumerGroupName) {
            this.consumerGroupName = consumerGroupName;
            this.flags |= CONSUMER_GROUP_OVERRIDE;
        }

        void removePathFromEHConnection() {
            this.flags |= EH_CONNECTION_REMOVE_PATH;
        }

        String getEHConnection() {
            return this.ehConnection;
        }

        void setEHConnection(String ehConnection) {
            this.ehConnection = ehConnection;
            this.flags |= EH_CONNECTION_OVERRIDE;
        }

        String getStorageConnection() {
            return this.storageConnection;
        }

        void setStorageConnection(String storageConnection) {
            this.storageConnection = storageConnection;
            this.flags |= STORAGE_CONNECTION_OVERRIDE;
        }

        // Syntactically valid storage connection string pointing at a nonexistent account,
        // for tests that must not touch real storage.
        void dummyStorageConnection() {
            setStorageConnection("DefaultEndpointsProtocol=https;AccountName=doesnotexist;AccountKey=dGhpcyBpcyBub3QgYSB2YWxpZCBrZXkgYnV0IGl0IGRvZXMgaGF2ZSA2MCBjaGFyYWN0ZXJzLjEyMzQ1Njc4OTAK;EndpointSuffix=core.windows.net");
        }

        // Deliberately does NOT set STORAGE_CONTAINER_OVERRIDE: a default container name is
        // not an explicit override.
        void setDefaultStorageContainerName(String defaultStorageContainerName) {
            this.storageContainerName = defaultStorageContainerName;
        }

        String getStorageContainerName() {
            return this.storageContainerName;
        }

        void setStorageContainerName(String storageContainerName) {
            this.storageContainerName = storageContainerName;
            this.flags |= STORAGE_CONTAINER_OVERRIDE;
        }

        String getStorageBlobPrefix() {
            return this.storageBlobPrefix;
        }

        void setStorageBlobPrefix(String storageBlobPrefix) {
            this.storageBlobPrefix = storageBlobPrefix;
            this.flags |= STORAGE_BLOB_PREFIX_OVERRIDE;
        }

        ScheduledExecutorService getExecutor() {
            return this.executor;
        }

        void setExecutor(ScheduledExecutorService executor) {
            this.executor = executor;
            this.flags |= EXECUTOR_OVERRIDE;
        }

        boolean useExplicitManagers() {
            return ((this.flags & EXPLICIT_MANAGER) != 0);
        }

        void setCheckpointManager(ICheckpointManager checkpointManager) {
            this.checkpointManager = checkpointManager;
            this.flags |= CHECKPOINT_MANAGER_OVERRIDE;
        }

        /** Correctly-spelled accessor; prefer this in new code. */
        ICheckpointManager getCheckpointManager() {
            return this.checkpointManager;
        }

        /** @deprecated misspelled; kept so existing callers compile. Use {@link #getCheckpointManager()}. */
        ICheckpointManager getCheckpointMananger() {
            return this.checkpointManager;
        }

        ILeaseManager getLeaseManager() {
            return this.leaseManager;
        }

        void setLeaseManager(ILeaseManager leaseManager) {
            this.leaseManager = leaseManager;
            this.flags |= LEASE_MANAGER_OVERRIDE;
        }
    }
}
/*
 * Copyright (c) Microsoft. All rights reserved.
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

package com.microsoft.azure.eventprocessorhost;

import com.microsoft.azure.eventhubs.EventData;

import java.util.Arrays;

/**
 * Canned event processor used by the EPH test suite. It counts events through its
 * {@link PrefabProcessorFactory}, watches for a "telltale" payload that signals the test
 * to stop, and optionally checkpoints after each batch.
 */
public class PrefabEventProcessor implements IEventProcessor {
    private final PrefabProcessorFactory factory;
    private final byte[] telltaleBytes;
    private final CheckpointChoices doCheckpoint;
    private final boolean doMarker;
    private final boolean logEveryEvent;
    private final boolean telltaleOnTimeout;
    private int eventCount = 0; // total events seen by this processor instance

    /**
     * @param factory       shared factory that aggregates counts/errors/telltales across partitions.
     * @param telltale      payload that marks the end-of-test event; empty string means
     *                      "the telltale is a receive timeout" instead of a payload match.
     * @param doCheckpoint  checkpointing mode to exercise after each batch.
     * @param doMarker      when true, log a per-batch progress marker.
     * @param logEveryEvent when true, log every received event (verbose).
     */
    PrefabEventProcessor(PrefabProcessorFactory factory, String telltale, CheckpointChoices doCheckpoint, boolean doMarker, boolean logEveryEvent) {
        this.factory = factory;
        this.telltaleBytes = telltale.getBytes();
        this.doCheckpoint = doCheckpoint;
        this.doMarker = doMarker;
        this.logEveryEvent = logEveryEvent;
        // An empty telltale cannot match any event payload; it signals "telltale on timeout" mode.
        this.telltaleOnTimeout = telltale.isEmpty();
    }

    @Override
    public void onOpen(PartitionContext context) throws Exception {
        TestBase.logInfo(context.getOwner() + " opening " + context.getPartitionId());
    }

    @Override
    public void onClose(PartitionContext context, CloseReason reason) throws Exception {
        TestBase.logInfo(context.getOwner() + " closing " + context.getPartitionId());
    }

    @Override
    public void onEvents(PartitionContext context, Iterable<EventData> events) throws Exception {
        int batchSize = 0;
        EventData lastEvent = null;
        int baseline = this.eventCount; // count before this batch, for the per-batch marker
        if (events != null && events.iterator().hasNext()) {
            this.factory.setOnEventsContext(context);

            for (EventData event : events) {
                this.eventCount++;
                batchSize++;
                if (this.logEveryEvent) {
                    TestBase.logInfo("(" + context.getOwner() + ") P" + context.getPartitionId() + " " + new String(event.getBytes()) + " @ " + event.getSystemProperties().getOffset());
                }
                if (Arrays.equals(event.getBytes(), this.telltaleBytes)) {
                    this.factory.setTelltaleFound(context.getPartitionId());
                }
                lastEvent = event;
            }
        }
        if (batchSize == 0) {
            // A null/empty batch is a receive timeout. Expected only in telltale-on-timeout mode.
            if (this.telltaleOnTimeout) {
                TestBase.logInfo("P" + context.getPartitionId() + " got expected timeout");
                this.factory.setTelltaleFound(context.getPartitionId());
            } else {
                TestBase.logError("P" + context.getPartitionId() + " got UNEXPECTED timeout");
                this.factory.putError("P" + context.getPartitionId() + " got UNEXPECTED timeout");
            }
        }
        this.factory.addBatch(batchSize);
        if (this.doMarker) {
            TestBase.logInfo("(" + context.getOwner() + ") P" + context.getPartitionId() + " total " + this.eventCount + "(" + (this.eventCount - baseline) + ")");
        }
        switch (this.doCheckpoint) {
            case CKP_NONE:
                break;

            case CKP_EXPLICIT:
                // Guard: on a timeout batch lastEvent is null; checkpointing it would NPE.
                if (lastEvent != null) {
                    context.checkpoint(lastEvent).get();
                    TestBase.logInfo("P" + context.getPartitionId() + " checkpointed at " + lastEvent.getSystemProperties().getOffset());
                }
                break;

            case CKP_NOARGS:
                context.checkpoint().get();
                TestBase.logInfo("P" + context.getPartitionId() + " checkpointed without arguments");
                break;
        }
    }

    @Override
    public void onError(PartitionContext context, Throwable error) {
        // logError (not logInfo) for consistency with the other error paths; note the
        // original message was missing the space before "onError".
        TestBase.logError("P" + context.getPartitionId() + " onError: " + error.toString() + " " + error.getMessage());
        this.factory.putError(context.getPartitionId() + " onError: " + error.toString() + " " + error.getMessage());
    }

    public enum CheckpointChoices {CKP_NONE, CKP_EXPLICIT, CKP_NOARGS}
}
/*
 * Copyright (c) Microsoft. All rights reserved.
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

package com.microsoft.azure.eventprocessorhost;

import java.util.ArrayList;
import java.util.function.Consumer;

/**
 * Test helper that collects "general" (non-event) exception notifications delivered by the
 * host, for later inspection by test assertions.
 */
public class PrefabGeneralErrorHandler implements Consumer<ExceptionReceivedEventArgs> {
    // NOTE(review): notifications may arrive on host threads while the test thread reads the
    // list only after shutdown; no synchronization was present in the original — confirm.
    private final ArrayList<String> errors = new ArrayList<String>();

    /** Returns the live error list (not a copy); callers must not mutate it. */
    ArrayList<String> getErrors() {
        return this.errors;
    }

    int getErrorCount() {
        return this.errors.size();
    }

    @Override
    public void accept(ExceptionReceivedEventArgs e) {
        this.errors.add("GENERAL: " + e.getHostname() + " " + e.getAction() + " " + e.getException().toString() + " " + e.getException().getMessage());
    }
}
/*
 * Copyright (c) Microsoft. All rights reserved.
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

package com.microsoft.azure.eventprocessorhost;

import java.util.ArrayList;
import java.util.HashMap;

/**
 * Factory for {@link PrefabEventProcessor} instances that also serves as the test's shared
 * accumulator: per-partition telltale flags, error strings, and the total event count
 * reported by all processors it created.
 */
public class PrefabProcessorFactory implements IEventProcessorFactory<IEventProcessor> {
    private final String telltale;
    private final PrefabEventProcessor.CheckpointChoices doCheckpoint;
    private final boolean doMarker;
    private final boolean logEveryEvent;

    private final ArrayList<String> errors = new ArrayList<String>();
    private final HashMap<String, Boolean> foundTelltale = new HashMap<String, Boolean>();
    private int eventsReceivedCount = 0; // guarded by this (see addBatch/getEventsReceivedCount)
    private PartitionContext partitionContextOnEvents;

    PrefabProcessorFactory(String telltale, PrefabEventProcessor.CheckpointChoices doCheckpoint, boolean doMarker) {
        this(telltale, doCheckpoint, doMarker, false);
    }

    /**
     * @param telltale      payload that marks the end-of-test event (empty = timeout telltale).
     * @param doCheckpoint  checkpoint mode passed to every created processor.
     * @param doMarker      enable per-batch progress logging in created processors.
     * @param logEveryEvent enable per-event logging in created processors.
     */
    PrefabProcessorFactory(String telltale, PrefabEventProcessor.CheckpointChoices doCheckpoint, boolean doMarker, boolean logEveryEvent) {
        this.telltale = telltale;
        this.doCheckpoint = doCheckpoint;
        this.doMarker = doMarker;
        this.logEveryEvent = logEveryEvent;
    }

    void putError(String error) {
        this.errors.add(error);
    }

    ArrayList<String> getErrors() {
        return this.errors;
    }

    int getErrorCount() {
        return this.errors.size();
    }

    boolean getTelltaleFound(String partitionId) {
        Boolean retval = this.foundTelltale.get(partitionId);
        return ((retval != null) ? retval : false);
    }

    boolean getAnyTelltaleFound() {
        return (this.foundTelltale.size() > 0);
    }

    void setTelltaleFound(String partitionId) {
        this.foundTelltale.put(partitionId, true);
    }

    synchronized void addBatch(int batchSize) {
        this.eventsReceivedCount += batchSize;
    }

    // Synchronized to pair with addBatch: without it, a reader thread has no guarantee of
    // seeing updates made by processor threads (the original getter was unsynchronized).
    synchronized int getEventsReceivedCount() {
        return this.eventsReceivedCount;
    }

    PartitionContext getOnEventsContext() {
        return this.partitionContextOnEvents;
    }

    void setOnEventsContext(PartitionContext value) {
        this.partitionContextOnEvents = value;
    }

    @Override
    public IEventProcessor createEventProcessor(PartitionContext context) throws Exception {
        return new PrefabEventProcessor(this, this.telltale, this.doCheckpoint, this.doMarker, this.logEveryEvent);
    }
}
/*
 * Copyright (c) Microsoft. All rights reserved.
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

package com.microsoft.azure.eventprocessorhost;

import com.microsoft.azure.eventhubs.*;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.concurrent.ExecutionException;

import org.junit.Assume;

/**
 * Test-side wrapper around a real (or syntactically-valid fake) Event Hub: resolves the
 * connection string from the environment, caches partition ids, and provides send helpers.
 */
final class RealEventHubUtilities {
    /** Sentinel for setup(): query the entity for its real partition count. */
    static final int QUERY_ENTITY_FOR_PARTITIONS = -1;
    static final String syntacticallyCorrectDummyEventHubPath = "doesnotexist";
    static final String syntacticallyCorrectDummyConnectionString =
            "Endpoint=sb://doesnotexist.servicebus.windows.net/;SharedAccessKeyName=doesnotexist;SharedAccessKey=dGhpcyBpcyBub3QgYSB2YWxpZCBrZXkgLi4uLi4uLi4=;EntityPath="
                    + RealEventHubUtilities.syntacticallyCorrectDummyEventHubPath;

    private ConnectionStringBuilder hubConnectionString = null;
    private String hubName = null;
    private String consumerGroup = EventHubClient.DEFAULT_CONSUMER_GROUP_NAME;
    private EventHubClient client = null;
    private ArrayList<String> cachedPartitionIds = null;
    private HashMap<String, PartitionSender> partitionSenders = new HashMap<String, PartitionSender>();

    RealEventHubUtilities() {
    }

    /**
     * Resolves connection info and creates the sending client.
     *
     * @param skipIfFakeEH   skip the test (JUnit Assume) when no real connection string is set.
     * @param fakePartitions number of fake partition ids to fabricate, or
     *                       {@link #QUERY_ENTITY_FOR_PARTITIONS} to ask the entity.
     * @return the partition ids for the test.
     */
    ArrayList<String> setup(boolean skipIfFakeEH, int fakePartitions) throws EventHubException, IOException {
        ArrayList<String> partitionIds = setupWithoutSenders(skipIfFakeEH, fakePartitions);

        // EventHubClient is source of all senders
        this.client = EventHubClient.createSync(this.hubConnectionString.toString(), TestUtilities.EXECUTOR_SERVICE);

        return partitionIds;
    }

    ArrayList<String> setupWithoutSenders(boolean skipIfFakeEH, int fakePartitions) throws EventHubException, IOException {
        // Get the connection string from the environment
        ehCacheCheck(skipIfFakeEH);

        // Get the consumer group from the environment, if present.
        String tempConsumerGroup = System.getenv("EVENT_HUB_CONSUMER_GROUP");
        if (tempConsumerGroup != null) {
            this.consumerGroup = tempConsumerGroup;
        }

        ArrayList<String> partitionIds = null;

        if (fakePartitions == RealEventHubUtilities.QUERY_ENTITY_FOR_PARTITIONS) {
            partitionIds = getPartitionIdsForTest();
        } else {
            // Fabricate "0".."n-1" without touching the service.
            partitionIds = new ArrayList<String>();
            for (int i = 0; i < fakePartitions; i++) {
                partitionIds.add(Integer.toString(i));
            }
        }

        return partitionIds;
    }

    /** Closes all cached partition senders, then the client, if created. */
    void shutdown() throws EventHubException {
        for (PartitionSender sender : this.partitionSenders.values()) {
            sender.closeSync();
        }
        if (this.client != null) {
            this.client.closeSync();
        }
    }

    ConnectionStringBuilder getConnectionString(boolean skipIfFakeEH) {
        ehCacheCheck(skipIfFakeEH);
        return this.hubConnectionString;
    }

    // Lazily resolves and caches the connection string + hub name. When no real connection
    // string is present: either skips the test (skipIfFakeEH) or falls back to the dummy.
    private void ehCacheCheck(boolean skipIfFakeEH) {
        if (this.hubName == null) {
            if (skipIfFakeEH) {
                TestUtilities.skipIfAppveyor();
            }
            String rawConnectionString = System.getenv("EVENT_HUB_CONNECTION_STRING");
            if (rawConnectionString == null) {
                if (skipIfFakeEH) {
                    TestBase.logInfo("SKIPPING - REQUIRES REAL EVENT HUB");
                    Assume.assumeTrue(rawConnectionString != null);
                }
                TestBase.logInfo("Using dummy event hub connection string");
                rawConnectionString = RealEventHubUtilities.syntacticallyCorrectDummyConnectionString;
            }

            this.hubConnectionString = new ConnectionStringBuilder(rawConnectionString);
            this.hubName = this.hubConnectionString.getEventHubName();
        }
    }

    String getConsumerGroup() {
        return this.consumerGroup;
    }

    void sendToAny(String body, int count) throws EventHubException {
        for (int i = 0; i < count; i++) {
            sendToAny(body);
        }
    }

    void sendToAny(String body) throws EventHubException {
        EventData event = EventData.create(body.getBytes());
        this.client.sendSync(event);
    }

    void sendToPartition(String partitionId, String body) throws IllegalArgumentException, EventHubException {
        EventData event = EventData.create(body.getBytes());
        PartitionSender sender = null;
        if (this.partitionSenders.containsKey(partitionId)) {
            sender = this.partitionSenders.get(partitionId);
        } else {
            sender = this.client.createPartitionSenderSync(partitionId);
            this.partitionSenders.put(partitionId, sender);
        }
        sender.sendSync(event);
    }

    /**
     * Queries the entity for its partition ids, caching the result. Uses a throwaway client
     * so it works before setup() has created this.client.
     */
    ArrayList<String> getPartitionIdsForTest() throws EventHubException, IOException {
        if (this.cachedPartitionIds == null) {
            this.cachedPartitionIds = new ArrayList<String>();
            ehCacheCheck(true);

            EventHubClient idClient = EventHubClient.createSync(this.hubConnectionString.toString(), TestUtilities.EXECUTOR_SERVICE);
            try {
                EventHubRuntimeInformation info = idClient.getRuntimeInformation().get();
                String[] ids = info.getPartitionIds();
                this.cachedPartitionIds.addAll(Arrays.asList(ids));
            } catch (ExecutionException | InterruptedException e) {
                if (e instanceof InterruptedException) {
                    // Restore the interrupt flag before translating the exception.
                    Thread.currentThread().interrupt();
                }
                throw new IllegalArgumentException("Error getting partition ids in test framework", e.getCause());
            } finally {
                // The original leaked this client; always close it.
                idClient.closeSync();
            }
        }

        return this.cachedPartitionIds;
    }

}
The + * cases here are not really useful/usable as general-purpose tests (most of them run infinitely, + * for example), so they are not marked as JUnit cases by default. + */ + + /* + * Two instances of EventProcessorHost with the same host name is not a valid configuration. + * Since lease ownership is determined by host name, they will both believe that they own all + * the partitions and constantly be recreating receivers and knocking the other one off. Don't + * do this. This repro exists because another test case did this scenario by accident and saw + * massive memory leaks, so I recreated it deliberately to find out what was going on. + */ + //@Test + public void conflictingHosts() throws Exception { + RealEventHubUtilities utils = new RealEventHubUtilities(); + utils.setup(true, RealEventHubUtilities.QUERY_ENTITY_FOR_PARTITIONS); + + String telltale = "conflictingHosts-telltale-" + EventProcessorHost.safeCreateUUID(); + String conflictingName = "conflictingHosts-NOTSAFE"; + String storageName = conflictingName.toLowerCase() + EventProcessorHost.safeCreateUUID(); + PrefabEventProcessor.CheckpointChoices doCheckpointing = PrefabEventProcessor.CheckpointChoices.CKP_NONE; + boolean doMarker = false; + + PrefabGeneralErrorHandler general1 = new PrefabGeneralErrorHandler(); + PrefabProcessorFactory factory1 = new PrefabProcessorFactory(telltale, doCheckpointing, doMarker); + EventProcessorHost host1 = new EventProcessorHost(conflictingName, utils.getConnectionString(true).getEventHubName(), + utils.getConsumerGroup(), utils.getConnectionString(true).toString(), + TestUtilities.getStorageConnectionString(), storageName); + EventProcessorOptions options1 = EventProcessorOptions.getDefaultOptions(); + options1.setExceptionNotification(general1); + + PrefabGeneralErrorHandler general2 = new PrefabGeneralErrorHandler(); + PrefabProcessorFactory factory2 = new PrefabProcessorFactory(telltale, doCheckpointing, doMarker); + EventProcessorHost host2 = new 
EventProcessorHost(conflictingName, utils.getConnectionString(true).getEventHubName(), + utils.getConsumerGroup(), utils.getConnectionString(true).toString(), + TestUtilities.getStorageConnectionString(), storageName); + EventProcessorOptions options2 = EventProcessorOptions.getDefaultOptions(); + options2.setExceptionNotification(general2); + + host1.registerEventProcessorFactory(factory1, options1); + host2.registerEventProcessorFactory(factory2, options2); + + int i = 0; + while (true) { + utils.sendToAny("conflict-" + i++, 10); + System.out.println("\n." + factory1.getEventsReceivedCount() + "." + factory2.getEventsReceivedCount() + ":" + + ((ThreadPoolExecutor) host1.getHostContext().getExecutor()).getPoolSize() + "." + + ((ThreadPoolExecutor) host2.getHostContext().getExecutor()).getPoolSize() + ":" + + Thread.activeCount()); + Thread.sleep(100); + } + } + + @Test + public void infiniteReceive() throws Exception { + RealEventHubUtilities utils = new RealEventHubUtilities(); + utils.setupWithoutSenders(true, RealEventHubUtilities.QUERY_ENTITY_FOR_PARTITIONS); + + PrefabGeneralErrorHandler genErr = new PrefabGeneralErrorHandler(); + PrefabProcessorFactory factory = new PrefabProcessorFactory("never match", PrefabEventProcessor.CheckpointChoices.CKP_NONE, true, false); + InMemoryCheckpointManager checkpointer = new InMemoryCheckpointManager(); + InMemoryLeaseManager leaser = new InMemoryLeaseManager(); + EventProcessorHost host = new EventProcessorHost("infiniteReceive-1", utils.getConnectionString(true).getEventHubName(), + utils.getConsumerGroup(), utils.getConnectionString(true).toString(), + checkpointer, leaser, Executors.newScheduledThreadPool(16), null); + checkpointer.initialize(host.getHostContext()); + leaser.initialize(host.getHostContext()); + + EventProcessorOptions opts = EventProcessorOptions.getDefaultOptions(); + opts.setExceptionNotification(genErr); + host.registerEventProcessorFactory(factory, opts).get(); + + while (System.in.available() == 
0) { + System.out.println("STANDING BY AT " + Thread.activeCount()); + Thread.sleep(10000); + } + while (System.in.available() > 0) { + System.in.read(); + } + while (System.in.available() == 0) { + System.out.println("STANDING BY AT " + Thread.activeCount()); + Thread.sleep(1000); + } + + host.unregisterEventProcessor(); + } + + @Test + public void infiniteReceive2Hosts() throws Exception { + RealEventHubUtilities utils = new RealEventHubUtilities(); + utils.setup(true, RealEventHubUtilities.QUERY_ENTITY_FOR_PARTITIONS); + + String storageName = "ir2hosts" + EventProcessorHost.safeCreateUUID(); + + PrefabGeneralErrorHandler general1 = new PrefabGeneralErrorHandler(); + PrefabProcessorFactory factory1 = new PrefabProcessorFactory("never match", PrefabEventProcessor.CheckpointChoices.CKP_NONE, true, false); + EventProcessorHost host1 = new EventProcessorHost("infiniteReceive2Hosts-1", utils.getConnectionString(true).getEventHubName(), + utils.getConsumerGroup(), utils.getConnectionString(true).toString(), + TestUtilities.getStorageConnectionString(), storageName); + EventProcessorOptions options1 = EventProcessorOptions.getDefaultOptions(); + options1.setExceptionNotification(general1); + + PrefabGeneralErrorHandler general2 = new PrefabGeneralErrorHandler(); + PrefabProcessorFactory factory2 = new PrefabProcessorFactory("never match", PrefabEventProcessor.CheckpointChoices.CKP_NONE, true, false); + EventProcessorHost host2 = new EventProcessorHost("infiniteReceive2Hosts-2", utils.getConnectionString(true).getEventHubName(), + utils.getConsumerGroup(), utils.getConnectionString(true).toString(), + TestUtilities.getStorageConnectionString(), storageName); + EventProcessorOptions options2 = EventProcessorOptions.getDefaultOptions(); + options2.setExceptionNotification(general2); + + host1.registerEventProcessorFactory(factory1, options1).get(); + host2.registerEventProcessorFactory(factory2, options2).get(); + + int i = 0; + boolean upAndDown = true; + int 
upAndDownInterval = 25; + CompletableFuture upAndDownFuture = null; + while (true) { + if (upAndDownFuture != null) + { + if (upAndDownFuture.isDone()) + { + upAndDownFuture.get(); + System.out.println("Reg/unreg completed"); + upAndDownFuture = null; + } + } + + utils.sendToAny("blah-" + i++, 10); + + StringBuilder blah = new StringBuilder(); + blah.append("\n."); + blah.append(factory1.getEventsReceivedCount()); + blah.append('.'); + if (host2 != null) + { + blah.append(factory2.getEventsReceivedCount()); + } + blah.append(':'); + blah.append(((ThreadPoolExecutor) host1.getHostContext().getExecutor()).getPoolSize()); + blah.append('.'); + if (host2 != null) + { + blah.append(((ThreadPoolExecutor) host2.getHostContext().getExecutor()).getPoolSize()); + } + blah.append(':'); + blah.append(Thread.activeCount()); + blah.append(" i="); + blah.append(i); + System.out.println(blah.toString()); + + Thread.sleep(100); + + if (upAndDown && ((i % upAndDownInterval) == 0)) + { + if (host2 != null) + { + upAndDownFuture = host2.unregisterEventProcessor(); + System.out.println("Unregister started"); + host2 = null; + } + else + { + factory2 = new PrefabProcessorFactory("never match", PrefabEventProcessor.CheckpointChoices.CKP_NONE, true, false); + host2 = new EventProcessorHost("infiniteReceive2Hosts-2", utils.getConnectionString(true).getEventHubName(), + utils.getConsumerGroup(), utils.getConnectionString(true).toString(), + TestUtilities.getStorageConnectionString(), storageName); + options2 = EventProcessorOptions.getDefaultOptions(); + options2.setExceptionNotification(general2); + + System.out.println("Reregister started"); + upAndDownFuture = host2.registerEventProcessorFactory(factory2, options2); + } + } + } + } + + /* + * The memory leak mentioned in the previous case turned out to be a thread leak. This case was created to see if + * the thread leak was related to EPH or was in the underlying client. 
    /*
     * The memory leak mentioned in the previous case turned out to be a thread leak. This case was created to see if
     * the thread leak was related to EPH or was in the underlying client. At first we believed that the leak was due
     * to creating a new epoch receiver that was kicking the old receiver off. Then we believed that it was about epoch
     * receivers. Then we finally determined that a thread was leaked every time a receiver was closed, with no special
     * sauce required.
     */
    //@Test
    public void rawEpochStealing() throws Exception {
        RealEventHubUtilities utils = new RealEventHubUtilities();
        utils.setup(true, RealEventHubUtilities.QUERY_ENTITY_FOR_PARTITIONS);

        // Infinite loop: each iteration dumps a thread census, then opens and
        // closes one client+receiver pair so leaked threads accumulate visibly.
        int clientSerialNumber = 0;
        while (true) {
            // Snapshot live threads; +10 slack because activeCount is only an estimate.
            Thread[] blah = new Thread[Thread.activeCount() + 10];
            int actual = Thread.enumerate(blah);
            if (actual >= blah.length) {
                System.out.println("Lost some threads");
            }
            // Classify each thread by its topmost stack frame: parked threads and
            // Windows-selector poll threads are counted/collected rather than printed,
            // everything else is printed individually.
            int parkedCount = 0;
            String selectingList = "";
            boolean display = true;
            for (int i = 0; i < actual; i++) {
                display = true;
                StackTraceElement[] bloo = blah[i].getStackTrace();
                String show = "nostack";
                if (bloo.length > 0) {
                    show = bloo[0].getClassName() + "." + bloo[0].getMethodName();
                    if (show.compareTo("sun.misc.Unsafe.park") == 0) {
                        parkedCount++;
                        display = false;
                    } else if (show.compareTo("sun.nio.ch.WindowsSelectorImpl$SubSelector.poll0") == 0) {
                        selectingList += (" " + blah[i].getId());
                        display = false;
                    }
                }
                if (display) {
                    System.out.print(" " + blah[i].getId() + ":" + show);
                }
            }
            System.out.println("\nParked: " + parkedCount + " SELECTING: " + selectingList);

            System.out.println("Client " + clientSerialNumber + " starting");
            EventHubClient client = EventHubClient.createSync(utils.getConnectionString(true).toString(), TestUtilities.EXECUTOR_SERVICE);
            PartitionReceiver receiver = client.createReceiver(utils.getConsumerGroup(), "0", EventPosition.fromStartOfStream()).get();
            //client.createEpochReceiver(utils.getConsumerGroup(), "0", PartitionReceiver.START_OF_STREAM, 1).get();

            // Flip to true to receive via a PartitionReceiveHandler (Blah) instead of receiveSync.
            boolean useReceiveHandler = false;

            if (useReceiveHandler) {
                Blah b = new Blah(clientSerialNumber++, receiver, client);
                receiver.setReceiveHandler(b).get();
                // wait for events to start flowing
                b.waitForReceivedEvents().get();
            } else {
                receiver.receiveSync(1);
                System.out.println("Received an event");
            }

            // Enable these lines to avoid overlap
            try {
                System.out.println("Non-overlap close of PartitionReceiver");
                if (useReceiveHandler) {
                    receiver.setReceiveHandler(null).get();
                }
                receiver.close().get();
            } catch (InterruptedException | ExecutionException e) {
                System.out.println("Client " + clientSerialNumber + " failed while closing PartitionReceiver: " + e.toString());
            }
            try {
                System.out.println("Non-overlap close of EventHubClient");
                client.close().get();
            } catch (InterruptedException | ExecutionException e) {
                System.out.println("Client " + clientSerialNumber + " failed while closing EventHubClient: " + e.toString());
            }
            System.out.println("Client " + clientSerialNumber + " closed");
            // Enable these lines to avoid overlap

            System.out.println("Threads: " + Thread.activeCount());
        }
    }
package com.microsoft.azure.eventprocessorhost;

import com.microsoft.azure.eventhubs.EventHubException;
import com.microsoft.azure.eventhubs.IllegalEntityException;

import org.junit.Assume;
import org.junit.Test;

import java.util.concurrent.ExecutionException;

import static org.junit.Assert.fail;

/**
 * Negative-path tests for EventProcessorHost: each case drives setup into a
 * known-bad configuration and verifies that the expected exception (and, where
 * applicable, the exact message) is produced. All cases pass NO_CHECKS to
 * testFinish because normal event-count verification is meaningless when
 * startup is expected to fail.
 */
public class SadPathTests extends TestBase {
    /**
     * Registering against a nonexistent namespace should surface an EventHubException.
     * Skipped when a real connection string is present (the framework would then
     * target a real namespace instead of the synthetic bad one).
     */
    @Test
    public void noSuchEventHubNamespaceTest() throws Exception {
        PerTestSettings settings = new PerTestSettings("NoSuchEventHubNamespace");
        settings.inHasSenders = false;
        settings.inEventHubDoesNotExist = true;
        Assume.assumeFalse(TestUtilities.isRunningOnAzure());
        try {
            settings = testSetup(settings);
            fail("No exception occurred");
        } catch (EventHubException e) {
            TestBase.logInfo("Got expected EventHubException");
        } finally {
            testFinish(settings, NO_CHECKS);
        }
    }

    /**
     * A valid namespace but a nonexistent event hub path should fail registration
     * with an IllegalEntityException wrapped in the ExecutionException from get().
     */
    @Test
    public void noSuchEventHubTest() throws Exception {
        PerTestSettings settings = new PerTestSettings("NoSuchEventHub");
        settings.inHasSenders = false;
        settings.inSkipIfNoEventHubConnectionString = true;
        settings.inoutEPHConstructorArgs.setEHPath("thereisnoeventhubwiththisname", PerTestSettings.EPHConstructorArgs.EH_PATH_OVERRIDE_AND_REPLACE);
        try {
            settings = testSetup(settings);
            fail("No exception occurred");
        } catch (ExecutionException e) {
            // Only the expected inner cause is tolerated; anything else is rethrown
            // so the test fails with the real error.
            Throwable inner = e.getCause();
            if ((inner != null) && (inner instanceof IllegalEntityException)) {
                TestBase.logInfo("Got expected IllegalEntityException");
            } else {
                throw e;
            }
        } finally {
            testFinish(settings, NO_CHECKS);
        }
    }

    // Turned off -- we cannot detect no such consumer group at register time
    //@Test
    public void noSuchConsumerGroupTest() throws Exception {
        PerTestSettings settings = new PerTestSettings("NoSuchConsumerGroup");
        settings.inHasSenders = false;
        settings.inoutEPHConstructorArgs.setConsumerGroupName("thereisnoconsumergroupwiththisname");
        try {
            settings = testSetup(settings);
            fail("No exception occurred");
        } catch (ExecutionException e) {
            Throwable inner = e.getCause();
            if ((inner != null) && (inner instanceof IllegalEntityException)) {
                TestBase.logInfo("Got expected IllegalEntityException");
            } else {
                throw e;
            }
        } finally {
            testFinish(settings, NO_CHECKS);
        }
    }

    /**
     * Calling register a second time on an already-registered host must throw
     * IllegalStateException with the documented message. The message text is
     * compared exactly, so this test pins the host's error wording.
     */
    @Test
    public void secondRegisterFailsTest() throws Exception {
        PerTestSettings settings = new PerTestSettings("SecondRegisterFails");
        settings.inHasSenders = false;
        settings = testSetup(settings);

        try {
            settings.outHost.registerEventProcessorFactory(settings.outProcessorFactory, settings.inOptions).get();
            Thread.sleep(10000);
            fail("No exception occurred");
        } catch (IllegalStateException e) {
            if ((e.getMessage() != null) && (e.getMessage().compareTo("Register has already been called on this EventProcessorHost") == 0)) {
                TestBase.logInfo("Got expected exception");
            } else {
                fail("Got IllegalStateException but text is wrong");
            }
        } finally {
            testFinish(settings, NO_CHECKS);
        }
    }

    /**
     * Register after unregister on the same host instance must throw
     * IllegalStateException directing the caller to create a new host.
     * The sleeps give the host time to finish starting/stopping; exact
     * durations are presumably tuned empirically -- TODO confirm.
     */
    @Test
    public void reregisterFailsTest() throws Exception {
        PerTestSettings settings = new PerTestSettings("ReregisterFails");
        settings.inHasSenders = false;
        settings = testSetup(settings);

        try {
            Thread.sleep(15000);
            settings.outHost.unregisterEventProcessor().get();

            settings.outHost.registerEventProcessorFactory(settings.outProcessorFactory, settings.inOptions).get();
            Thread.sleep(10000);
            fail("No exception occurred");
        } catch (IllegalStateException e) {
            if ((e.getMessage() != null) && (e.getMessage().compareTo("Register cannot be called on an EventProcessorHost after unregister. Please create a new EventProcessorHost instance.") == 0)) {
                TestBase.logInfo("Got expected exception");
            } else {
                fail("Got IllegalStateException but text is wrong");
            }
        } finally {
            testFinish(settings, NO_CHECKS);
        }
    }

    // Turned off -- requires special setup beyond just having a real event hub
    // @Test
    public void badEventHubNameTest() throws Exception {
        // This case requires an eventhub with a bad name (not legal as storage container name).
        // Within EPH the validation of the name occurs after other operations that fail if the eventhub
        // doesn't exist, so this case can't use arbitrary bad names.
        PerTestSettings settings = new PerTestSettings("BadEventHubName");
        settings.inHasSenders = false;
        try {
            settings.inoutEPHConstructorArgs.setStorageContainerName(null); // otherwise test framework creates unique storage container name
            settings = testSetup(settings);
            fail("No exception occurred");
        } catch (IllegalArgumentException e) {
            String message = e.getMessage();
            if ((message != null) && message.startsWith("EventHub names must conform to the following rules")) {
                TestBase.logInfo("Got expected IllegalArgumentException");
            } else {
                throw e;
            }
        } finally {
            testFinish(settings, NO_CHECKS);
        }
    }
}
/*
 * Copyright (c) Microsoft. All rights reserved.
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

package com.microsoft.azure.eventprocessorhost;

import com.microsoft.azure.eventhubs.EventPosition;
import org.junit.Assert;
import org.junit.Test;

import java.time.Instant;
import java.util.concurrent.Executors;


/**
 * End-to-end smoke tests for EventProcessorHost against a real event hub.
 * Each test sends a unique "telltale" event, waits for the processor to
 * observe it (waitForTelltale), and then lets testFinish verify received
 * counts and error-free shutdown.
 */
public class SmokeTest extends TestBase {
    /** Minimal round trip: send one event, expect at least one received. */
    @Test
    public void SendRecv1MsgTest() throws Exception {
        PerTestSettings settings = new PerTestSettings("SendRecv1Msg");
        settings = testSetup(settings);

        settings.outUtils.sendToAny(settings.outTelltale);
        waitForTelltale(settings);

        testFinish(settings, SmokeTest.ANY_NONZERO_COUNT);
    }

    /** Verifies receiver runtime metrics are populated when the option is enabled. */
    @Test
    public void ReceiverRuntimeMetricsTest() throws Exception {
        PerTestSettings settings = new PerTestSettings("ReceiverRuntimeMetrics");
        settings.inOptions.setReceiverRuntimeMetricEnabled(true);
        settings = testSetup(settings);

        settings.outUtils.sendToAny(settings.outTelltale);
        waitForTelltale(settings);

        // correctness of runtimeInfo is already tested in javaclient - this is only testing for presence of non-default value
        Assert.assertTrue(settings.outProcessorFactory.getOnEventsContext().getRuntimeInformation() != null);
        Assert.assertTrue(settings.outProcessorFactory.getOnEventsContext().getRuntimeInformation().getLastEnqueuedSequenceNumber() > 0);

        testFinish(settings, SmokeTest.ANY_NONZERO_COUNT);
    }

    @Test
    public void receiveFromNowTest() throws Exception {
        // Doing two iterations with the same "now" requires storing the "now" value instead of
        // using the current time when the initial offset provider is executed. It also requires
        // that the "now" be before the first send.
        final Instant storedNow = Instant.now();

        // Do the first iteration.
        PerTestSettings firstSettings = receiveFromNowIteration(storedNow, 1, 1, null);

        // Do a second iteration with the same "now". Because the first iteration does not checkpoint,
        // it should receive the telltale from the first iteration AND the telltale from this iteration.
        // The purpose of running a second iteration is to look for bugs that occur when leases have been
        // created and persisted but checkpoints have not, so it is vital that the second iteration uses the
        // same storage container.
        receiveFromNowIteration(storedNow, 2, 2, firstSettings.inoutEPHConstructorArgs.getStorageContainerName());
    }

    /**
     * One iteration of the receive-from-now scenario: positions receivers at
     * storedNow, sends a telltale, and expects exactly expectedEvents events.
     * Passing a non-null containerName reuses a prior iteration's leases.
     */
    private PerTestSettings receiveFromNowIteration(final Instant storedNow, int iteration, int expectedEvents, String containerName) throws Exception {
        PerTestSettings settings = new PerTestSettings("receiveFromNow-iter-" + iteration);
        if (containerName != null) {
            settings.inoutEPHConstructorArgs.setStorageContainerName(containerName);
        }
        settings.inOptions.setInitialPositionProvider((partitionId) -> {
            return EventPosition.fromEnqueuedTime(storedNow);
        });
        settings = testSetup(settings);

        settings.outUtils.sendToAny(settings.outTelltale);
        waitForTelltale(settings);

        testFinish(settings, expectedEvents);

        return settings;
    }

    /**
     * Two iterations sharing a storage container: the first checkpoints after the
     * telltale (explicit-args variant), so the second should resume past it and
     * see exactly one event per partition (its own telltales).
     */
    @Test
    public void receiveFromCheckpoint() throws Exception {
        PerTestSettings firstSettings = receiveFromCheckpointIteration(1, SmokeTest.ANY_NONZERO_COUNT, null, PrefabEventProcessor.CheckpointChoices.CKP_EXPLICIT);

        receiveFromCheckpointIteration(2, firstSettings.outPartitionIds.size(), firstSettings.inoutEPHConstructorArgs.getStorageContainerName(),
                firstSettings.inDoCheckpoint);
    }

    /** Same as receiveFromCheckpoint but using the no-argument checkpoint call. */
    @Test
    public void receiveFromCheckpointNoArgs() throws Exception {
        PerTestSettings firstSettings = receiveFromCheckpointIteration(1, SmokeTest.ANY_NONZERO_COUNT, null, PrefabEventProcessor.CheckpointChoices.CKP_NOARGS);

        receiveFromCheckpointIteration(2, firstSettings.outPartitionIds.size(), firstSettings.inoutEPHConstructorArgs.getStorageContainerName(),
                firstSettings.inDoCheckpoint);
    }

    /**
     * One checkpoint-scenario iteration: sends a telltale to every partition and
     * waits for each, using checkpointCallType to pick the checkpoint API variant.
     * The distinguisher keeps the two variants' test names (and hence storage
     * artifacts) separate.
     */
    private PerTestSettings receiveFromCheckpointIteration(int iteration, int expectedEvents, String containerName,
                                                           PrefabEventProcessor.CheckpointChoices checkpointCallType) throws Exception {
        String distinguisher = "e";
        if (checkpointCallType == PrefabEventProcessor.CheckpointChoices.CKP_NOARGS) {
            distinguisher = "n";
        }
        PerTestSettings settings = new PerTestSettings("recvFromCkpt-" + iteration + "-" + distinguisher);
        if (containerName != null) {
            settings.inoutEPHConstructorArgs.setStorageContainerName(containerName);
        }
        settings.inDoCheckpoint = checkpointCallType;
        settings = testSetup(settings);

        for (String id : settings.outPartitionIds) {
            settings.outUtils.sendToPartition(id, settings.outTelltale);
            waitForTelltale(settings, id);
        }

        testFinish(settings, expectedEvents);

        return settings;
    }

    /**
     * With invoke-on-timeout enabled and no senders, the processor should still
     * be called (with an empty batch) and the timeout telltale should fire.
     */
    @Test
    public void receiveInvokeOnTimeout() throws Exception {
        PerTestSettings settings = new PerTestSettings("receiveInvokeOnTimeout");
        settings.inOptions.setInvokeProcessorAfterReceiveTimeout(true);
        settings.inTelltaleOnTimeout = true;
        settings.inHasSenders = false;
        settings = testSetup(settings);

        waitForTelltale(settings, "0");

        testFinish(settings, SmokeTest.SKIP_COUNT_CHECK);
    }

    /** Default behavior: the processor must NOT be invoked on receive timeout. */
    @Test
    public void receiveNotInvokeOnTimeout() throws Exception {
        PerTestSettings settings = new PerTestSettings("receiveNotInvokeOnTimeout");
        settings = testSetup(settings);

        // Receive timeout is one minute. If event processor is invoked on timeout, it will
        // record an error that will fail the case on shutdown.
        Thread.sleep(120 * 1000);

        settings.outUtils.sendToAny(settings.outTelltale);
        waitForTelltale(settings);

        testFinish(settings, SmokeTest.ANY_NONZERO_COUNT);
    }

    /** Sends maxGeneration events plus a telltale to every partition and expects them all. */
    @Test
    public void receiveAllPartitionsTest() throws Exception {
        // Save "now" to avoid race with sender startup.
        final Instant savedNow = Instant.now();

        PerTestSettings settings = new PerTestSettings("receiveAllPartitions");
        settings.inOptions.setInitialPositionProvider((partitionId) -> {
            return EventPosition.fromEnqueuedTime(savedNow);
        });
        settings = testSetup(settings);

        final int maxGeneration = 10;
        for (int generation = 0; generation < maxGeneration; generation++) {
            for (String id : settings.outPartitionIds) {
                settings.outUtils.sendToPartition(id, "receiveAllPartitions-" + id + "-" + generation);
            }
            TestBase.logInfo("Generation " + generation + " sent\n");
        }
        for (String id : settings.outPartitionIds) {
            settings.outUtils.sendToPartition(id, settings.outTelltale);
            TestBase.logInfo("Telltale " + id + " sent\n");
        }
        for (String id : settings.outPartitionIds) {
            waitForTelltale(settings, id);
        }

        testFinish(settings, (settings.outPartitionIds.size() * (maxGeneration + 1))); // +1 for the telltales
    }

    /** Same as receiveAllPartitionsTest but with a caller-supplied 4-thread executor. */
    @Test
    public void receiveAllPartitionsWithUserExecutorTest() throws Exception {
        // Save "now" to avoid race with sender startup.
        final Instant savedNow = Instant.now();

        PerTestSettings settings = new PerTestSettings("rcvAllPartsUserExec");
        settings.inOptions.setInitialPositionProvider((partitionId) -> {
            return EventPosition.fromEnqueuedTime(savedNow);
        });
        settings.inoutEPHConstructorArgs.setExecutor(Executors.newScheduledThreadPool(4));
        settings = testSetup(settings);

        final int maxGeneration = 10;
        for (int generation = 0; generation < maxGeneration; generation++) {
            for (String id : settings.outPartitionIds) {
                settings.outUtils.sendToPartition(id, "receiveAllPartitionsWithUserExecutor-" + id + "-" + generation);
            }
            TestBase.logInfo("Generation " + generation + " sent\n");
        }
        for (String id : settings.outPartitionIds) {
            settings.outUtils.sendToPartition(id, settings.outTelltale);
            TestBase.logInfo("Telltale " + id + " sent\n");
        }
        for (String id : settings.outPartitionIds) {
            waitForTelltale(settings, id);
        }

        testFinish(settings, (settings.outPartitionIds.size() * (maxGeneration + 1))); // +1 for the telltales
    }

    /** Same as receiveAllPartitionsTest but forced onto a single-thread executor. */
    @Test
    public void receiveAllPartitionsWithSingleThreadExecutorTest() throws Exception {
        // Save "now" to avoid race with sender startup.
        final Instant savedNow = Instant.now();

        PerTestSettings settings = new PerTestSettings("rcvAllParts1ThrdExec");
        settings.inOptions.setInitialPositionProvider((partitionId) -> {
            return EventPosition.fromEnqueuedTime(savedNow);
        });
        settings.inoutEPHConstructorArgs.setExecutor(Executors.newSingleThreadScheduledExecutor());
        settings = testSetup(settings);

        final int maxGeneration = 10;
        for (int generation = 0; generation < maxGeneration; generation++) {
            for (String id : settings.outPartitionIds) {
                settings.outUtils.sendToPartition(id, "receiveAllPartitionsWithSingleThreadExecutor-" + id + "-" + generation);
            }
            TestBase.logInfo("Generation " + generation + " sent\n");
        }
        for (String id : settings.outPartitionIds) {
            settings.outUtils.sendToPartition(id, settings.outTelltale);
            TestBase.logInfo("Telltale " + id + " sent\n");
        }
        for (String id : settings.outPartitionIds) {
            waitForTelltale(settings, id);
        }

        testFinish(settings, (settings.outPartitionIds.size() * (maxGeneration + 1))); // +1 for the telltales
    }
}
java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ScheduledExecutorService; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +public class TestBase { + final static int SKIP_COUNT_CHECK = -3; // expectedEvents could be anything, don't check it at all + final static int NO_CHECKS = -2; // do no checks at all, used for tests which are expected fail in startup + final static int ANY_NONZERO_COUNT = -1; // if expectedEvents is -1, just check for > 0 + + static boolean logInfo = false; + static boolean logConsole = false; + static final Logger TRACE_LOGGER = LoggerFactory.getLogger("servicebus.test-eph.trace"); + + @Rule + public final TestName name = new TestName(); + + @BeforeClass + public static void allTestStart() { + String env = System.getenv("VERBOSELOG"); + if (env != null) { + TestBase.logInfo = true; + if (env.compareTo("CONSOLE") == 0) { + TestBase.logConsole = true; + } + } + } + + @AfterClass + public static void allTestFinish() { + } + + static void logError(String message) { + if (TestBase.logConsole) { + System.err.println("TEST ERROR: " + message); + } else { + TestBase.TRACE_LOGGER.error(message); + } + } + + static void logInfo(String message) { + if (TestBase.logInfo) { + if (TestBase.logConsole) { + System.err.println("TEST INFO: " + message); + } else { + TestBase.TRACE_LOGGER.info(message); + } + } + } + + void skipIfAutomated() { + Assume.assumeTrue(System.getenv("VERBOSELOG") != null); + } + + @Before + public void logCaseStart() { + String usemsg = "CASE START: " + this.name.getMethodName(); + if (TestBase.logConsole) { + System.err.println(usemsg); + } else { + TestBase.TRACE_LOGGER.info(usemsg); + } + } + + @After + public void logCaseEnd() { + String usemsg = "CASE END: " + this.name.getMethodName(); + if (TestBase.logConsole) { + System.err.println(usemsg); + } else { + TestBase.TRACE_LOGGER.info(usemsg); 
+ } + } + + PerTestSettings testSetup(PerTestSettings settings) throws Exception { + String effectiveHostName = settings.inoutEPHConstructorArgs.isFlagSet(PerTestSettings.EPHConstructorArgs.HOST_OVERRIDE) ? + settings.inoutEPHConstructorArgs.getHostName() : settings.getDefaultHostName() + "-1"; + + settings.outUtils = new RealEventHubUtilities(); + boolean skipIfNoEventHubConnectionString = !settings.inEventHubDoesNotExist || settings.inSkipIfNoEventHubConnectionString; + if (settings.inHasSenders) { + settings.outPartitionIds = settings.outUtils.setup(skipIfNoEventHubConnectionString, settings.inEventHubDoesNotExist ? 8 : RealEventHubUtilities.QUERY_ENTITY_FOR_PARTITIONS); + } else { + settings.outPartitionIds = settings.outUtils.setupWithoutSenders(skipIfNoEventHubConnectionString, settings.inEventHubDoesNotExist ? 8 : RealEventHubUtilities.QUERY_ENTITY_FOR_PARTITIONS); + } + ConnectionStringBuilder environmentCSB = settings.outUtils.getConnectionString(skipIfNoEventHubConnectionString); + + String effectiveEntityPath = settings.inoutEPHConstructorArgs.isFlagSet(PerTestSettings.EPHConstructorArgs.EH_PATH_OVERRIDE) ? + settings.inoutEPHConstructorArgs.getEHPath() : environmentCSB.getEventHubName(); + + String effectiveConsumerGroup = settings.inoutEPHConstructorArgs.isFlagSet(PerTestSettings.EPHConstructorArgs.CONSUMER_GROUP_OVERRIDE) ? 
+ settings.inoutEPHConstructorArgs.getConsumerGroupName() : EventHubClient.DEFAULT_CONSUMER_GROUP_NAME; + + String effectiveConnectionString = environmentCSB.toString(); + if (settings.inoutEPHConstructorArgs.isFlagSet(PerTestSettings.EPHConstructorArgs.EH_PATH_REPLACE_IN_CONNECTION) || + settings.inoutEPHConstructorArgs.isFlagSet(PerTestSettings.EPHConstructorArgs.EH_CONNECTION_REMOVE_PATH)) { + ConnectionStringBuilder replacedCSB = new ConnectionStringBuilder() + .setEndpoint(environmentCSB.getEndpoint()) + .setEventHubName( + settings.inoutEPHConstructorArgs.isFlagSet(PerTestSettings.EPHConstructorArgs.EH_CONNECTION_REMOVE_PATH) ? + "" : + settings.inoutEPHConstructorArgs.getEHPath() + ) + .setSasKeyName(environmentCSB.getSasKeyName()) + .setSasKey(environmentCSB.getSasKey()); + replacedCSB.setOperationTimeout(environmentCSB.getOperationTimeout()); + effectiveConnectionString = replacedCSB.toString(); + } + if (settings.inoutEPHConstructorArgs.isFlagSet(PerTestSettings.EPHConstructorArgs.EH_CONNECTION_OVERRIDE)) { + effectiveConnectionString = settings.inoutEPHConstructorArgs.getEHConnection(); + } + + ScheduledExecutorService effectiveExecutor = settings.inoutEPHConstructorArgs.isFlagSet(PerTestSettings.EPHConstructorArgs.EXECUTOR_OVERRIDE) ? 
+ settings.inoutEPHConstructorArgs.getExecutor() : null; + + if (settings.inTelltaleOnTimeout) { + settings.outTelltale = ""; + } else { + settings.outTelltale = settings.getDefaultHostName() + "-telltale-" + EventProcessorHost.safeCreateUUID(); + } + settings.outGeneralErrorHandler = new PrefabGeneralErrorHandler(); + settings.outProcessorFactory = new PrefabProcessorFactory(settings.outTelltale, settings.inDoCheckpoint, false, false); + + settings.inOptions.setExceptionNotification(settings.outGeneralErrorHandler); + + if (settings.inoutEPHConstructorArgs.useExplicitManagers()) { + ICheckpointManager effectiveCheckpointMananger = settings.inoutEPHConstructorArgs.isFlagSet(PerTestSettings.EPHConstructorArgs.CHECKPOINT_MANAGER_OVERRIDE) ? + settings.inoutEPHConstructorArgs.getCheckpointMananger() : new BogusCheckpointMananger(); + ILeaseManager effectiveLeaseManager = settings.inoutEPHConstructorArgs.isFlagSet(PerTestSettings.EPHConstructorArgs.LEASE_MANAGER_OVERRIDE) ? + settings.inoutEPHConstructorArgs.getLeaseManager() : new BogusLeaseManager(); + + settings.outHost = new EventProcessorHost(effectiveHostName, effectiveEntityPath, effectiveConsumerGroup, effectiveConnectionString, + effectiveCheckpointMananger, effectiveLeaseManager, effectiveExecutor, null); + } else { + String effectiveStorageConnectionString = settings.inoutEPHConstructorArgs.isFlagSet(PerTestSettings.EPHConstructorArgs.STORAGE_CONNECTION_OVERRIDE) ? 
+ settings.inoutEPHConstructorArgs.getStorageConnection() : TestUtilities.getStorageConnectionString(); + + String effectiveStorageContainerName = settings.getDefaultHostName().toLowerCase() + "-" + EventProcessorHost.safeCreateUUID(); + if (settings.inoutEPHConstructorArgs.isFlagSet(PerTestSettings.EPHConstructorArgs.STORAGE_CONTAINER_OVERRIDE)) { + effectiveStorageContainerName = settings.inoutEPHConstructorArgs.getStorageContainerName(); + if (effectiveStorageContainerName != null) { + effectiveStorageContainerName = effectiveStorageContainerName.toLowerCase(); + } + } else { + settings.inoutEPHConstructorArgs.setDefaultStorageContainerName(effectiveStorageContainerName); + } + + String effectiveBlobPrefix = settings.inoutEPHConstructorArgs.isFlagSet(PerTestSettings.EPHConstructorArgs.STORAGE_BLOB_PREFIX_OVERRIDE) ? + settings.inoutEPHConstructorArgs.getStorageBlobPrefix() : null; + + settings.outHost = new EventProcessorHost(effectiveHostName, effectiveEntityPath, effectiveConsumerGroup, effectiveConnectionString, + effectiveStorageConnectionString, effectiveStorageContainerName, effectiveBlobPrefix, effectiveExecutor); + } + + if (!settings.inEventHubDoesNotExist) { + settings.outHost.registerEventProcessorFactory(settings.outProcessorFactory, settings.inOptions).get(); + } + + return settings; + } + + void waitForTelltale(PerTestSettings settings) throws InterruptedException { + for (int i = 0; i < 100; i++) { + if (settings.outProcessorFactory.getAnyTelltaleFound()) { + TestBase.logInfo("Telltale found\n"); + break; + } + Thread.sleep(5000); + } + } + + void waitForTelltale(PerTestSettings settings, String partitionId) throws InterruptedException { + for (int i = 0; i < 100; i++) { + if (settings.outProcessorFactory.getTelltaleFound(partitionId)) { + TestBase.logInfo("Telltale " + partitionId + " found\n"); + break; + } + Thread.sleep(5000); + } + } + + void testFinish(PerTestSettings settings, int expectedEvents) throws InterruptedException, 
ExecutionException, EventHubException { + if ((settings.outHost != null) && !settings.inEventHubDoesNotExist) { + settings.outHost.unregisterEventProcessor().get(); + TestBase.logInfo("Host unregistered"); + } + + if (expectedEvents != NO_CHECKS) { + TestBase.logInfo("Events received: " + settings.outProcessorFactory.getEventsReceivedCount() + "\n"); + if (expectedEvents == ANY_NONZERO_COUNT) { + assertTrue("no events received", settings.outProcessorFactory.getEventsReceivedCount() > 0); + } else if (expectedEvents != SKIP_COUNT_CHECK) { + assertEquals("wrong number of events received", expectedEvents, settings.outProcessorFactory.getEventsReceivedCount()); + } + + assertTrue("telltale event was not found", settings.outProcessorFactory.getAnyTelltaleFound()); + assertEquals("partition errors seen", 0, settings.outProcessorFactory.getErrors().size()); + assertEquals("general errors seen", 0, settings.outGeneralErrorHandler.getErrors().size()); + for (String err : settings.outProcessorFactory.getErrors()) { + logError(err); + } + for (String err : settings.outGeneralErrorHandler.getErrors()) { + logError(err); + } + } + + settings.outUtils.shutdown(); + } + + class BogusCheckpointMananger implements ICheckpointManager { + @Override + public CompletableFuture checkpointStoreExists() { + return CompletableFuture.completedFuture(true); + } + + @Override + public CompletableFuture createCheckpointStoreIfNotExists() { + return CompletableFuture.completedFuture(null); + } + + @Override + public CompletableFuture deleteCheckpointStore() { + return CompletableFuture.completedFuture(null); + } + + @Override + public CompletableFuture getCheckpoint(String partitionId) { + return CompletableFuture.completedFuture(null); + } + + @Override + public CompletableFuture createAllCheckpointsIfNotExists(List partitionIds) { + return CompletableFuture.completedFuture(null); + } + + @Override + public CompletableFuture updateCheckpoint(CompleteLease lease, Checkpoint checkpoint) { + 
return CompletableFuture.completedFuture(null); + } + + @Override + public CompletableFuture deleteCheckpoint(String partitionId) { + return CompletableFuture.completedFuture(null); + } + } + + class BogusLeaseManager implements ILeaseManager { + @Override + public int getLeaseDurationInMilliseconds() { + return 0; + } + + @Override + public CompletableFuture leaseStoreExists() { + return CompletableFuture.completedFuture(true); + } + + @Override + public CompletableFuture createLeaseStoreIfNotExists() { + return CompletableFuture.completedFuture(null); + } + + @Override + public CompletableFuture deleteLeaseStore() { + return CompletableFuture.completedFuture(null); + } + + @Override + public CompletableFuture getLease(String partitionId) { + return CompletableFuture.completedFuture(null); + } + + @Override + public CompletableFuture> getAllLeases() { + return CompletableFuture.completedFuture(null); + } + + @Override + public CompletableFuture createAllLeasesIfNotExists(List partitionIds) { + return CompletableFuture.completedFuture(null); + } + + @Override + public CompletableFuture deleteLease(CompleteLease lease) { + return CompletableFuture.completedFuture(null); + } + + @Override + public CompletableFuture acquireLease(CompleteLease lease) { + return CompletableFuture.completedFuture(true); + } + + @Override + public CompletableFuture renewLease(CompleteLease lease) { + return CompletableFuture.completedFuture(true); + } + + @Override + public CompletableFuture releaseLease(CompleteLease lease) { + return CompletableFuture.completedFuture(null); + } + + @Override + public CompletableFuture updateLease(CompleteLease lease) { + return CompletableFuture.completedFuture(true); + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/TestUtilities.java b/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/TestUtilities.java new file mode 100644 index 
0000000000000..d0466a6b4865a --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-eph/src/test/java/com/microsoft/azure/eventprocessorhost/TestUtilities.java @@ -0,0 +1,41 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ + +package com.microsoft.azure.eventprocessorhost; + +import org.junit.Assume; + +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; + +final class TestUtilities { + static final ScheduledExecutorService EXECUTOR_SERVICE = Executors.newScheduledThreadPool(1); + + static void skipIfAppveyor() { + String appveyor = System.getenv("APPVEYOR"); // Set to "true" by Appveyor + if (appveyor != null) { + TestBase.logInfo("SKIPPING - APPVEYOR DETECTED"); + } + Assume.assumeTrue(appveyor == null); + } + + static String getStorageConnectionString() { + TestUtilities.skipIfAppveyor(); + + String retval = System.getenv("EPHTESTSTORAGE"); + + // if EPHTESTSTORAGE is not set - we cannot run integration tests + if (retval == null) { + TestBase.logInfo("SKIPPING - NO STORAGE CONNECTION STRING"); + } + Assume.assumeTrue(retval != null); + + return ((retval != null) ? 
retval : ""); + } + + static Boolean isRunningOnAzure() { + return (System.getenv("EVENT_HUB_CONNECTION_STRING") != null); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs-extensions/pom.xml b/eventhubs/data-plane/azure-eventhubs-extensions/pom.xml new file mode 100644 index 0000000000000..5dc33296bbbac --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-extensions/pom.xml @@ -0,0 +1,38 @@ + + + libraries and extensions built on Microsoft Azure Event Hubs + + + com.microsoft.azure + azure-eventhubs-clients + 2.0.0 + + + 4.0.0 + + azure-eventhubs-extensions + azure-eventhubs-extensions + + + + com.microsoft.azure + azure-eventhubs + ${project.parent.version} + compile + + + org.apache.logging.log4j + log4j-api + 2.5 + + + org.apache.logging.log4j + log4j-core + 2.5 + + + + + diff --git a/eventhubs/data-plane/azure-eventhubs-extensions/src/main/java/com/microsoft/azure/eventhubs/extensions/appender/EventHubsAppender.java b/eventhubs/data-plane/azure-eventhubs-extensions/src/main/java/com/microsoft/azure/eventhubs/extensions/appender/EventHubsAppender.java new file mode 100644 index 0000000000000..6fd45684198dd --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-extensions/src/main/java/com/microsoft/azure/eventhubs/extensions/appender/EventHubsAppender.java @@ -0,0 +1,141 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.extensions.appender; + +import org.apache.logging.log4j.core.Filter; +import org.apache.logging.log4j.core.Layout; +import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.core.appender.AbstractAppender; +import org.apache.logging.log4j.core.appender.AppenderLoggingException; +import org.apache.logging.log4j.core.config.plugins.Plugin; +import org.apache.logging.log4j.core.config.plugins.PluginAttribute; +import org.apache.logging.log4j.core.config.plugins.PluginElement; +import org.apache.logging.log4j.core.config.plugins.PluginFactory; +import org.apache.logging.log4j.core.config.plugins.validation.constraints.Required; +import org.apache.logging.log4j.core.util.StringEncoder; + +import java.io.*; +import java.nio.charset.StandardCharsets; +import java.util.*; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * Sends {@link LogEvent}'s to Microsoft Azure EventHubs. + * By default, tuned for high performance and hence, pushes a batch of Events. 
+ */ +@Plugin(name = "EventHub", category = "Core", elementType = "appender", printObject = true) +public final class EventHubsAppender extends AbstractAppender { + private static final int MAX_BATCH_SIZE_BYTES = 200 * 1024; + + // this constant is tuned to use the MaximumAllowedMessageSize(256K) including AMQP-Headers for a LogEvent of 1Char + private static final int MAX_BATCH_SIZE = 21312; + private static final long serialVersionUID = 1L; + + private final EventHubsManager eventHubsManager; + private final boolean immediateFlush; + private final AtomicInteger currentBufferedSizeBytes; + private final ConcurrentLinkedQueue logEvents; + + private EventHubsAppender( + final String name, + final Filter filter, + final Layout layout, + final boolean ignoreExceptions, + final EventHubsManager eventHubsManager, + final boolean immediateFlush) { + super(name, filter, layout, ignoreExceptions); + + this.eventHubsManager = eventHubsManager; + this.immediateFlush = immediateFlush; + this.logEvents = new ConcurrentLinkedQueue(); + this.currentBufferedSizeBytes = new AtomicInteger(); + } + + @PluginFactory + public static EventHubsAppender createAppender( + @Required(message = "Provide a Name for EventHubs Log4j Appender") @PluginAttribute("name") final String name, + @PluginElement("Filter") final Filter filter, + @PluginElement("Layout") final Layout layout, + @PluginAttribute(value = "ignoreExceptions", defaultBoolean = true) final boolean ignoreExceptions, + @Required(message = "Provide EventHub connection string to append the events to") @PluginAttribute("eventHubConnectionString") final String connectionString, + @PluginAttribute(value = "immediateFlush", defaultBoolean = false) final boolean immediateFlush) { + final EventHubsManager eventHubsManager = new EventHubsManager(name, connectionString); + return new EventHubsAppender(name, filter, layout, ignoreExceptions, eventHubsManager, immediateFlush); + } + + @Override + public void append(LogEvent logEvent) { + 
byte[] serializedLogEvent = null; + + try { + Layout layout = getLayout(); + + if (layout != null) { + serializedLogEvent = layout.toByteArray(logEvent); + } else { + serializedLogEvent = StringEncoder.toBytes(logEvent.getMessage().getFormattedMessage(), StandardCharsets.UTF_8); + } + + if (serializedLogEvent != null) { + if (this.immediateFlush) { + this.eventHubsManager.send(serializedLogEvent); + return; + } else { + int currentSize = this.currentBufferedSizeBytes.addAndGet(serializedLogEvent.length); + this.logEvents.offer(serializedLogEvent); + + if (currentSize < EventHubsAppender.MAX_BATCH_SIZE_BYTES + && this.logEvents.size() < EventHubsAppender.MAX_BATCH_SIZE + && !logEvent.isEndOfBatch()) { + return; + } + + logEvent.setEndOfBatch(true); + + this.eventHubsManager.send(this.logEvents); + + this.logEvents.clear(); + this.currentBufferedSizeBytes.set(0); + } + } + } catch (final Throwable exception) { + AppenderLoggingException appenderLoggingException = exception instanceof AppenderLoggingException + ? 
(AppenderLoggingException) exception + : new AppenderLoggingException("Appending logEvent to EventHubs failed: " + exception.getMessage(), exception); + + LOGGER.error(String.format(Locale.US, "[%s] Appender failed to logEvent to EventHub.", this.getName())); + + // remove the current LogEvent from the inMem Q - to avoid replay + if (serializedLogEvent != null && this.logEvents.remove(serializedLogEvent)) { + this.currentBufferedSizeBytes.addAndGet(-1 * serializedLogEvent.length); + } + + throw appenderLoggingException; + } + } + + ; + + @Override + public void start() { + super.start(); + + try { + this.eventHubsManager.startup(); + } catch (Throwable exception) { + final String errMsg = String.format(Locale.US, "[%s] Appender initialization failed with error: [%s]", this.getName(), exception.getMessage()); + + LOGGER.error(errMsg); + throw new AppenderLoggingException(errMsg, exception); + } + } + + @Override + public void stop() { + super.stop(); + this.eventHubsManager.release(); + } +} \ No newline at end of file diff --git a/eventhubs/data-plane/azure-eventhubs-extensions/src/main/java/com/microsoft/azure/eventhubs/extensions/appender/EventHubsManager.java b/eventhubs/data-plane/azure-eventhubs-extensions/src/main/java/com/microsoft/azure/eventhubs/extensions/appender/EventHubsManager.java new file mode 100644 index 0000000000000..aadaac89272ef --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs-extensions/src/main/java/com/microsoft/azure/eventhubs/extensions/appender/EventHubsManager.java @@ -0,0 +1,48 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.extensions.appender; + +import com.microsoft.azure.eventhubs.EventData; +import com.microsoft.azure.eventhubs.EventHubClient; +import com.microsoft.azure.eventhubs.EventHubException; +import org.apache.logging.log4j.core.appender.AbstractManager; + +import java.io.IOException; +import java.util.LinkedList; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; + +public final class EventHubsManager extends AbstractManager { + private static final ScheduledExecutorService EXECUTOR_SERVICE = Executors.newScheduledThreadPool(1); + private final String eventHubConnectionString; + private EventHubClient eventHubSender; + + protected EventHubsManager(final String name, final String eventHubConnectionString) { + super(name); + this.eventHubConnectionString = eventHubConnectionString; + } + + public void send(final byte[] msg) throws EventHubException { + if (msg != null) { + EventData data = EventData.create(msg); + this.eventHubSender.sendSync(data); + } + } + + public void send(final Iterable messages) throws EventHubException { + if (messages != null) { + LinkedList events = new LinkedList(); + for (byte[] message : messages) { + events.add(EventData.create(message)); + } + + this.eventHubSender.sendSync(events); + } + } + + public void startup() throws EventHubException, IOException { + this.eventHubSender = EventHubClient.createSync(this.eventHubConnectionString, EXECUTOR_SERVICE); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/pom.xml b/eventhubs/data-plane/azure-eventhubs/pom.xml new file mode 100644 index 0000000000000..786b4ea51688d --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/pom.xml @@ -0,0 +1,20 @@ + + + + com.microsoft.azure + azure-eventhubs-clients + 2.0.0 + + + 4.0.0 + + azure-eventhubs + azure-eventhubs + + + scm:git:https://github.com/Azure/azure-event-hubs-java + + + libraries and extensions built on Microsoft Azure Event Hubs + diff --git 
a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/AuthorizationFailedException.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/AuthorizationFailedException.java new file mode 100644 index 0000000000000..fde67c966ac4b --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/AuthorizationFailedException.java @@ -0,0 +1,40 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs; + +import java.util.concurrent.ScheduledExecutorService; + +/** + * Authorization failed exception is thrown when error is encountered during authorizing user's permission to run the intended operations. + * When encountered this exception user should check whether the token/key provided in the connection string (e.g. one passed to + * {@link EventHubClient#create(String, ScheduledExecutorService)}) is valid, and has correct execution right for the intended operations (e.g. + * Receive call will need Listen claim associated with the key/token). 
+ * + * @see http://go.microsoft.com/fwlink/?LinkId=761101 + */ +public class AuthorizationFailedException extends EventHubException { + private static final long serialVersionUID = 5384872132102860710L; + + AuthorizationFailedException() { + super(false); + } + + /** + * Constructor for the exception class + * + * @param message the actual error message detailing the reason for the failure + */ + public AuthorizationFailedException(final String message) { + super(false, message); + } + + AuthorizationFailedException(final Throwable cause) { + super(false, cause); + } + + AuthorizationFailedException(final String message, final Throwable cause) { + super(false, message, cause); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/BatchOptions.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/BatchOptions.java new file mode 100644 index 0000000000000..916660ef8d437 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/BatchOptions.java @@ -0,0 +1,65 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ + +package com.microsoft.azure.eventhubs; + +import java.util.function.Consumer; + +/** + * BatchOptions is used to create {@link EventDataBatch}es. + *

    If you're creating {@link EventDataBatch}es with {@link EventHubClient}, then you can set a partitionKey and maxMessageSize + * using the .with() method. Alternatively, if you'd like the default settings, simply construct BatchOptions with the void constructor. + * Default settings: + * - partitionKey is null + * - maxMessageSize is the maximum allowed size + *

    If you're creating {@link EventDataBatch}es with {@link PartitionSender}, then you can only set a maxMessageSize + * using the .with() method. Alternatively, if you'd like the default settings, simply construct BatchOptions with the void constructor. + * Default settings: + * - maxMessageSize is the maximum allowed size + * - Note: if you set a partition key, an {@link IllegalArgumentException} will be thrown. + *

    To construct either type of batch, create a {@link BatchOptions} object and pass it into the appropriate + * createBatch method. If using {@link PartitionSender}, then use ({@link PartitionSender#createBatch(BatchOptions)}. + * If using {@link EventHubClient}, then use {@link EventHubClient#createBatch(BatchOptions)}. + *

    + *     {@code
    + *     // Note: For all examples, 'client' is an instance of EventHubClient. The usage is the same for PartitionSender,
    + *     however, you can NOT set a partition key when using PartitionSender
    + *
    + *     // Create EventDataBatch with defaults
    + *     EventDataBatch edb1 = client.createBatch();
    + *
    + *     // Create EventDataBatch with custom partitionKey
    + *     BatchOptions options = new BatchOptions().with( options -> options.partitionKey = "foo");
    + *     EventDataBatch edb2 = client.createBatch(options);
    + *
    + *     // Create EventDataBatch with custom partitionKey and maxMessageSize
    + *     BatchOptions options = new BatchOptions().with ( options -> {
    + *         options.partitionKey = "foo";
    + *         options.maxMessageSize = 100 * 1024;
    + *     };
    + *     EventDataBatch edb3 = client.createBatch(options);
    + *     }
    + * 
    + */ +public final class BatchOptions { + + /** + * The partitionKey to use for all {@link EventData}s sent in the current {@link EventDataBatch}. + * Setting a PartitionKey will deliver the {@link EventData} to a specific Event Hubs partition. + */ + public String partitionKey = null; + + /** + * The maximum size in bytes of {@link EventDataBatch} being constructed. + * This value cannot exceed the maximum size supported by Event Hubs service. + * {@link EventDataBatch#tryAdd(EventData)} API will use this value as the upper limit. + */ + public Integer maxMessageSize = null; + + public final BatchOptions with(Consumer builderFunction) { + builderFunction.accept(this); + return this; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/CommunicationException.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/CommunicationException.java new file mode 100644 index 0000000000000..73c26f38ccac7 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/CommunicationException.java @@ -0,0 +1,37 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs; + +/** + * This exception is thrown when there is a client side connectivity issue. When receiving this exception user should + * check client connectivity settings to the service: + *
      + *
    • Check for correct hostname and port number used in endpoint. + *
    • Check for any possible proxy settings that can block amqp ports + *
    • Check for any firewall settings that can block amqp ports + *
    • Check for any general network connectivity issues, as well as network latency. + *
    + * + * @see http://go.microsoft.com/fwlink/?LinkId=761101 + */ +public class CommunicationException extends EventHubException { + private static final long serialVersionUID = 7968596830506494332L; + + CommunicationException() { + super(true); + } + + CommunicationException(final String message) { + super(true, message); + } + + CommunicationException(final Throwable cause) { + super(true, cause); + } + + public CommunicationException(final String message, final Throwable cause) { + super(true, message, cause); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ConnectionStringBuilder.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ConnectionStringBuilder.java new file mode 100644 index 0000000000000..cb05f5584daa1 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ConnectionStringBuilder.java @@ -0,0 +1,419 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs; + +import com.microsoft.azure.eventhubs.impl.MessagingFactory; +import com.microsoft.azure.eventhubs.impl.StringUtil; + +import java.net.URI; +import java.net.URISyntaxException; +import java.time.Duration; +import java.time.format.DateTimeParseException; +import java.util.Locale; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * {@link ConnectionStringBuilder} can be used to construct a connection string which can establish communication with Event Hub instances. + * In addition to constructing a connection string, the {@link ConnectionStringBuilder} can be used to modify an existing connection string. + *

    Sample Code: + *

    {@code
    + *  // Construct a new connection string
    + * 	ConnectionStringBuilder connectionStringBuilder = new ConnectionStringBuilder()
    + * 	    .setNamespaceName("EventHubsNamespaceName")
    + * 	    .setEventHubName("EventHubsEntityName")
    + * 	    .setSasKeyName("SharedAccessSignatureKeyName")
    + * 	    .setSasKey("SharedAccessSignatureKey")
    + *
    + *  string connString = connectionStringBuilder.build();
    + *
    + *  // Modify an existing connection string
    + *  ConnectionStringBuilder connectionStringBuilder = new ConnectionStringBuilder(existingConnectionString)
    + *      .setEventHubName("SomeOtherEventHubsName")
    + *      .setOperationTimeout(Duration.ofSeconds(30)
    + *
    + *  string connString = connectionStringBuilder.build();
    + * }
    + *

    + * A connection string is basically a string consisting of key-value pairs separated by ";". + * The basic format is {{@literal <}key{@literal >}={@literal <}value{@literal >}[;{@literal <}key{@literal >}={@literal <}value{@literal >}]} where supported key name are as follow: + *

      + *
    • Endpoint - the URL that contains the EventHubs namespace + *
    • EntityPath - the EventHub name which you are connecting to + *
    • SharedAccessKeyName - the key name to the corresponding shared access policy rule for the namespace, or entity. + *
    • SharedAccessKey - the key for the corresponding shared access policy rule of the namespace or entity. + *
    + */ +public final class ConnectionStringBuilder { + + final static String endpointFormat = "sb://%s.%s"; + final static String hostnameFormat = "sb://%s"; + final static String defaultDomainName = "servicebus.windows.net"; + + final static String HostnameConfigName = "Hostname"; // Hostname is a key that is used in IoTHub. + final static String EndpointConfigName = "Endpoint"; // Endpoint key is used in EventHubs. It's identical to Hostname in IoTHub. + final static String EntityPathConfigName = "EntityPath"; + final static String OperationTimeoutConfigName = "OperationTimeout"; + final static String KeyValueSeparator = "="; + final static String KeyValuePairDelimiter = ";"; + final static String SharedAccessKeyNameConfigName = "SharedAccessKeyName"; // We use a (KeyName, Key) pair OR the SAS token - never both. + final static String SharedAccessKeyConfigName = "SharedAccessKey"; + final static String SharedAccessSignatureConfigName = "SharedAccessSignature"; + final static String TransportTypeConfigName = "TransportType"; + + private static final String AllKeyEnumerateRegex = "(" + HostnameConfigName + "|" + EndpointConfigName + "|" + SharedAccessKeyNameConfigName + + "|" + SharedAccessKeyConfigName + "|" + SharedAccessSignatureConfigName + "|" + EntityPathConfigName + "|" + OperationTimeoutConfigName + + "|" + TransportTypeConfigName + ")"; + + private static final String KeysWithDelimitersRegex = KeyValuePairDelimiter + AllKeyEnumerateRegex + + KeyValueSeparator; + + private URI endpoint; + private String eventHubName; + private String sharedAccessKeyName; + private String sharedAccessKey; + private String sharedAccessSignature; + private Duration operationTimeout; + private TransportType transportType; + + /** + * Creates an empty {@link ConnectionStringBuilder}. At minimum, a namespace name, an entity path, SAS key name, and SAS key + * need to be set before a valid connection string can be built. + *

    + * For advanced users, the following replacements can be done: + *

      + *
    • An endpoint can be provided instead of a namespace name.
    • + *
    • A SAS token can be provided instead of a SAS key name and SAS key.
    • + *
    • Optionally, users can set an operation timeout instead of using the default value.
    • + *
    + */ + public ConnectionStringBuilder() { + } + + /** + * ConnectionString format: + * Endpoint=sb://namespace_DNS_Name;EntityPath=EVENT_HUB_NAME;SharedAccessKeyName=SHARED_ACCESS_KEY_NAME;SharedAccessKey=SHARED_ACCESS_KEY + * + * @param connectionString EventHubs ConnectionString + * @throws IllegalConnectionStringFormatException when the format of the ConnectionString is not valid + */ + public ConnectionStringBuilder(String connectionString) { + parseConnectionString(connectionString); + } + + /** + * Get the endpoint which can be used to connect to the EventHub instance. + * + * @return The currently set endpoint + */ + public URI getEndpoint() { + return this.endpoint; + } + + /** + * Set an endpoint which can be used to connect to the EventHub instance. + * + * @param endpoint is a combination of the namespace name and domain name. Together, these pieces make a valid + * endpoint. For example, the default domain name is "servicebus.windows.net", so a sample endpoint + * would look like this: "sb://namespace_name.servicebus.windows.net". + * @return the {@link ConnectionStringBuilder} being set. + */ + public ConnectionStringBuilder setEndpoint(URI endpoint) { + this.endpoint = endpoint; + return this; + } + + /** + * Set an endpoint which can be used to connect to the EventHub instance. + * + * @param namespaceName the name of the namespace to connect to. + * @param domainName identifies the domain the namespace is located in. For non-public and national clouds, + * the domain will not be "servicebus.windows.net". Available options include: + * - "servicebus.usgovcloudapi.net" + * - "servicebus.cloudapi.de" + * - "servicebus.chinacloudapi.cn" + * @return the {@link ConnectionStringBuilder} being set. 
+ */ + public ConnectionStringBuilder setEndpoint(String namespaceName, String domainName) { + try { + this.endpoint = new URI(String.format(Locale.US, endpointFormat, namespaceName, domainName)); + } catch (URISyntaxException exception) { + throw new IllegalConnectionStringFormatException( + String.format(Locale.US, "Invalid namespace name: %s", namespaceName), + exception); + } + return this; + } + + /** + * Set a namespace name which will be used to connect to an EventHubs instance. This method adds + * "servicebus.windows.net" as the default domain name. + * + * @param namespaceName the name of the namespace to connect to. + * @return the {@link ConnectionStringBuilder} being set. + */ + public ConnectionStringBuilder setNamespaceName(String namespaceName) { + return this.setEndpoint(namespaceName, defaultDomainName); + } + + /** + * Get the entity path value from the connection string. + * + * @return Entity Path + */ + public String getEventHubName() { + return this.eventHubName; + } + + /** + * Set the entity path value from the connection string. + * + * @param eventHubName the name of the Event Hub to connect to. + * @return the {@link ConnectionStringBuilder} being set. + */ + public ConnectionStringBuilder setEventHubName(String eventHubName) { + this.eventHubName = eventHubName; + return this; + } + + /** + * Get the shared access policy key value from the connection string + * + * @return Shared Access Signature key + */ + public String getSasKey() { + return this.sharedAccessKey; + } + + /** + * Set the shared access policy key value from the connection string + * + * @param sasKey the SAS key + * @return the {@link ConnectionStringBuilder} being set. + */ + public ConnectionStringBuilder setSasKey(String sasKey) { + this.sharedAccessKey = sasKey; + return this; + } + + /** + * Get the shared access policy owner name from the connection string + * + * @return Shared Access Signature key name. 
+ */ + public String getSasKeyName() { + return this.sharedAccessKeyName; + } + + /** + * Set the shared access policy owner name from the connection string + * + * @param sasKeyName the SAS key name + * @return the {@link ConnectionStringBuilder} being set. + */ + public ConnectionStringBuilder setSasKeyName(String sasKeyName) { + this.sharedAccessKeyName = sasKeyName; + return this; + } + + /** + * Get the shared access signature (also referred as SAS Token) from the connection string + * + * @return Shared Access Signature + */ + public String getSharedAccessSignature() { + return this.sharedAccessSignature; + } + + /** + * Set the shared access signature (also referred as SAS Token) from the connection string + * + * @param sharedAccessSignature the shared access key signature + * @return the {@link ConnectionStringBuilder} being set. + */ + public ConnectionStringBuilder setSharedAccessSignature(String sharedAccessSignature) { + this.sharedAccessSignature = sharedAccessSignature; + return this; + } + + /** + * OperationTimeout is applied in erroneous situations to notify the caller about the relevant {@link EventHubException} + * + * @return operationTimeout + */ + public Duration getOperationTimeout() { + return (this.operationTimeout == null ? MessagingFactory.DefaultOperationTimeout : this.operationTimeout); + } + + /** + * Set the OperationTimeout value in the Connection String. This value will be used by all operations which uses this {@link ConnectionStringBuilder}, unless explicitly over-ridden. + *

    ConnectionString with operationTimeout is not inter-operable between java and clients in other platforms. + * + * @param operationTimeout Operation Timeout + * @return the {@link ConnectionStringBuilder} being set. + */ + public ConnectionStringBuilder setOperationTimeout(final Duration operationTimeout) { + this.operationTimeout = operationTimeout; + return this; + } + + /** + * TransportType on which all the communication for the EventHub objects created using this ConnectionString. + * Default value is {@link TransportType#AMQP}. + * + * @return transportType + */ + public TransportType getTransportType() { + return (this.transportType == null ? TransportType.AMQP : transportType); + } + + /** + * Set the TransportType value in the Connection String. If no TransportType is set, this defaults to {@link TransportType#AMQP}. + * + * @param transportType Transport Type + * @return the {@link ConnectionStringBuilder} instance being set. + */ + public ConnectionStringBuilder setTransportType(final TransportType transportType) { + this.transportType = transportType; + return this; + } + + /** + * Returns an inter-operable connection string that can be used to connect to EventHubs instances. 
+ * + * @return connection string + */ + @Override + public String toString() { + final StringBuilder connectionStringBuilder = new StringBuilder(); + if (this.endpoint != null) { + connectionStringBuilder.append(String.format(Locale.US, "%s%s%s%s", EndpointConfigName, KeyValueSeparator, + this.endpoint.toString(), KeyValuePairDelimiter)); + } + + if (!StringUtil.isNullOrWhiteSpace(this.eventHubName)) { + connectionStringBuilder.append(String.format(Locale.US, "%s%s%s%s", EntityPathConfigName, + KeyValueSeparator, this.eventHubName, KeyValuePairDelimiter)); + } + + if (!StringUtil.isNullOrWhiteSpace(this.sharedAccessKeyName)) { + connectionStringBuilder.append(String.format(Locale.US, "%s%s%s%s", SharedAccessKeyNameConfigName, + KeyValueSeparator, this.sharedAccessKeyName, KeyValuePairDelimiter)); + } + + if (!StringUtil.isNullOrWhiteSpace(this.sharedAccessKey)) { + connectionStringBuilder.append(String.format(Locale.US, "%s%s%s%s", SharedAccessKeyConfigName, + KeyValueSeparator, this.sharedAccessKey, KeyValuePairDelimiter)); + } + + if (!StringUtil.isNullOrWhiteSpace(this.sharedAccessSignature)) { + connectionStringBuilder.append(String.format(Locale.US, "%s%s%s%s", SharedAccessSignatureConfigName, + KeyValueSeparator, this.sharedAccessSignature, KeyValuePairDelimiter)); + } + + if (this.operationTimeout != null) { + connectionStringBuilder.append(String.format(Locale.US, "%s%s%s%s", OperationTimeoutConfigName, + KeyValueSeparator, this.operationTimeout.toString(), KeyValuePairDelimiter)); + } + + if (this.transportType != null) { + connectionStringBuilder.append(String.format(Locale.US, "%s%s%s%s", TransportTypeConfigName, + KeyValueSeparator, this.transportType.toString(), KeyValuePairDelimiter)); + } + + connectionStringBuilder.deleteCharAt(connectionStringBuilder.length() - 1); + return connectionStringBuilder.toString(); + } + + + private void parseConnectionString(final String connectionString) { + if (StringUtil.isNullOrWhiteSpace(connectionString)) { + 
throw new IllegalConnectionStringFormatException("connectionString cannot be empty"); + } + + final String connection = KeyValuePairDelimiter + connectionString; + + final Pattern keyValuePattern = Pattern.compile(KeysWithDelimitersRegex, Pattern.CASE_INSENSITIVE); + final String[] values = keyValuePattern.split(connection); + final Matcher keys = keyValuePattern.matcher(connection); + + if (values == null || values.length <= 1 || keys.groupCount() == 0) { + throw new IllegalConnectionStringFormatException("Connection String cannot be parsed."); + } + + if (!StringUtil.isNullOrWhiteSpace((values[0]))) { + throw new IllegalConnectionStringFormatException( + String.format(Locale.US, "Cannot parse part of ConnectionString: %s", values[0])); + } + + int valueIndex = 0; + while (keys.find()) { + valueIndex++; + + String key = keys.group(); + key = key.substring(1, key.length() - 1); + + if (values.length < valueIndex + 1) { + throw new IllegalConnectionStringFormatException( + String.format(Locale.US, "Value for the connection string parameter name: %s, not found", key)); + } + + if (key.equalsIgnoreCase(EndpointConfigName)) { + if (this.endpoint != null) { + // we have parsed the endpoint once, which means we have multiple config which is not allowed + throw new IllegalConnectionStringFormatException( + String.format(Locale.US, "Multiple %s and/or %s detected. 
Make sure only one is defined", EndpointConfigName, HostnameConfigName)); + } + + try { + this.endpoint = new URI(values[valueIndex]); + } catch (URISyntaxException exception) { + throw new IllegalConnectionStringFormatException( + String.format(Locale.US, "%s should be in format scheme://fullyQualifiedServiceBusNamespaceEndpointName", EndpointConfigName), + exception); + } + } else if (key.equalsIgnoreCase(HostnameConfigName)) { + if (this.endpoint != null) { + // we have parsed the endpoint once, which means we have multiple config which is not allowed + throw new IllegalConnectionStringFormatException( + String.format(Locale.US, "Multiple %s and/or %s detected. Make sure only one is defined", EndpointConfigName, HostnameConfigName)); + } + + try { + this.endpoint = new URI(String.format(Locale.US, hostnameFormat, values[valueIndex])); + } catch (URISyntaxException exception) { + throw new IllegalConnectionStringFormatException( + String.format(Locale.US, "%s should be a fully quantified host name address", HostnameConfigName), + exception); + } + } else if (key.equalsIgnoreCase(SharedAccessKeyNameConfigName)) { + this.sharedAccessKeyName = values[valueIndex]; + } else if (key.equalsIgnoreCase(SharedAccessKeyConfigName)) { + this.sharedAccessKey = values[valueIndex]; + } else if (key.equalsIgnoreCase(SharedAccessSignatureConfigName)) { + this.sharedAccessSignature = values[valueIndex]; + } else if (key.equalsIgnoreCase(EntityPathConfigName)) { + this.eventHubName = values[valueIndex]; + } else if (key.equalsIgnoreCase(OperationTimeoutConfigName)) { + try { + this.operationTimeout = Duration.parse(values[valueIndex]); + } catch (DateTimeParseException exception) { + throw new IllegalConnectionStringFormatException("Invalid value specified for property 'Duration' in the ConnectionString.", exception); + } + } else if (key.equalsIgnoreCase(TransportTypeConfigName)) { + try { + this.transportType = TransportType.fromString(values[valueIndex]); + } catch 
(IllegalArgumentException exception) { + throw new IllegalConnectionStringFormatException( + String.format("Invalid value specified for property '%s' in the ConnectionString.", TransportTypeConfigName), + exception); + } + } else { + throw new IllegalConnectionStringFormatException( + String.format(Locale.US, "Illegal connection string parameter name: %s", key)); + } + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ErrorContext.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ErrorContext.java new file mode 100644 index 0000000000000..2f56282b271a9 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ErrorContext.java @@ -0,0 +1,29 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs; + +import com.microsoft.azure.eventhubs.impl.StringUtil; + +import java.io.Serializable; +import java.util.Locale; + +public abstract class ErrorContext implements Serializable { + private final String namespaceName; + + protected ErrorContext(final String namespaceName) { + this.namespaceName = namespaceName; + } + + protected String getNamespaceName() { + return this.namespaceName; + } + + @Override + public String toString() { + return StringUtil.isNullOrEmpty(this.namespaceName) + ? null + : String.format(Locale.US, "NS: %s", this.namespaceName); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventData.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventData.java new file mode 100755 index 0000000000000..d9884bf248c53 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventData.java @@ -0,0 +1,186 @@ +/* + * Copyright (c) Microsoft. 
All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs; + +import com.microsoft.azure.eventhubs.impl.AmqpConstants; +import com.microsoft.azure.eventhubs.impl.EventDataImpl; +import org.apache.qpid.proton.amqp.Binary; + +import java.io.Serializable; +import java.nio.ByteBuffer; +import java.time.Instant; +import java.util.*; +import java.util.concurrent.ScheduledExecutorService; + +/** + * The data structure encapsulating the Event being sent-to and received-from EventHubs. + * Each EventHubs partition can be visualized as a Stream of {@link EventData}. + *

    + * Serializing a received {@link EventData} with AMQP sections other than ApplicationProperties (with primitive Java types) and Data section is not supported. +

    + * Here's how AMQP message sections map to {@link EventData}. Here's the reference used for AMQP 1.0 specification: http://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-complete-v1.0-os.pdf + *

    + * i.   {@link #getProperties()} - AMQPMessage.ApplicationProperties section
    + * ii.  {@link #getBytes()} - if AMQPMessage.Body has Data section
    + * iii. {@link #getObject()} - if AMQPMessage.Body has AMQPValue or AMQPSequence sections
    + * 
    + * While using client libraries released by Microsoft Azure EventHubs, sections (i) and (ii) alone are sufficient. + * Section (iii) is used for advanced scenarios, where the sending application uses third-party AMQP library to send the message to EventHubs and the receiving application + * uses this client library to receive {@link EventData}. + */ +public interface EventData extends Serializable, Comparable { + + /** + * Construct EventData to Send to EventHubs. + * Typical pattern to create a Sending EventData is: + *
    +     * i.	Serialize the sending ApplicationEvent to be sent to EventHubs into bytes.
    +     * ii.	If complex serialization logic is involved (for example: multiple types of data) - add a Hint using the {@link #getProperties()} for the Consumer.
    +     * 
    + *

    Sample Code: + *

    +     * EventData eventData = EventData.create(telemetryEventBytes);
    +     * eventData.getProperties().put("eventType", "com.microsoft.azure.monitoring.EtlEvent");
    +     * partitionSender.Send(eventData);
    +     * 
    + * + * @param data the actual payload of data in bytes to be Sent to EventHubs. + * @return EventData the created {@link EventData} to send to EventHubs. + * @see EventHubClient#create(String, ScheduledExecutorService) + */ + static EventData create(final byte[] data) { + return new EventDataImpl(data); + } + + /** + * Construct EventData to Send to EventHubs. + * Typical pattern to create a Sending EventData is: + *
    +     * i.	Serialize the sending ApplicationEvent to be sent to EventHubs into bytes.
    +     * ii.	If complex serialization logic is involved (for example: multiple types of data) - add a Hint using the {@link #getProperties()} for the Consumer.
    +     *  
    + *

    Illustration: + *

     {@code
    +     *  EventData eventData = EventData.create(telemetryEventBytes, offset, length);
    +     *  eventData.getProperties().put("eventType", "com.microsoft.azure.monitoring.EtlEvent");
    +     *  partitionSender.Send(eventData);
    +     *  }
    + * + * @param data the byte[] where the payload of the Event to be sent to EventHubs is present + * @param offset Offset in the byte[] to read from ; inclusive index + * @param length length of the byte[] to be read, starting from offset + * @return EventData the created {@link EventData} to send to EventHubs. + * @see EventHubClient#create(String, ScheduledExecutorService) + */ + static EventData create(final byte[] data, final int offset, final int length) { + return new EventDataImpl(data, offset, length); + } + + /** + * Construct EventData to Send to EventHubs. + * Typical pattern to create a Sending EventData is: + *
    +     * i.	Serialize the sending ApplicationEvent to be sent to EventHubs into bytes.
    +     * ii.	If complex serialization logic is involved (for example: multiple types of data) - add a Hint using the {@link #getProperties()} for the Consumer.
    +     *  
    + *

    Illustration: + *

     {@code
    +     *  EventData eventData = EventData.create(telemetryEventByteBuffer);
    +     *  eventData.getProperties().put("eventType", "com.microsoft.azure.monitoring.EtlEvent");
    +     * 	partitionSender.Send(eventData);
    +     *  }
    + * + * @param buffer ByteBuffer which references the payload of the Event to be sent to EventHubs + * @return EventData the created {@link EventData} to send to EventHubs. + * @see EventHubClient#create(String, ScheduledExecutorService) + */ + static EventData create(final ByteBuffer buffer) { + return new EventDataImpl(buffer); + } + + /** + * Use this method only if, the sender could be sending messages using third-party AMQP libraries. + *

    If all the senders of EventHub use client libraries released and maintained by Microsoft Azure EventHubs, use the {@link #getBytes()} method. + *

    Get the value of AMQP messages' Body section on the received {@link EventData}. + *

    If the AMQP message Body is always guaranteed to have Data section, use {@link #getBytes()} method. + * + * @return returns the Object which could represent either Data or AmqpValue or AmqpSequence. + *

    {@link Binary} if the Body is Data section + *

    {@link List} if the Body is AmqpSequence + *

    package org.apache.qpid.proton.amqp contains various AMQP types that could be returned. + */ + Object getObject(); + + /** + * Get Actual Payload/Data wrapped by EventData. + * + * @return byte[] of the actual data + *

    null if the body of the message has other inter-operable AMQP messages, whose body does not represent byte[]. + * In that case use {@link #getObject()}. + */ + byte[] getBytes(); + + /** + * Application property bag + * + * @return returns Application properties + */ + Map getProperties(); + + /** + * SystemProperties that are populated by EventHubService. + *

    As these are populated by the service, they are only present on a received EventData. + *

    Usage:

    + * + * final String offset = eventData.getSystemProperties().getOffset(); + * + * + * @return an encapsulation of all SystemProperties appended by EventHubs service into EventData. + * null if the {@link EventData} is not received and is created by the public constructors. + * @see SystemProperties#getOffset + * @see SystemProperties#getSequenceNumber + * @see SystemProperties#getPartitionKey + * @see SystemProperties#getEnqueuedTime + */ + SystemProperties getSystemProperties(); + + class SystemProperties extends HashMap { + private static final long serialVersionUID = -2827050124966993723L; + + public SystemProperties(final HashMap map) { + super(Collections.unmodifiableMap(map)); + } + + public String getOffset() { + return this.getSystemProperty(AmqpConstants.OFFSET_ANNOTATION_NAME); + } + + public String getPartitionKey() { + return this.getSystemProperty(AmqpConstants.PARTITION_KEY_ANNOTATION_NAME); + } + + public Instant getEnqueuedTime() { + final Date enqueuedTimeValue = this.getSystemProperty(AmqpConstants.ENQUEUED_TIME_UTC_ANNOTATION_NAME); + return enqueuedTimeValue != null ? 
enqueuedTimeValue.toInstant() : null; + } + + public long getSequenceNumber() { + return this.getSystemProperty(AmqpConstants.SEQUENCE_NUMBER_ANNOTATION_NAME); + } + + public String getPublisher() { + return this.getSystemProperty(AmqpConstants.PUBLISHER_ANNOTATION_NAME); + } + + @SuppressWarnings("unchecked") + private T getSystemProperty(final String key) { + if (this.containsKey(key)) { + return (T) (this.get(key)); + } + + return null; + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventDataBatch.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventDataBatch.java new file mode 100644 index 0000000000000..97364bb43733d --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventDataBatch.java @@ -0,0 +1,28 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs; + +/** + * Helper for creating a batch/collection of EventData objects to be used while Sending to EventHubs + */ +public interface EventDataBatch { + + /** + * Get the number of events present in this {@link EventDataBatch} + * + * @return the EventDataBatch size + */ + int getSize(); + + /** + * Add's {@link EventData} to {@link EventDataBatch}, if permitted by the batch's size limit. + * This method is not thread-safe. + * + * @param eventData The {@link EventData} to add. + * @return A boolean value indicating if the {@link EventData} addition to this batch/collection was successful or not. 
+ * @throws PayloadSizeExceededException when a single {@link EventData} instance exceeds maximum allowed size of the batch + */ + boolean tryAdd(final EventData eventData) throws PayloadSizeExceededException; +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventHubClient.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventHubClient.java new file mode 100644 index 0000000000000..8fa03e6d36790 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventHubClient.java @@ -0,0 +1,477 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs; + +import com.microsoft.azure.eventhubs.impl.EventHubClientImpl; +import com.microsoft.azure.eventhubs.impl.ExceptionUtil; + +import java.io.IOException; +import java.nio.channels.UnresolvedAddressException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Executor; +import java.util.concurrent.ScheduledExecutorService; + +/** + * Anchor class - all EventHub client operations STARTS here. + * + * @see EventHubClient#create(String, ScheduledExecutorService) + */ +public interface EventHubClient { + + String DEFAULT_CONSUMER_GROUP_NAME = "$Default"; + + /** + * Synchronous version of {@link #create(String, ScheduledExecutorService)}. + * + * @param connectionString The connection string to be used. See {@link ConnectionStringBuilder} to construct a connectionString. + * @param executor An {@link ScheduledExecutorService} to run all tasks performed by {@link EventHubClient}. + * @return EventHubClient which can be used to create Senders and Receivers to EventHub + * @throws EventHubException If Service Bus service encountered problems during connection creation. 
+ * @throws IOException If the underlying Proton-J layer encounter network errors. + */ + static EventHubClient createSync(final String connectionString, final ScheduledExecutorService executor) + throws EventHubException, IOException { + return EventHubClient.createSync(connectionString, null, executor); + } + + /** + * Synchronous version of {@link #create(String, ScheduledExecutorService)}. + * + * @param connectionString The connection string to be used. See {@link ConnectionStringBuilder} to construct a connectionString. + * @param retryPolicy A custom {@link RetryPolicy} to be used when communicating with EventHub. + * @param executor An {@link ScheduledExecutorService} to run all tasks performed by {@link EventHubClient}. + * @return EventHubClient which can be used to create Senders and Receivers to EventHub + * @throws EventHubException If Service Bus service encountered problems during connection creation. + * @throws IOException If the underlying Proton-J layer encounter network errors. + */ + static EventHubClient createSync(final String connectionString, final RetryPolicy retryPolicy, final ScheduledExecutorService executor) + throws EventHubException, IOException { + return ExceptionUtil.syncWithIOException(() -> create(connectionString, retryPolicy, executor).get()); + } + + /** + * Factory method to create an instance of {@link EventHubClient} using the supplied connectionString. + * In a normal scenario (when re-direct is not enabled) - one EventHubClient instance maps to one Connection to the Azure ServiceBus EventHubs service. + *

    The {@link EventHubClient} created from this method creates a Sender instance internally, which is used by the {@link #send(EventData)} methods. + * + * @param connectionString The connection string to be used. See {@link ConnectionStringBuilder} to construct a connectionString. + * @param executor An {@link ScheduledExecutorService} to run all tasks performed by {@link EventHubClient}. + * @return CompletableFuture{@literal } which can be used to create Senders and Receivers to EventHub + * @throws EventHubException If Service Bus service encountered problems during connection creation. + * @throws IOException If the underlying Proton-J layer encounter network errors. + */ + static CompletableFuture create(final String connectionString, final ScheduledExecutorService executor) + throws EventHubException, IOException { + return EventHubClient.create(connectionString, null, executor); + } + + /** + * Factory method to create an instance of {@link EventHubClient} using the supplied connectionString. + * In a normal scenario (when re-direct is not enabled) - one EventHubClient instance maps to one Connection to the Azure ServiceBus EventHubs service. + *

    The {@link EventHubClient} created from this method creates a Sender instance internally, which is used by the {@link #send(EventData)} methods. + * + * @param connectionString The connection string to be used. See {@link ConnectionStringBuilder} to construct a connectionString. + * @param retryPolicy A custom {@link RetryPolicy} to be used when communicating with EventHub. + * @param executor An {@link ScheduledExecutorService} to run all tasks performed by {@link EventHubClient}. + * @return CompletableFuture{@literal } which can be used to create Senders and Receivers to EventHub + * @throws EventHubException If Service Bus service encountered problems during connection creation. + * @throws IOException If the underlying Proton-J layer encounter network errors. + */ + static CompletableFuture create( + final String connectionString, final RetryPolicy retryPolicy, final ScheduledExecutorService executor) + throws EventHubException, IOException { + return EventHubClientImpl.create(connectionString, retryPolicy, executor); + } + + /** + * @return the name of the Event Hub this client is connected to. + */ + String getEventHubName(); + + /** + * Creates an Empty Collection of {@link EventData}. + * The same partitionKey must be used while sending these events using {@link EventHubClient#send(EventDataBatch)}. + * + * @param options see {@link BatchOptions} for more details + * @return the empty {@link EventDataBatch}, after negotiating maximum message size with EventHubs service + * @throws EventHubException if the Microsoft Azure Event Hubs service encountered problems during the operation. + */ + EventDataBatch createBatch(BatchOptions options) throws EventHubException; + + /** + * Creates an Empty Collection of {@link EventData}. + * The same partitionKey must be used while sending these events using {@link EventHubClient#send(EventDataBatch)}. 
+ * + * @return the empty {@link EventDataBatch}, after negotiating maximum message size with EventHubs service + * @throws EventHubException if the Microsoft Azure Event Hubs service encountered problems during the operation. + */ + default EventDataBatch createBatch() throws EventHubException { + return this.createBatch(new BatchOptions()); + } + + /** + * Synchronous version of {@link #send(EventData)}. + * + * @param data the {@link EventData} to be sent. + * @throws PayloadSizeExceededException if the total size of the {@link EventData} exceeds a predefined limit set by the service. Default is 256k bytes. + * @throws EventHubException if Service Bus service encountered problems during the operation. + * @throws UnresolvedAddressException if there are Client to Service network connectivity issues, if the Azure DNS resolution of the ServiceBus Namespace fails (ex: namespace deleted etc.) + */ + default void sendSync(final EventData data) throws EventHubException { + ExceptionUtil.syncVoid(() -> this.send(data).get()); + } + + /** + * Send {@link EventData} to EventHub. The sent {@link EventData} will land on any arbitrarily chosen EventHubs partition. + *

    There are 3 ways to send to EventHubs, each exposed as a method (along with its sendBatch overload): + *

      + *
    • {@link #send(EventData)}, {@link #send(Iterable)}, or {@link #send(EventDataBatch)} + *
    • {@link #send(EventData, String)} or {@link #send(Iterable, String)} + *
    • {@link PartitionSender#send(EventData)}, {@link PartitionSender#send(Iterable)}, or {@link PartitionSender#send(EventDataBatch)} + *
    + *

    Use this method to Send, if: + *

    +     * a)  the send({@link EventData}) operation should be highly available and
    +     * b)  the data needs to be evenly distributed among all partitions; exception being, when a subset of partitions are unavailable
    +     * 
    + *

    + * {@link #send(EventData)} send's the {@link EventData} to a Service Gateway, which in-turn will forward the {@link EventData} to one of the EventHubs' partitions. Here's the message forwarding algorithm: + *

    +     * i.  Forward the {@link EventData}'s to EventHub partitions, by equally distributing the data among all partitions (ex: Round-robin the {@link EventData}'s to all EventHubs' partitions)
    +     * ii. If one of the EventHub partitions is unavailable for a moment, the Service Gateway will automatically detect it and forward the message to another available partition - making the Send operation highly-available.
    +     * 
    + * + * @param data the {@link EventData} to be sent. + * @return a CompletableFuture that can be completed when the send operations is done.. + * @see #send(EventData, String) + * @see PartitionSender#send(EventData) + */ + CompletableFuture send(final EventData data); + + /** + * Synchronous version of {@link #send(Iterable)}. + * + * @param eventDatas batch of events to send to EventHub + * @throws PayloadSizeExceededException if the total size of the {@link EventData} exceeds a pre-defined limit set by the service. Default is 256k bytes. + * @throws EventHubException if Service Bus service encountered problems during the operation. + * @throws UnresolvedAddressException if there are Client to Service network connectivity issues, if the Azure DNS resolution of the ServiceBus Namespace fails (ex: namespace deleted etc.) + */ + default void sendSync(final Iterable eventDatas) throws EventHubException { + ExceptionUtil.syncVoid(() -> this.send(eventDatas).get()); + } + + /** + * Send a batch of {@link EventData} to EventHub. The sent {@link EventData} will land on any arbitrarily chosen EventHubs partition. + * This is the most recommended way to Send to EventHubs. + *

    There are 3 ways to send to EventHubs, to understand this particular type of Send refer to the overload {@link #send(EventData)}, which is used to send single {@link EventData}. + * Use this overload versus {@link #send(EventData)}, if you need to send a batch of {@link EventData}. + *

    Sending a batch of {@link EventData}'s is useful in the following cases: + *

    +     * i.	Efficient send - sending a batch of {@link EventData} maximizes the overall throughput by optimally using the number of sessions created to EventHubs' service.
    +     * ii.	Send multiple {@link EventData}'s in a Transaction. To achieve ACID properties, the Gateway Service will forward all {@link EventData}'s in the batch to a single EventHubs' partition.
    +     * 
    + *

    + * Sample code (sample uses sync version of the api but concept are identical): + *

    +     * Gson gson = new GsonBuilder().create();
    +     * EventHubClient client = EventHubClient.createSync("__connection__");
    +     *
    +     * while (true)
    +     * {
    +     *     LinkedList{@literal<}EventData{@literal>} events = new LinkedList{@literal<}EventData{@literal>}();}
    +     *     for (int count = 1; count {@literal<} 11; count++)
    +     *     {
    +     *         PayloadEvent payload = new PayloadEvent(count);
    +     *         byte[] payloadBytes = gson.toJson(payload).getBytes(Charset.defaultCharset());
    +     *         EventData sendEvent = new EventData(payloadBytes);
    +     *         sendEvent.getProperties().put("from", "javaClient");
    +     *         events.add(sendEvent);
    +     *     }
    +     *
    +     *     client.sendSync(events);
    +     *     System.out.println(String.format("Sent Batch... Size: %s", events.size()));
    +     * }
    +     * 
    + *

    for Exceptions refer to {@link #sendSync(Iterable)} + * + * @param eventDatas batch of events to send to EventHub + * @return a CompletableFuture that can be completed when the send operations is done.. + * @see #send(EventData, String) + * @see PartitionSender#send(EventData) + */ + CompletableFuture send(final Iterable eventDatas); + + /** + * Synchronous version of {@link #send(EventDataBatch)}. + * + * @param eventDatas EventDataBatch to send to EventHub + * @throws EventHubException if Service Bus service encountered problems during the operation. + */ + default void sendSync(final EventDataBatch eventDatas) throws EventHubException { + ExceptionUtil.syncVoid(() -> this.send(eventDatas).get()); + } + + /** + * Send {@link EventDataBatch} to EventHub. The sent {@link EventDataBatch} will land according the partition key + * set in the {@link EventDataBatch}. If a partition key is not set, then we will Round-robin the {@link EventData}'s + * to all EventHubs' partitions. + * + * @param eventDatas EventDataBatch to send to EventHub + * @return a CompleteableFuture that can be completed when the send operations are done + * @see #send(Iterable) + * @see EventDataBatch + */ + CompletableFuture send(final EventDataBatch eventDatas); + + /** + * Synchronous version of {@link #send(EventData, String)}. + * + * @param eventData the {@link EventData} to be sent. + * @param partitionKey the partitionKey will be hash'ed to determine the partitionId to send the eventData to. On the Received message this can be accessed at {@link EventData.SystemProperties#getPartitionKey()} + * @throws PayloadSizeExceededException if the total size of the {@link EventData} exceeds a pre-defined limit set by the service. Default is 256k bytes. + * @throws EventHubException if Service Bus service encountered problems during the operation. 
+ */ + default void sendSync(final EventData eventData, final String partitionKey) throws EventHubException { + ExceptionUtil.syncVoid(() -> this.send(eventData, partitionKey).get()); + } + + /** + * Send an '{@link EventData} with a partitionKey' to EventHub. All {@link EventData}'s with a partitionKey are guaranteed to land on the same partition. + * This send pattern emphasize data correlation over general availability and latency. + *

    + * There are 3 ways to send to EventHubs, each exposed as a method (along with its sendBatch overload): + *

    +     * i.   {@link #send(EventData)} or {@link #send(Iterable)}
    +     * ii.  {@link #send(EventData, String)} or {@link #send(Iterable, String)}
    +     * iii. {@link PartitionSender#send(EventData)} or {@link PartitionSender#send(Iterable)}
    +     * 
    + *

    + * Use this type of Send, if: + *

    +     * i.  There is a need for correlation of events based on Sender instance; The sender can generate a UniqueId and set it as partitionKey - which on the received Message can be used for correlation
    +     * ii. The client wants to take control of distribution of data across partitions.
    +     * 
    + *

    + * Multiple PartitionKey's could be mapped to one Partition. EventHubs service uses a proprietary Hash algorithm to map the PartitionKey to a PartitionId. + * Using this type of Send (Sending using a specific partitionKey), could sometimes result in partitions which are not evenly distributed. + * + * @param eventData the {@link EventData} to be sent. + * @param partitionKey the partitionKey will be hash'ed to determine the partitionId to send the eventData to. On the Received message this can be accessed at {@link EventData.SystemProperties#getPartitionKey()} + * @return a CompletableFuture that can be completed when the send operations is done.. + * @see #send(EventData) + * @see PartitionSender#send(EventData) + */ + CompletableFuture send(final EventData eventData, final String partitionKey); + + /** + * Synchronous version of {@link #send(Iterable, String)}. + * + * @param eventDatas the batch of events to send to EventHub + * @param partitionKey the partitionKey will be hash'ed to determine the partitionId to send the eventData to. On the Received message this can be accessed at {@link EventData.SystemProperties#getPartitionKey()} + * @throws PayloadSizeExceededException if the total size of the {@link EventData} exceeds a pre-defined limit set by the service. Default is 256k bytes. + * @throws EventHubException if Service Bus service encountered problems during the operation. + * @throws UnresolvedAddressException if there are Client to Service network connectivity issues, if the Azure DNS resolution of the ServiceBus Namespace fails (ex: namespace deleted etc.) + */ + default void sendSync(final Iterable eventDatas, final String partitionKey) throws EventHubException { + ExceptionUtil.syncVoid(() -> this.send(eventDatas, partitionKey).get()); + } + + /** + * Send a 'batch of {@link EventData} with the same partitionKey' to EventHub. All {@link EventData}'s with a partitionKey are guaranteed to land on the same partition. 
+ * Multiple PartitionKey's will be mapped to one Partition. + *

    There are 3 ways to send to EventHubs, to understand this particular type of Send refer to the overload {@link #send(EventData, String)}, which is the same type of Send and is used to send single {@link EventData}. + *

    Sending a batch of {@link EventData}'s is useful in the following cases: + *

    +     * i.	Efficient send - sending a batch of {@link EventData} maximizes the overall throughput by optimally using the number of sessions created to EventHubs service.
    +     * ii.	Send multiple events in One Transaction. This is the reason why all events sent in a batch needs to have same partitionKey (so that they are sent to one partition only).
    +     * 
    + * + * @param eventDatas the batch of events to send to EventHub + * @param partitionKey the partitionKey will be hash'ed to determine the partitionId to send the eventData to. On the Received message this can be accessed at {@link EventData.SystemProperties#getPartitionKey()} + * @return a CompletableFuture that can be completed when the send operations is done.. + * @see #send(EventData) + * @see PartitionSender#send(EventData) + */ + CompletableFuture send(final Iterable eventDatas, final String partitionKey); + + /** + * Synchronous version of {@link #createPartitionSender(String)}. + * + * @param partitionId partitionId of EventHub to send the {@link EventData}'s to + * @return PartitionSenderImpl which can be used to send events to a specific partition. + * @throws EventHubException if Service Bus service encountered problems during connection creation. + */ + default PartitionSender createPartitionSenderSync(final String partitionId) throws EventHubException, IllegalArgumentException { + return ExceptionUtil.syncWithIllegalArgException(() -> this.createPartitionSender(partitionId).get()); + } + + /** + * Create a {@link PartitionSender} which can publish {@link EventData}'s directly to a specific EventHub partition (sender type iii. in the below list). + *

    + * There are 3 patterns/ways to send to EventHubs: + *

    +     * i.   {@link #send(EventData)} or {@link #send(Iterable)}
    +     * ii.  {@link #send(EventData, String)} or {@link #send(Iterable, String)}
    +     * iii. {@link PartitionSender#send(EventData)} or {@link PartitionSender#send(Iterable)}
    +     * 
    + * + * @param partitionId partitionId of EventHub to send the {@link EventData}'s to + * @return a CompletableFuture that would result in a PartitionSenderImpl when it is completed. + * @throws EventHubException if Service Bus service encountered problems during connection creation. + * @see PartitionSender + */ + CompletableFuture createPartitionSender(final String partitionId) throws EventHubException; + + /** + * Synchronous version of {@link #createReceiver(String, String, EventPosition)}. + * + * @param consumerGroupName the consumer group name that this receiver should be grouped under. + * @param partitionId the partition Id that the receiver belongs to. All data received will be from this partition only. + * @param eventPosition the position to start receiving the events from. See {@link EventPosition} + * @return PartitionReceiver instance which can be used for receiving {@link EventData}. + * @throws EventHubException if Service Bus service encountered problems during the operation. + */ + default PartitionReceiver createReceiverSync(final String consumerGroupName, final String partitionId, final EventPosition eventPosition) throws EventHubException { + return ExceptionUtil.sync(() -> this.createReceiver(consumerGroupName, partitionId, eventPosition).get()); + } + + /** + * Create the EventHub receiver with given partition id and start receiving from the specified starting offset. + * The receiver is created for a specific EventHub Partition from the specific consumer group. + * + * @param consumerGroupName the consumer group name that this receiver should be grouped under. + * @param partitionId the partition Id that the receiver belongs to. All data received will be from this partition only. + * @param eventPosition the position to start receiving the events from. See {@link EventPosition} + * @return a CompletableFuture that would result in a PartitionReceiver instance when it is completed. 
+ * @throws EventHubException if Service Bus service encountered problems during the operation. + * @see PartitionReceiver + */ + CompletableFuture createReceiver(final String consumerGroupName, final String partitionId, final EventPosition eventPosition) throws EventHubException; + + /** + * Synchronous version of {@link #createReceiver(String, String, EventPosition)}. + * + * @param consumerGroupName the consumer group name that this receiver should be grouped under. + * @param partitionId the partition Id that the receiver belongs to. All data received will be from this partition only. + * @param eventPosition the position to start receiving the events from. See {@link EventPosition} + * @param receiverOptions the set of options to enable on the event hubs receiver + * @return PartitionReceiver instance which can be used for receiving {@link EventData}. + * @throws EventHubException if Service Bus service encountered problems during the operation. + */ + default PartitionReceiver createReceiverSync(final String consumerGroupName, final String partitionId, final EventPosition eventPosition, final ReceiverOptions receiverOptions) throws EventHubException { + return ExceptionUtil.sync(() -> this.createReceiver(consumerGroupName, partitionId, eventPosition, receiverOptions).get()); + } + + /** + * Create the EventHub receiver with given partition id and start receiving from the specified starting offset. + * The receiver is created for a specific EventHub Partition from the specific consumer group. + * + * @param consumerGroupName the consumer group name that this receiver should be grouped under. + * @param partitionId the partition Id that the receiver belongs to. All data received will be from this partition only. + * @param eventPosition the position to start receiving the events from. 
See {@link EventPosition} + * @param receiverOptions the set of options to enable on the event hubs receiver + * @return a CompletableFuture that would result in a PartitionReceiver instance when it is completed. + * @throws EventHubException if Service Bus service encountered problems during the operation. + * @see PartitionReceiver + */ + CompletableFuture createReceiver(final String consumerGroupName, final String partitionId, final EventPosition eventPosition, final ReceiverOptions receiverOptions) throws EventHubException; + + /** + * Synchronous version of {@link #createEpochReceiver(String, String, EventPosition, long)}. + * + * @param consumerGroupName the consumer group name that this receiver should be grouped under. + * @param partitionId the partition Id that the receiver belongs to. All data received will be from this partition only. + * @param eventPosition the position to start receiving the events from. See {@link EventPosition} + * @param epoch an unique identifier (epoch value) that the service uses, to enforce partition/lease ownership. + * @return PartitionReceiver instance which can be used for receiving {@link EventData}. + * @throws EventHubException if Service Bus service encountered problems during the operation. + */ + default PartitionReceiver createEpochReceiverSync(final String consumerGroupName, final String partitionId, final EventPosition eventPosition, final long epoch) throws EventHubException { + return ExceptionUtil.sync(() -> this.createEpochReceiver(consumerGroupName, partitionId, eventPosition, epoch).get()); + } + + /** + * Create a Epoch based EventHub receiver with given partition id and start receiving from the beginning of the partition stream. + * The receiver is created for a specific EventHub Partition from the specific consumer group. + *

    + * It is important to pay attention to the following when creating epoch based receiver: + *

      + *
    • Ownership enforcement - Once you create an epoch-based receiver, you cannot create a non-epoch receiver to the same consumerGroup-Partition combo until all receivers to the combo are closed. + *
    • Ownership stealing - If a receiver with a higher epoch value is created for a consumerGroup-Partition combo, any older epoch receiver to that combo will be force closed. + *
    • Any receiver closed due to loss of ownership of a consumerGroup-Partition combo will get ReceiverDisconnectedException for all operations from that receiver. + *
    + * + * @param consumerGroupName the consumer group name that this receiver should be grouped under. + * @param partitionId the partition Id that the receiver belongs to. All data received will be from this partition only. + * @param eventPosition the position to start receiving the events from. See {@link EventPosition} + * @param epoch an unique identifier (epoch value) that the service uses, to enforce partition/lease ownership. + * @return a CompletableFuture that would result in a PartitionReceiver when it is completed. + * @throws EventHubException if Service Bus service encountered problems during the operation. + * @see PartitionReceiver + * @see ReceiverDisconnectedException + */ + CompletableFuture createEpochReceiver(final String consumerGroupName, final String partitionId, final EventPosition eventPosition, final long epoch) throws EventHubException; + + /** + * Synchronous version of {@link #createEpochReceiver(String, String, EventPosition, long)}. + * + * @param consumerGroupName the consumer group name that this receiver should be grouped under. + * @param partitionId the partition Id that the receiver belongs to. All data received will be from this partition only. + * @param eventPosition the position to start receiving the events from. See {@link EventPosition} + * @param epoch an unique identifier (epoch value) that the service uses, to enforce partition/lease ownership. + * @param receiverOptions the set of options to enable on the event hubs receiver + * @return PartitionReceiver instance which can be used for receiving {@link EventData}. + * @throws EventHubException if Service Bus service encountered problems during the operation. 
+ */ + default PartitionReceiver createEpochReceiverSync(final String consumerGroupName, final String partitionId, final EventPosition eventPosition, final long epoch, final ReceiverOptions receiverOptions) throws EventHubException { + return ExceptionUtil.sync(() -> this.createEpochReceiver(consumerGroupName, partitionId, eventPosition, epoch, receiverOptions).get()); + } + + /** + * Create a Epoch based EventHub receiver with given partition id and start receiving from the beginning of the partition stream. + * The receiver is created for a specific EventHub Partition from the specific consumer group. + *

    + * It is important to pay attention to the following when creating epoch based receiver: + *

      + *
    • Ownership enforcement - Once you create an epoch-based receiver, you cannot create a non-epoch receiver to the same consumerGroup-Partition combo until all receivers to the combo are closed. + *
    • Ownership stealing - If a receiver with a higher epoch value is created for a consumerGroup-Partition combo, any older epoch receiver to that combo will be force closed. + *
    • Any receiver closed due to loss of ownership of a consumerGroup-Partition combo will get ReceiverDisconnectedException for all operations from that receiver. + *
    + * + * @param consumerGroupName the consumer group name that this receiver should be grouped under. + * @param partitionId the partition Id that the receiver belongs to. All data received will be from this partition only. + * @param eventPosition the position to start receiving the events from. See {@link EventPosition} + * @param epoch an unique identifier (epoch value) that the service uses, to enforce partition/lease ownership. + * @param receiverOptions the set of options to enable on the event hubs receiver + * @return a CompletableFuture that would result in a PartitionReceiver when it is completed. + * @throws EventHubException if Service Bus service encountered problems during the operation. + * @see PartitionReceiver + * @see ReceiverDisconnectedException + */ + CompletableFuture createEpochReceiver(final String consumerGroupName, final String partitionId, final EventPosition eventPosition, final long epoch, final ReceiverOptions receiverOptions) throws EventHubException; + + /** + * Retrieves general information about an event hub (see {@link EventHubRuntimeInformation} for details). + * Retries until it reaches the operation timeout, then either rethrows the last error if available or + * returns null to indicate timeout. + * + * @return CompletableFuture which returns an EventHubRuntimeInformation on success, or null on timeout. + */ + CompletableFuture getRuntimeInformation(); + + /** + * Retrieves dynamic information about a partition of an event hub (see {@link PartitionRuntimeInformation} for + * details. Retries until it reaches the operation timeout, then either rethrows the last error if available or + * returns null to indicate timeout. + * + * @param partitionId Partition to get information about. Must be one of the partition ids returned by {@link #getRuntimeInformation}. + * @return CompletableFuture which returns an PartitionRuntimeInformation on success, or null on timeout. 
+ */ + CompletableFuture getPartitionRuntimeInformation(String partitionId); + + CompletableFuture close(); + + void closeSync() throws EventHubException; +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventHubException.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventHubException.java new file mode 100644 index 0000000000000..f7ae9a2db72b0 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventHubException.java @@ -0,0 +1,66 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs; + +import com.microsoft.azure.eventhubs.impl.StringUtil; + +import java.util.Locale; + +/** + * This is the base exception that service bus will produce for all error cases. + */ +public class EventHubException extends Exception { + private static final long serialVersionUID = -3654294093967132325L; + + private final boolean isTransient; + private ErrorContext errorContext; + + EventHubException(final boolean isTransient) { + super(); + this.isTransient = isTransient; + } + + public EventHubException(final boolean isTransient, final String message) { + super(message); + this.isTransient = isTransient; + } + + public EventHubException(final boolean isTransient, final Throwable cause) { + super(cause); + this.isTransient = isTransient; + } + + public EventHubException(final boolean isTransient, final String message, final Throwable cause) { + super(message, cause); + this.isTransient = isTransient; + } + + @Override + public String getMessage() { + final String baseMessage = super.getMessage(); + return this.errorContext == null || StringUtil.isNullOrEmpty(this.errorContext.toString()) + ? baseMessage + : (!StringUtil.isNullOrEmpty(baseMessage) + ? 
String.format(Locale.US, "%s, %s[%s]", baseMessage, "errorContext", this.errorContext.toString()) + : String.format(Locale.US, "%s[%s]", "errorContext", this.errorContext.toString())); + } + + /** + * A boolean indicating if the exception is a transient error or not. + * + * @return returns true when user can retry the operation that generated the exception without additional intervention. + */ + public boolean getIsTransient() { + return this.isTransient; + } + + public ErrorContext getContext() { + return this.errorContext; + } + + public void setContext(ErrorContext errorContext) { + this.errorContext = errorContext; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventHubRuntimeInformation.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventHubRuntimeInformation.java new file mode 100644 index 0000000000000..405756d7c005c --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventHubRuntimeInformation.java @@ -0,0 +1,66 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs; + +import java.time.Instant; + +/** + * Holds information about Event Hubs which can come handy while performing data-plane operations + * like {@link EventHubClient#createPartitionSender(String)} and {@link EventHubClient#createReceiver(String, String, EventPosition)} + */ +public final class EventHubRuntimeInformation { + + final String path; + final Instant createdAt; + final int partitionCount; + final String[] partitionIds; + + public EventHubRuntimeInformation( + final String path, + final Instant createdAt, + final int partitionCount, + final String[] partitionIds) { + this.path = path; + this.createdAt = createdAt; + this.partitionCount = partitionCount; + this.partitionIds = partitionIds; + } + + /** + * Event Hub name + * + * @return name + */ + public String getPath() { + return this.path; + } + + /** + * Time at which Event Hub was created at. + * + * @return created time + */ + public Instant getCreatedAt() { + return this.createdAt; + } + + /** + * Number of partitions in the Event Hub. + * + * @return partition count + */ + public int getPartitionCount() { + return this.partitionCount; + } + + /** + * List of Partition identifiers of the Event Hub. + * + * @return partition identifiers + */ + public String[] getPartitionIds() { + return this.partitionIds; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventPosition.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventPosition.java new file mode 100644 index 0000000000000..797fbb2d0446f --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/EventPosition.java @@ -0,0 +1,120 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs; + +import com.microsoft.azure.eventhubs.impl.EventPositionImpl; + +import java.io.Serializable; +import java.time.Instant; + +/** + * Defines a position of an {@link EventData} in the event hub partition. + * The position can be an Offset, Sequence Number, or EnqueuedTime. + */ +public interface EventPosition extends Serializable { + + /** + * Creates a position at the given offset. The specified event will not be included. + * Instead, the next event is returned. + * + * @param offset is the byte offset of the event. + * @return An {@link EventPosition} object. + */ + static EventPosition fromOffset(String offset) { + return EventPositionImpl.fromOffset(offset); + } + + /** + * Creates a position at the given offset. + * + * @param offset is the byte offset of the event. + * @param inclusiveFlag will include the specified event when set to true; otherwise, the next event is returned. + * @return An {@link EventPosition} object. + */ + static EventPosition fromOffset(String offset, boolean inclusiveFlag) { + return EventPositionImpl.fromOffset(offset, inclusiveFlag); + } + + /** + * Creates a position at the given sequence number. The specified event will not be included. + * Instead, the next event is returned. + * + * @param sequenceNumber is the sequence number of the event. + * @return An {@link EventPosition} object. + */ + static EventPosition fromSequenceNumber(Long sequenceNumber) { + return EventPositionImpl.fromSequenceNumber(sequenceNumber); + } + + /** + * Creates a position at the given sequence number. The specified event will not be included. + * Instead, the next event is returned. + * + * @param sequenceNumber is the sequence number of the event. + * @param inclusiveFlag will include the specified event when set to true; otherwise, the next event is returned. + * @return An {@link EventPosition} object. 
+ */ + static EventPosition fromSequenceNumber(Long sequenceNumber, boolean inclusiveFlag) { + return EventPositionImpl.fromSequenceNumber(sequenceNumber, inclusiveFlag); + } + + /** + * Creates a position at the given {@link Instant}. + * + * @param dateTime is the enqueued time of the event. + * @return An {@link EventPosition} object. + */ + static EventPosition fromEnqueuedTime(Instant dateTime) { + return EventPositionImpl.fromEnqueuedTime(dateTime); + } + + /** + * Returns the position for the start of a stream. Provide this position in receiver creation + * to start receiving from the first available event in the partition. + * + * @return An {@link EventPosition} set to the start of an Event Hubs stream. + */ + static EventPosition fromStartOfStream() { + return EventPositionImpl.fromStartOfStream(); + } + + /** + * Returns the position for the end of a stream. Provide this position in receiver creation + * to start receiving from the next available event in the partition after the receiver is created. + * + * @return An {@link EventPosition} set to the end of an Event Hubs stream. + */ + static EventPosition fromEndOfStream() { + return EventPositionImpl.fromEndOfStream(); + } + + /** + * Gets the sequence number. + *

    + * @return the sequence number. + */ + Long getSequenceNumber(); + + /** + * Gets the enqueued time. + *

    + * @return the enqueued time. + */ + Instant getEnqueuedTime(); + + /** + * Gets the offset. + *

    + * @return the offset. + */ + String getOffset(); + + /** + * Gets the inclusive value. + *

    + * @return the inclusive value. + */ + boolean getInclusiveFlag(); +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/IllegalConnectionStringFormatException.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/IllegalConnectionStringFormatException.java new file mode 100644 index 0000000000000..f52fb4c533561 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/IllegalConnectionStringFormatException.java @@ -0,0 +1,28 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs; + +/** + * This exception is thrown when the connection string provided does not meet the requirement for connection. + */ +public class IllegalConnectionStringFormatException extends IllegalArgumentException { + private static final long serialVersionUID = 2514898858133972030L; + + IllegalConnectionStringFormatException() { + } + + IllegalConnectionStringFormatException(String detail) { + super(detail); + } + + IllegalConnectionStringFormatException(Throwable cause) { + super(cause); + } + + IllegalConnectionStringFormatException(String detail, Throwable cause) { + super(detail, cause); + } + +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/IllegalEntityException.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/IllegalEntityException.java new file mode 100644 index 0000000000000..94aa9abe242ce --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/IllegalEntityException.java @@ -0,0 +1,37 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs; + +/** + * This exception is thrown for the following reasons: + *

      + *
    • When the entity the user attempted to connect to does not exist + *
    • The entity the user wants to connect to is disabled + *
    + * + * @see http://go.microsoft.com/fwlink/?LinkId=761101 + */ +public class IllegalEntityException extends EventHubException { + private static final long serialVersionUID = 1842057379278310290L; + + // TEST HOOK - to be used by unit tests to inject non-transient failures + private static volatile boolean isTransient = false; + + IllegalEntityException() { + super(isTransient); + } + + public IllegalEntityException(final String message) { + super(isTransient, message); + } + + public IllegalEntityException(final Throwable cause) { + super(isTransient, cause); + } + + public IllegalEntityException(final String message, final Throwable cause) { + super(isTransient, message, cause); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/OperationCancelledException.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/OperationCancelledException.java new file mode 100644 index 0000000000000..a25a5656b5c3d --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/OperationCancelledException.java @@ -0,0 +1,28 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs; + +/** + * This exception is thrown when the underlying AMQP layer encounter an abnormal link abort or disconnect of connection in an unexpected fashion. 
+ */ +public class OperationCancelledException extends EventHubException { + private static final long serialVersionUID = 1L; + + OperationCancelledException() { + super(false); + } + + public OperationCancelledException(final String message) { + super(false, message); + } + + OperationCancelledException(final Throwable cause) { + super(false, cause); + } + + public OperationCancelledException(final String message, final Throwable cause) { + super(false, message, cause); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionReceiveHandler.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionReceiveHandler.java new file mode 100644 index 0000000000000..e88776a8a7a9e --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionReceiveHandler.java @@ -0,0 +1,37 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs; + +/** + * The handler to invoke after receiving {@link EventData}s from Microsoft Azure EventHubs. Use any implementation of this abstract class to specify + * user action when using PartitionReceiver's setReceiveHandler(). + * + * @see PartitionReceiver#setReceiveHandler + */ +public interface PartitionReceiveHandler { + + /** + * Maximum number of {@link EventData} to supply while invoking {@link #onReceive(Iterable)} + *

    Ensure that the value should be less than or equal to the value of {@link ReceiverOptions#getPrefetchCount()} + * + * @return value indicating the maximum number of {@link EventData} to supply while invoking {@link #onReceive(Iterable)} + */ + int getMaxEventCount(); + + /** + * user should implement this method to specify the action to be performed on the received events. + * + * @param events the list of fetched events from the corresponding PartitionReceiver. + * @see PartitionReceiver#receive + */ + void onReceive(final Iterable events); + + /** + * Implement this method to Listen to errors which lead to Closure of the {@link PartitionReceiveHandler} pump. + * + * @param error fatal error encountered while running the {@link PartitionReceiveHandler} pump + */ + void onError(final Throwable error); +} \ No newline at end of file diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionReceiver.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionReceiver.java new file mode 100644 index 0000000000000..83912309702ac --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionReceiver.java @@ -0,0 +1,144 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs; + +import com.microsoft.azure.eventhubs.impl.ExceptionUtil; + +import java.time.Duration; +import java.util.concurrent.CompletableFuture; + +/** + * This is a logical representation of receiving from a EventHub partition. + *

    + * A {@link PartitionReceiver} is tied to a ConsumerGroup + EventHub Partition combination. + *

      + *
    • If an epoch based {@link PartitionReceiver} (i.e., PartitionReceiver.getEpoch != 0) is created, EventHubs service will guarantee only 1 active receiver exists per ConsumerGroup + Partition combo. + * This is the recommended approach to create a {@link PartitionReceiver}. + *
    • Multiple receivers per ConsumerGroup + Partition combo can be created using non-epoch receivers. + *
    + * + * @see EventHubClient#createReceiver + * @see EventHubClient#createEpochReceiver + */ +public interface PartitionReceiver { + + int MINIMUM_PREFETCH_COUNT = 1; + int DEFAULT_PREFETCH_COUNT = 500; + int MAXIMUM_PREFETCH_COUNT = 2000; + + long NULL_EPOCH = 0; + + /** + * Get EventHubs partition identifier. + * + * @return The identifier representing the partition from which this receiver is fetching data + */ + String getPartitionId(); + + Duration getReceiveTimeout(); + + void setReceiveTimeout(Duration value); + + /** + * Get the epoch value that this receiver is currently using for partition ownership. + *

    + * A value of 0 means this receiver is not an epoch-based receiver. + * + * @return the epoch value that this receiver is currently using for partition ownership. + */ + long getEpoch(); + + /** + * Gets the temporal {@link ReceiverRuntimeInformation} for this EventHub partition. + * In general, this information is a representation of, where this {@link PartitionReceiver}'s end of stream is, + * at the time {@link ReceiverRuntimeInformation#getRetrievalTime()}. + *

    This value will not be populated, unless the knob {@link ReceiverOptions#setReceiverRuntimeMetricEnabled(boolean)} is set. + * This value will be refreshed every time an {@link EventData} is consumed from {@link PartitionReceiver}. + * For ex: if no events have been consumed, then this value is not populated. + * + * @return receiver runtime information + */ + ReceiverRuntimeInformation getRuntimeInformation(); + + /** + * Get the {@link EventPosition} that corresponds to an {@link EventData} which was returned last by the receiver. + *

    This value will not be populated, unless the knob {@link ReceiverOptions#setReceiverRuntimeMetricEnabled(boolean)} is set. + * Note that EventPosition object is initialized using SequenceNumber and other parameters are not set and get will return null. + * + * @return the EventPosition object. + */ + EventPosition getEventPosition(); + + /** + * Synchronous version of {@link #receive}. + * + * @param maxEventCount maximum number of {@link EventData}'s that this call should return + * @return Batch of {@link EventData}'s from the partition on which this receiver is created. Returns 'null' if no {@link EventData} is present. + * @throws EventHubException if ServiceBus client encountered any unrecoverable/non-transient problems during {@link #receive} + */ + default Iterable receiveSync(final int maxEventCount) throws EventHubException { + return ExceptionUtil.sync(() -> this.receive(maxEventCount).get()); + } + + /** + * Receive a batch of {@link EventData}'s from an EventHub partition + *

    + * Sample code (sample uses sync version of the api but concept are identical): + *

    +     * EventHubClient client = EventHubClient.createSync("__connection__");
    +     * PartitionReceiver receiver = client.createPartitionReceiverSync("ConsumerGroup1", "1");
    +     * Iterable{@literal<}EventData{@literal>} receivedEvents = receiver.receiveSync();
    +     *
    +     * while (true)
    +     * {
    +     *     int batchSize = 0;
    +     *     if (receivedEvents != null)
    +     *     {
    +     *         for(EventData receivedEvent: receivedEvents)
    +     *         {
    +     *             System.out.println(String.format("Message Payload: %s", new String(receivedEvent.getBytes(), Charset.defaultCharset())));
    +     *             System.out.println(String.format("Offset: %s, SeqNo: %s, EnqueueTime: %s",
    +     *                 receivedEvent.getSystemProperties().getOffset(),
    +     *                 receivedEvent.getSystemProperties().getSequenceNumber(),
    +     *                 receivedEvent.getSystemProperties().getEnqueuedTime()));
    +     *             batchSize++;
    +     *         }
    +     *     }
    +     *
    +     *     System.out.println(String.format("ReceivedBatch Size: %s", batchSize));
    +     *     receivedEvents = receiver.receiveSync();
    +     * }
    +     * 
    + * + * @param maxEventCount maximum number of {@link EventData}'s that this call should return + * @return A completableFuture that will yield a batch of {@link EventData}'s from the partition on which this receiver is created. Returns 'null' if no {@link EventData} is present. + */ + CompletableFuture> receive(final int maxEventCount); + + /** + * Register a receive handler that will be called when an event is available. A + * {@link PartitionReceiveHandler} is a handler that allows user to specify a callback + * for event processing and error handling in a receive pump model. + * + * @param receiveHandler An implementation of {@link PartitionReceiveHandler}. Setting this handler to null will stop the receive pump. + * @return A completableFuture which sets receiveHandler + */ + CompletableFuture setReceiveHandler(final PartitionReceiveHandler receiveHandler); + + /** + * Register a receive handler that will be called when an event is available. A + * {@link PartitionReceiveHandler} is a handler that allows user to specify a callback + * for event processing and error handling in a receive pump model. 
+ * + * @param receiveHandler An implementation of {@link PartitionReceiveHandler} + * @param invokeWhenNoEvents flag to indicate whether the {@link PartitionReceiveHandler#onReceive(Iterable)} should be invoked when the receive call times out + * @return A completableFuture which sets receiveHandler + */ + CompletableFuture setReceiveHandler(final PartitionReceiveHandler receiveHandler, final boolean invokeWhenNoEvents); + + CompletableFuture close(); + + void closeSync() throws EventHubException; +} \ No newline at end of file diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionRuntimeInformation.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionRuntimeInformation.java new file mode 100644 index 0000000000000..c086f63ca8186 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionRuntimeInformation.java @@ -0,0 +1,64 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs; + +import java.time.Instant; + +public final class PartitionRuntimeInformation { + + private final String eventHubPath; + private final String partitionId; + private final long beginSequenceNumber; + private final long lastEnqueuedSequenceNumber; + private final String lastEnqueuedOffset; + private final Instant lastEnqueuedTimeUtc; + private final boolean isEmpty; + + public PartitionRuntimeInformation( + final String eventHubPath, + final String partitionId, + final long beginSequenceNumber, + final long lastEnqueuedSequenceNumber, + final String lastEnqueuedOffset, + final Instant lastEnqueuedTimeUtc, + final boolean isEmpty) { + + this.eventHubPath = eventHubPath; + this.partitionId = partitionId; + this.beginSequenceNumber = beginSequenceNumber; + this.lastEnqueuedSequenceNumber = lastEnqueuedSequenceNumber; + this.lastEnqueuedOffset = lastEnqueuedOffset; + this.lastEnqueuedTimeUtc = lastEnqueuedTimeUtc; + this.isEmpty = isEmpty; + } + + public String getEventHubPath() { + return this.eventHubPath; + } + + public String getPartitionId() { + return this.partitionId; + } + + public long getBeginSequenceNumber() { + return this.beginSequenceNumber; + } + + public long getLastEnqueuedSequenceNumber() { + return this.lastEnqueuedSequenceNumber; + } + + public String getLastEnqueuedOffset() { + return this.lastEnqueuedOffset; + } + + public Instant getLastEnqueuedTimeUtc() { + return this.lastEnqueuedTimeUtc; + } + + public boolean getIsEmpty() { + return this.isEmpty; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionSender.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionSender.java new file mode 100644 index 0000000000000..b394714de8d4a --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PartitionSender.java @@ -0,0 +1,161 @@ +/* + * Copyright (c) Microsoft. 
All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs; + +import com.microsoft.azure.eventhubs.impl.ExceptionUtil; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ScheduledExecutorService; + +/** + * This sender class is a logical representation of sending events to a specific EventHub partition. Do not use this class + * if you do not care about sending events to specific partitions. Instead, use {@link EventHubClient#send} method. + * + * @see EventHubClient#createPartitionSender(String) + * @see EventHubClient#create(String, ScheduledExecutorService) + */ +public interface PartitionSender { + + /** + * The partition id that will receive events from this sender. + * + * @return the partition id the PartitionSender is connected to. + */ + String getPartitionId(); + + /** + * Creates an Empty Collection of {@link EventData}. + * The same partitionKey must be used while sending these events using {@link PartitionSender#send(EventDataBatch)}. + * + * @param options see {@link BatchOptions} for more usage details + * @return the empty {@link EventDataBatch}, after negotiating maximum message size with EventHubs service + */ + EventDataBatch createBatch(BatchOptions options); + + /** + * Creates an Empty Collection of {@link EventData}. + * The same partitionKey must be used while sending these events using {@link PartitionSender#send(EventDataBatch)}. + * + * @return the empty {@link EventDataBatch}, after negotiating maximum message size with EventHubs service + */ + default EventDataBatch createBatch() { + return this.createBatch(new BatchOptions()); + } + + /** + * Synchronous version of {@link #send(EventData)} Api. + * + * @param data the {@link EventData} to be sent. + * @throws PayloadSizeExceededException if the total size of the {@link EventData} exceeds a pre-defined limit set by the service. Default is 256k bytes. 
+ * @throws EventHubException if Service Bus service encountered problems during the operation. + */ + default void sendSync(final EventData data) throws EventHubException { + ExceptionUtil.syncVoid(() -> this.send(data).get()); + } + + /** + * Send {@link EventData} to a specific EventHub partition. The target partition is pre-determined when this PartitionSender was created. + * This send pattern emphasize data correlation over general availability and latency. + *

    + * There are 3 ways to send to EventHubs, each exposed as a method (along with its sendBatch overload): + *

    +     * i.   {@link EventHubClient#send(EventData)}, {@link EventHubClient#send(Iterable)}, {@link EventHubClient#send(EventDataBatch)}
    +     * ii.  {@link EventHubClient#send(EventData, String)} or {@link EventHubClient#send(Iterable, String)}
    +     * iii. {@link PartitionSender#send(EventData)}, {@link PartitionSender#send(Iterable)}, or {@link PartitionSender#send(EventDataBatch)}
    +     * 
    + *

    + * Use this type of Send, if: + *

    +     * i. The client wants to take direct control of distribution of data across partitions. In this case, the client is responsible for making sure there is at least one sender per event hub partition.
    +     * ii. User cannot use a partition key as a means to direct events to a specific partition, yet there is a need for data correlation with the partitioning scheme.
    +     * 
    + * + * @param data the {@link EventData} to be sent. + * @return a CompletableFuture that can be completed when the send operations is done.. + */ + CompletableFuture send(EventData data); + + /** + * Synchronous version of {@link #send(Iterable)} . + * + * @param eventDatas batch of events to send to EventHub + * @throws EventHubException if Service Bus service encountered problems during the operation. + */ + default void sendSync(final Iterable eventDatas) throws EventHubException { + ExceptionUtil.syncVoid(() -> this.send(eventDatas).get()); + } + + /** + * Send {@link EventData} to a specific EventHub partition. The targeted partition is pre-determined when this PartitionSender was created. + *

    + * There are 3 ways to send to EventHubs; to understand this particular type of Send, refer to the overload {@link #send(EventData)}, which is the same type of Send and is used to send a single {@link EventData}. + *

    + * Sending a batch of {@link EventData}'s is useful in the following cases: + *

    +     * i.	Efficient send - sending a batch of {@link EventData} maximizes the overall throughput by optimally using the number of sessions created to EventHubs' service.
    +     * ii.	Send multiple {@link EventData}'s in a Transaction. To achieve ACID properties, the Gateway Service will forward all {@link EventData}'s in the batch to a single EventHubs' partition.
    +     * 
    + *

    + * Sample code (sample uses sync version of the api but concepts are identical): + *

    +     * Gson gson = new GsonBuilder().create();
    +     * EventHubClient client = EventHubClient.createSync("__connection__");
    +     * PartitionSender senderToPartitionOne = client.createPartitionSenderSync("1");
    +     *
    +     * while (true)
    +     * {
    +     *     LinkedList{@literal<}EventData{@literal>} events = new LinkedList{@literal<}EventData{@literal>}();
    +     *     for (int count = 1; count {@literal<} 11; count++)
    +     *     {
    +     *         PayloadEvent payload = new PayloadEvent(count);
    +     *         byte[] payloadBytes = gson.toJson(payload).getBytes(Charset.defaultCharset());
    +     *         EventData sendEvent = EventData.create(payloadBytes);
    +     *         sendEvent.getProperties().put("from", "javaClient");
    +     *         events.add(sendEvent);
    +     *     }
    +     *
    +     *     senderToPartitionOne.sendSync(events);
    +     *     System.out.println(String.format("Sent Batch... Size: %s", events.size()));
    +     * }
    +     * 
    + * + * @param eventDatas batch of events to send to EventHub + * @return a CompletableFuture that can be completed when the send operations is done.. + */ + CompletableFuture send(Iterable eventDatas); + + /** + * Synchronous version of {@link #send(EventDataBatch)} + * + * @param eventDatas EventDataBatch to send to EventHub + * @throws EventHubException if Service Bus service encountered problems during the operation. + */ + default void sendSync(final EventDataBatch eventDatas) throws EventHubException { + ExceptionUtil.syncVoid(() -> this.send(eventDatas).get()); + } + + /** + * Send {@link EventDataBatch} to a specific EventHub partition. The targeted partition is pre-determined when this PartitionSender was created. + * A partitionKey cannot be set when using EventDataBatch with a PartitionSender. + *

    + * There are 3 ways to send to EventHubs; to understand this particular type of Send, refer to the overload {@link #send(EventData)}, which is the same type of Send and is used to send a single {@link EventData}. + *

    + * Sending a batch of {@link EventData}'s is useful in the following cases: + *

    +     * i.	Efficient send - sending a batch of {@link EventData} maximizes the overall throughput by optimally using the number of sessions created to EventHubs' service.
    +     * ii.	Send multiple {@link EventData}'s in a Transaction. To achieve ACID properties, the Gateway Service will forward all {@link EventData}'s in the batch to a single EventHubs' partition.
    +     * 
    + * + * @param eventDatas EventDataBatch to send to EventHub + * @return a CompletableFuture that can be completed when the send operation is done.. + * @see #send(Iterable) + * @see EventDataBatch + */ + CompletableFuture send(EventDataBatch eventDatas); + + CompletableFuture close(); + + void closeSync() throws EventHubException; +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PayloadSizeExceededException.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PayloadSizeExceededException.java new file mode 100644 index 0000000000000..4df415f5b9a72 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/PayloadSizeExceededException.java @@ -0,0 +1,33 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs; + +/** + * this exception is thrown when user attempts to send a event data or brokered message that has exceeded the + * allowed payload size as defined by the service. Note that in a batch send scenario the limit can include possible + * batch overhead. 
+ * + * @see http://go.microsoft.com/fwlink/?LinkId=761101 + */ +public class PayloadSizeExceededException extends EventHubException { + + private static final long serialVersionUID = 3627182744252750014L; + + PayloadSizeExceededException() { + super(false); + } + + public PayloadSizeExceededException(final String message) { + super(false, message); + } + + PayloadSizeExceededException(final Throwable cause) { + super(false, cause); + } + + public PayloadSizeExceededException(final String message, final Throwable cause) { + super(false, message, cause); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/QuotaExceededException.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/QuotaExceededException.java new file mode 100644 index 0000000000000..c622fd46835c4 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/QuotaExceededException.java @@ -0,0 +1,17 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs; + +public class QuotaExceededException extends EventHubException { + + public QuotaExceededException(String message) { + super(false, message); + } + + public QuotaExceededException(Throwable cause) { + super(false, cause); + } + +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ReceiverDisconnectedException.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ReceiverDisconnectedException.java new file mode 100644 index 0000000000000..51df584645fa5 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ReceiverDisconnectedException.java @@ -0,0 +1,38 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. 
See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs; + +/** + * This exception is thrown when a EventHubReceiver is being disconnected because of one of the + * following reason: + *
      + *
    • user attempts to connect a non-epoch receiver to a event hub partition, when there is an epoch receiver connected to the partition. + *
    • you are using an epoch receiver for a given partition but another epoch receiver with a higher epoch value connects to the same partition. + *
    + * User should make sure either all code are using non-epoch receivers, or ensure that there is only one epoch receiver processing a given partition + * at any given point in time. + * + * @see http://go.microsoft.com/fwlink/?LinkId=761101 + */ +public class ReceiverDisconnectedException extends EventHubException { + + private static final long serialVersionUID = 3385140843418138213L; + + ReceiverDisconnectedException() { + super(false); + } + + public ReceiverDisconnectedException(final String message) { + super(false, message); + } + + ReceiverDisconnectedException(final Throwable cause) { + super(false, cause); + } + + ReceiverDisconnectedException(final String message, final Throwable cause) { + super(false, message, cause); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ReceiverOptions.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ReceiverOptions.java new file mode 100644 index 0000000000000..d5cdf63071376 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ReceiverOptions.java @@ -0,0 +1,123 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs; + +import com.microsoft.azure.eventhubs.impl.ClientConstants; + +import java.util.Locale; + +/** + * Represents various optional behaviors which can be turned on or off during the creation of a {@link PartitionReceiver}. 
+ */ +public final class ReceiverOptions { + + private boolean receiverRuntimeMetricEnabled; + private String identifier; + private int prefetchCount; + + public ReceiverOptions() { + this.prefetchCount = PartitionReceiver.DEFAULT_PREFETCH_COUNT; + } + + private static void validateReceiverIdentifier(final String receiverName) { + + if (receiverName != null && + receiverName.length() > ClientConstants.MAX_RECEIVER_NAME_LENGTH) { + throw new IllegalArgumentException("receiverIdentifier length cannot exceed 64"); + } + } + + /** + * Knob to enable/disable runtime metric of the receiver. If this is set to true and is passed to {@link EventHubClient#createReceiver}, + * after the first {@link PartitionReceiver#receive(int)} call, {@link PartitionReceiver#getRuntimeInformation()} is populated. + *

    + * Enabling this knob will add 3 additional properties to all {@link EventData}'s received on the {@link EventHubClient#createReceiver}. + * + * @return the {@link boolean} indicating, whether, the runtime metric of the receiver was enabled + */ + public boolean getReceiverRuntimeMetricEnabled() { + + return this.receiverRuntimeMetricEnabled; + } + + /** + * Knob to enable/disable runtime metric of the receiver. If this is set to true and is passed to {@link EventHubClient#createReceiver}, + * after the first {@link PartitionReceiver#receive(int)} call, {@link PartitionReceiver#getRuntimeInformation()} and + * {@link PartitionReceiver#getEventPosition()} will be populated. + *

    + * This knob facilitates for an optimization where the Consumer of Event Hub has the end of stream details at the disposal, + * without making any additional {@link EventHubClient#getPartitionRuntimeInformation(String)} call to Event Hubs service. + * To achieve this, behind the scenes, along with the actual {@link EventData}, that the Event Hubs {@link PartitionReceiver} + * delivers, it includes extra information about the Event Hubs partitions end of stream details on every event. + * In summary, enabling this knob will + * help users to save an extra call to Event Hubs service to fetch Event Hubs partition information and as a result, will add that information as + * header to each {@link EventData} received by the client. + * + * @param value the {@link boolean} to indicate, whether, the runtime metric of the receiver should be enabled + */ + public void setReceiverRuntimeMetricEnabled(boolean value) { + + this.receiverRuntimeMetricEnabled = value; + } + + /** + * Gets the identifier of the {@link PartitionReceiver} + * + * @return identifier of the {@link PartitionReceiver}; null if nothing was set + */ + public String getIdentifier() { + + return this.identifier; + } + + /** + * Set an identifier to {@link PartitionReceiver}. + *

    + * This identifier will be used by the EventHubs service when reporting any errors across receivers that are caused by this receiver. + * For example, when the receiver quota limit is hit while a user is trying to create a new receiver, + * the EventHubs service will throw {@link QuotaExceededException} and will include this identifier. + * So, it is very critical to choose a value that can uniquely identify the whereabouts of the {@link PartitionReceiver}. + *

    + *

    + * + * @param value string to identify {@link PartitionReceiver} + */ + public void setIdentifier(final String value) { + + ReceiverOptions.validateReceiverIdentifier(value); + this.identifier = value; + } + + /** + * Get Prefetch Count. + * + * @return the upper limit of events this receiver will actively receive regardless of whether a receive operation is pending. + * @see #setPrefetchCount + */ + public int getPrefetchCount() { + return this.prefetchCount; + } + + /** + * Set the number of events that can be pre-fetched and cached at the {@link PartitionReceiver}. + *

    By default the value is 500 + * + * @param prefetchCount the number of events to pre-fetch. value must be between 1 and 2000. + * @throws EventHubException if setting prefetchCount encounters error + */ + public void setPrefetchCount(final int prefetchCount) throws EventHubException { + if (prefetchCount < PartitionReceiver.MINIMUM_PREFETCH_COUNT) { + throw new IllegalArgumentException(String.format(Locale.US, + "PrefetchCount has to be above %s", PartitionReceiver.MINIMUM_PREFETCH_COUNT)); + } + + if (prefetchCount > PartitionReceiver.MAXIMUM_PREFETCH_COUNT) { + throw new IllegalArgumentException(String.format(Locale.US, + "PrefetchCount has to be below %s", PartitionReceiver.MAXIMUM_PREFETCH_COUNT)); + } + + this.prefetchCount = prefetchCount; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ReceiverRuntimeInformation.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ReceiverRuntimeInformation.java new file mode 100644 index 0000000000000..b2d7d056eafcf --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ReceiverRuntimeInformation.java @@ -0,0 +1,84 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs; + +import java.time.Instant; + +/** + * Represents the temporal end of stream information of an EventHubs Partition. 
+ */ +public final class ReceiverRuntimeInformation { + + private final String partitionId; + + private long lastSequenceNumber; + private Instant lastEnqueuedTime; + private String lastEnqueuedOffset; + private Instant retrievalTime; + + public ReceiverRuntimeInformation(final String partitionId) { + + this.partitionId = partitionId; + } + + /** + * The Event Hubs partition id to which this information belongs to + * + * @return the partition identifier + */ + public String getPartitionId() { + + return this.partitionId; + } + + /** + * The last enqueued {@link EventData}'s sequence number on this EventHubs Partition + * + * @return last enqueued sequence number + */ + public long getLastEnqueuedSequenceNumber() { + + return this.lastSequenceNumber; + } + + /** + * The last enqueued {@link EventData}'s enqueue time stamp on this EventHubs Partition + * + * @return last enqueued time + */ + public Instant getLastEnqueuedTime() { + + return this.lastEnqueuedTime; + } + + /** + * The last enqueued {@link EventData}'s offset on this EventHubs Partition + * + * @return offset + */ + public String getLastEnqueuedOffset() { + + return this.lastEnqueuedOffset; + } + + /** + * The value indicating when this information was retrieved from the Event Hubs service + * + * @return retrieval time + */ + public Instant getRetrievalTime() { + + return this.retrievalTime; + } + + public void setRuntimeInformation(final long sequenceNumber, final Instant enqueuedTime, final String offset) { + + this.lastSequenceNumber = sequenceNumber; + this.lastEnqueuedTime = enqueuedTime; + this.lastEnqueuedOffset = offset; + + this.retrievalTime = Instant.now(); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/RetryPolicy.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/RetryPolicy.java new file mode 100644 index 0000000000000..abbd775ce0101 --- /dev/null +++ 
b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/RetryPolicy.java @@ -0,0 +1,95 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs; + +import com.microsoft.azure.eventhubs.impl.ClientConstants; +import com.microsoft.azure.eventhubs.impl.RetryExponential; + +import java.time.Duration; +import java.util.concurrent.ConcurrentHashMap; + +// TODO: SIMPLIFY retryPolicy - ConcurrentHashMap is not needed +public abstract class RetryPolicy { + + private static final RetryPolicy NO_RETRY = new RetryExponential(Duration.ofSeconds(0), Duration.ofSeconds(0), 0, ClientConstants.NO_RETRY); + + private final String name; + private ConcurrentHashMap retryCounts; + private Object serverBusySync; + + protected RetryPolicy(final String name) { + this.name = name; + this.retryCounts = new ConcurrentHashMap(); + this.serverBusySync = new Object(); + } + + public static boolean isRetryableException(Exception exception) { + if (exception == null) { + throw new IllegalArgumentException("exception cannot be null"); + } + + if (exception instanceof EventHubException) { + return ((EventHubException) exception).getIsTransient(); + } + + return false; + } + + public static RetryPolicy getDefault() { + return new RetryExponential( + ClientConstants.DEFAULT_RETRY_MIN_BACKOFF, + ClientConstants.DEFAULT_RETRY_MAX_BACKOFF, + ClientConstants.DEFAULT_MAX_RETRY_COUNT, + ClientConstants.DEFAULT_RETRY); + } + + public static RetryPolicy getNoRetry() { + return RetryPolicy.NO_RETRY; + } + + public void incrementRetryCount(String clientId) { + Integer retryCount = this.retryCounts.get(clientId); + this.retryCounts.put(clientId, retryCount == null ? 
1 : retryCount + 1); + } + + public void resetRetryCount(String clientId) { + Integer currentRetryCount = this.retryCounts.get(clientId); + if (currentRetryCount != null && currentRetryCount.intValue() != 0) { + this.retryCounts.put(clientId, 0); + } + } + + protected int getRetryCount(String clientId) { + Integer retryCount = this.retryCounts.get(clientId); + return retryCount == null ? 0 : retryCount; + } + + /** + * Gets the Interval after which nextRetry should be done. + * + * @param clientId clientId + * @param lastException lastException + * @param remainingTime remainingTime to retry + * @return returns 'null' Duration when not Allowed + */ + public Duration getNextRetryInterval(String clientId, Exception lastException, Duration remainingTime) { + int baseWaitTime = 0; + synchronized (this.serverBusySync) { + if (lastException != null && + (lastException instanceof ServerBusyException || (lastException.getCause() != null && lastException.getCause() instanceof ServerBusyException))) { + baseWaitTime += ClientConstants.SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS; + } + } + + return this.onGetNextRetryInterval(clientId, lastException, remainingTime, baseWaitTime); + } + + protected abstract Duration onGetNextRetryInterval(String clientId, Exception lastException, Duration remainingTime, int baseWaitTime); + + @Override + public String toString() { + return this.name; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ServerBusyException.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ServerBusyException.java new file mode 100644 index 0000000000000..f83d9d596dd67 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/ServerBusyException.java @@ -0,0 +1,35 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs; + +/** + * Server busy exception is thrown when the current entity's activity has put excessive load onto the service. + * When encountered this exception user should wait at least 4 seconds before any retry/runtime operations for the said entity again. + * + * @see http://go.microsoft.com/fwlink/?LinkId=761101 + */ +public class ServerBusyException extends EventHubException { + + private static final long serialVersionUID = 1L; + + /** + * Default constructor for the exception + */ + public ServerBusyException() { + super(true); + } + + public ServerBusyException(final String message) { + super(true, message); + } + + ServerBusyException(final Throwable cause) { + super(true, cause); + } + + ServerBusyException(final String message, final Throwable cause) { + super(true, message, cause); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/TimeoutException.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/TimeoutException.java new file mode 100644 index 0000000000000..6e18878c07e26 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/TimeoutException.java @@ -0,0 +1,35 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs; + +/** + * This exception is thrown when the operation has exceeded the predetermined time limit. + * User should check connectivity is healthy between client process and service. + * + * @see http://go.microsoft.com/fwlink/?LinkId=761101 + */ +public class TimeoutException extends EventHubException { + + private static final long serialVersionUID = -3505469991851121512L; + + /** + * Default constructor for exception type. 
+ */ + public TimeoutException() { + super(true); + } + + public TimeoutException(final String message) { + super(true, message); + } + + public TimeoutException(final Throwable cause) { + super(true, cause); + } + + public TimeoutException(final String message, final Throwable cause) { + super(true, message, cause); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/TransportType.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/TransportType.java new file mode 100644 index 0000000000000..c7231280b56f9 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/TransportType.java @@ -0,0 +1,41 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs; + +/** + * All TransportType switches available for communicating to EventHubs service. + */ +public enum TransportType { + /** + * AMQP over TCP. Uses port 5671 - assigned by IANA for secure AMQP (AMQPS). + */ + AMQP("Amqp"), + + /** + * AMQP over Web Sockets. Uses port 443. 
+ */ + AMQP_WEB_SOCKETS("AmqpWebSockets"); + + private final String value; + + TransportType(final String value) { + this.value = value; + } + + @Override + public String toString() { + return this.value; + } + + static TransportType fromString(final String value) { + for (TransportType transportType : values()) { + if (transportType.value.equalsIgnoreCase(value)) { + return transportType; + } + } + + throw new IllegalArgumentException(); + } +} \ No newline at end of file diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ActiveClientTokenManager.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ActiveClientTokenManager.java new file mode 100644 index 0000000000000..c2c339e369f72 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ActiveClientTokenManager.java @@ -0,0 +1,73 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.impl; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.time.Duration; +import java.util.Locale; +import java.util.concurrent.CompletableFuture; + +final class ActiveClientTokenManager { + + private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(ActiveClientTokenManager.class); + private final Object timerLock; + private final Runnable sendTokenTask; + private final ClientEntity clientEntity; + private final Duration tokenRefreshInterval; + private final SchedulerProvider schedulerProvider; + private final Timer timerScheduler; + private CompletableFuture timer; + + public ActiveClientTokenManager( + final ClientEntity clientEntity, + final Runnable sendTokenAsync, + final Duration tokenRefreshInterval, + final SchedulerProvider schedulerProvider) { + + this.sendTokenTask = sendTokenAsync; + this.clientEntity = clientEntity; + this.tokenRefreshInterval = tokenRefreshInterval; + this.timerLock = new Object(); + this.schedulerProvider = schedulerProvider; + this.timerScheduler = new Timer(schedulerProvider); + + synchronized (this.timerLock) { + this.timer = this.timerScheduler.schedule(new TimerCallback(), tokenRefreshInterval); + } + } + + public void cancel() { + + synchronized (this.timerLock) { + this.timer.cancel(false); + } + } + + private class TimerCallback implements Runnable { + + @Override + public void run() { + + if (!clientEntity.getIsClosingOrClosed()) { + + sendTokenTask.run(); + + synchronized (ActiveClientTokenManager.this.timerLock) { + ActiveClientTokenManager.this.timer = ActiveClientTokenManager.this.timerScheduler.schedule(new TimerCallback(), tokenRefreshInterval); + } + } else { + + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info( + String.format(Locale.US, + "clientEntity[%s] - closing ActiveClientLinkManager", clientEntity.getClientId())); + } + } + } + + } +} diff --git 
/*
 * Copyright (c) Microsoft. All rights reserved.
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
package com.microsoft.azure.eventhubs.impl;

import org.apache.qpid.proton.amqp.transport.ErrorCondition;
import org.apache.qpid.proton.engine.Link;

/**
 * Contract implemented by owners of a physical AMQP connection. Links register
 * themselves here so that connection-level errors can be fanned out to them.
 */
public interface AmqpConnection {

    /**
     * Host name intended to be used on Amqp Connection Open frame
     * @return host name
     */
    String getHostName();

    /**
     * Invoked when the connection open handshake completes.
     * @param exception null on success, otherwise the failure cause
     *                  (convention mirrors {@code AmqpLink.onOpenComplete}) — NOTE(review): confirm with implementers.
     */
    void onOpenComplete(Exception exception);

    /**
     * Invoked when a connection-level error occurs.
     * @param error the AMQP error condition reported for the connection
     */
    void onConnectionError(ErrorCondition error);

    /** Registers a link to be notified (via its handler) of connection-level errors. */
    void registerForConnectionError(Link link);

    /** Removes a previously registered link from connection-error notification. */
    void deregisterForConnectionError(Link link);
}
/*
 * Copyright (c) Microsoft. All rights reserved.
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
package com.microsoft.azure.eventhubs.impl;

import org.apache.qpid.proton.amqp.Symbol;

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

/**
 * AMQP protocol constants shared across the Event Hubs client: message-annotation
 * names, vendor symbols, size limits, and the set of AMQP property names that are
 * reserved by the protocol and therefore unusable as application properties.
 */
public final class AmqpConstants {

    public static final String APACHE = "apache.org";
    public static final String VENDOR = "com.microsoft";
    public static final String AMQP_ANNOTATION_FORMAT = "amqp.annotation.%s >%s '%s'";
    public static final String OFFSET_ANNOTATION_NAME = "x-opt-offset";
    public static final String ENQUEUED_TIME_UTC_ANNOTATION_NAME = "x-opt-enqueued-time";
    public static final String PARTITION_KEY_ANNOTATION_NAME = "x-opt-partition-key";
    public static final String SEQUENCE_NUMBER_ANNOTATION_NAME = "x-opt-sequence-number";
    public static final String PUBLISHER_ANNOTATION_NAME = "x-opt-publisher";
    public static final Symbol PARTITION_KEY = Symbol.getSymbol(PARTITION_KEY_ANNOTATION_NAME);
    public static final Symbol OFFSET = Symbol.getSymbol(OFFSET_ANNOTATION_NAME);
    public static final Symbol SEQUENCE_NUMBER = Symbol.getSymbol(SEQUENCE_NUMBER_ANNOTATION_NAME);
    public static final Symbol ENQUEUED_TIME_UTC = Symbol.getSymbol(ENQUEUED_TIME_UTC_ANNOTATION_NAME);
    public static final Symbol STRING_FILTER = Symbol.valueOf(APACHE + ":selector-filter:string");
    public static final Symbol EPOCH = Symbol.valueOf(VENDOR + ":epoch");
    public static final Symbol PRODUCT = Symbol.valueOf("product");
    public static final Symbol VERSION = Symbol.valueOf("version");
    public static final Symbol PLATFORM = Symbol.valueOf("platform");
    public static final Symbol FRAMEWORK = Symbol.valueOf("framework");
    public static final Symbol USER_AGENT = Symbol.valueOf("user-agent");
    public static final int MAX_USER_AGENT_LENGTH = 128;
    // AMQP message-format code marking a batch envelope.
    public static final int AMQP_BATCH_MESSAGE_FORMAT = 0x80013700; // 2147563264L;
    public static final int MAX_FRAME_SIZE = 65536;
    public static final String AMQP_PROPERTY_MESSAGE_ID = "message-id";
    public static final String AMQP_PROPERTY_USER_ID = "user-id";
    public static final String AMQP_PROPERTY_TO = "to";
    public static final String AMQP_PROPERTY_SUBJECT = "subject";
    public static final String AMQP_PROPERTY_REPLY_TO = "reply-to";
    public static final String AMQP_PROPERTY_CORRELATION_ID = "correlation-id";
    public static final String AMQP_PROPERTY_CONTENT_TYPE = "content-type";
    public static final String AMQP_PROPERTY_CONTENT_ENCODING = "content-encoding";
    // NOTE: constant name keeps the historical "EXPRITY" typo for source compatibility;
    // the wire value below is the correct AMQP property name.
    public static final String AMQP_PROPERTY_ABSOLUTE_EXPRITY_TIME = "absolute-expiry-time";
    public static final String AMQP_PROPERTY_CREATION_TIME = "creation-time";
    public static final String AMQP_PROPERTY_GROUP_ID = "group-id";
    public static final String AMQP_PROPERTY_GROUP_SEQUENCE = "group-sequence";
    public static final String AMQP_PROPERTY_REPLY_TO_GROUP_ID = "reply-to-group-id";

    /**
     * Property names defined by the AMQP spec; applications must not use them as
     * custom property keys. (Previously built with raw-typed double-brace
     * initialization, which creates a serializable anonymous HashSet subclass;
     * replaced with an explicit unmodifiable set.)
     */
    public static final Set<String> RESERVED_PROPERTY_NAMES = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(
            AMQP_PROPERTY_MESSAGE_ID,
            AMQP_PROPERTY_USER_ID,
            AMQP_PROPERTY_TO,
            AMQP_PROPERTY_SUBJECT,
            AMQP_PROPERTY_REPLY_TO,
            AMQP_PROPERTY_CORRELATION_ID,
            AMQP_PROPERTY_CONTENT_TYPE,
            AMQP_PROPERTY_CONTENT_ENCODING,
            AMQP_PROPERTY_ABSOLUTE_EXPRITY_TIME,
            AMQP_PROPERTY_CREATION_TIME,
            AMQP_PROPERTY_GROUP_ID,
            AMQP_PROPERTY_GROUP_SEQUENCE,
            AMQP_PROPERTY_REPLY_TO_GROUP_ID)));

    public static final Symbol ENABLE_RECEIVER_RUNTIME_METRIC_NAME = Symbol.valueOf(VENDOR + ":enable-receiver-runtime-metric");
    public static final Symbol RECEIVER_IDENTIFIER_NAME = Symbol.valueOf(AmqpConstants.VENDOR + ":receiver-name");

    /** Utility holder — not instantiable. */
    private AmqpConstants() {
    }
}
/*
 * Copyright (c) Microsoft. All rights reserved.
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
package com.microsoft.azure.eventhubs.impl;

import org.apache.qpid.proton.amqp.Symbol;

/**
 * Well-known AMQP error-condition symbols, grouped by scope (general, link,
 * connection), plus the Proton-J specific symbol used to surface socket I/O errors.
 */
public final class AmqpErrorCode {

    public static final Symbol NotFound = Symbol.getSymbol("amqp:not-found");
    public static final Symbol UnauthorizedAccess = Symbol.getSymbol("amqp:unauthorized-access");
    public static final Symbol ResourceLimitExceeded = Symbol.getSymbol("amqp:resource-limit-exceeded");
    public static final Symbol NotAllowed = Symbol.getSymbol("amqp:not-allowed");
    public static final Symbol InternalError = Symbol.getSymbol("amqp:internal-error");
    public static final Symbol IllegalState = Symbol.getSymbol("amqp:illegal-state");
    public static final Symbol NotImplemented = Symbol.getSymbol("amqp:not-implemented");

    // link errors
    public static final Symbol Stolen = Symbol.getSymbol("amqp:link:stolen");
    public static final Symbol PayloadSizeExceeded = Symbol.getSymbol("amqp:link:message-size-exceeded");
    public static final Symbol AmqpLinkDetachForced = Symbol.getSymbol("amqp:link:detach-forced");

    // connection errors
    public static final Symbol ConnectionForced = Symbol.getSymbol("amqp:connection:forced");

    // proton library introduced this amqpsymbol in their code-base to communicate IOExceptions
    // while performing operations on SocketChannel (in IOHandler.java)
    public static final Symbol PROTON_IO_ERROR = Symbol.getSymbol("proton:io");
}
b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpException.java @@ -0,0 +1,24 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.impl; + +import org.apache.qpid.proton.amqp.transport.ErrorCondition; + +/** + * All AmqpExceptions - which EventHub client handles internally. + */ +public class AmqpException extends Exception { + private static final long serialVersionUID = -750417419234273714L; + private ErrorCondition errorCondition; + + public AmqpException(ErrorCondition errorCondition) { + super(errorCondition.getDescription()); + this.errorCondition = errorCondition; + } + + public ErrorCondition getError() { + return this.errorCondition; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpLink.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpLink.java new file mode 100644 index 0000000000000..70e77ee07d78e --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/AmqpLink.java @@ -0,0 +1,18 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
/*
 * Copyright (c) Microsoft. All rights reserved.
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
package com.microsoft.azure.eventhubs.impl;

import org.apache.qpid.proton.amqp.transport.ErrorCondition;

/**
 * Lifecycle callbacks for an AMQP link (sender or receiver).
 */
public interface AmqpLink {
    /**
     * @param completionException completionException=null if open is successful
     */
    void onOpenComplete(Exception completionException);

    /** Invoked when the link fails with a local/transport exception. */
    void onError(Exception exception);

    /** Invoked when the link is closed; {@code condition} carries the close reason, if any. */
    void onClose(ErrorCondition condition);
}
/*
 * Copyright (c) Microsoft. All rights reserved.
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
package com.microsoft.azure.eventhubs.impl;

import org.apache.qpid.proton.engine.Delivery;

/**
 * Receive-side extension of {@link AmqpLink}: adds the delivery callback.
 */
public interface AmqpReceiver extends AmqpLink {
    /** Invoked for each incoming delivery on the receive link. */
    void onReceiveComplete(Delivery delivery);
}
/*
 * Copyright (c) Microsoft. All rights reserved.
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
package com.microsoft.azure.eventhubs.impl;

import java.util.HashMap;
import java.util.Map;

/**
 * HTTP-style status codes carried in AMQP management/CBS responses, with a
 * reverse lookup from the numeric value.
 */
public enum AmqpResponseCode {
    ACCEPTED(0xca),
    OK(200),
    BAD_REQUEST(400),
    NOT_FOUND(0x194),
    FORBIDDEN(0x193),
    INTERNAL_SERVER_ERROR(500),
    UNAUTHORIZED(0x191);

    // Fixed: was a raw-typed, non-final static map; now typed and final.
    // Populated once in the static initializer and never mutated afterwards.
    private static final Map<Integer, AmqpResponseCode> VALUE_MAP = new HashMap<>();

    static {
        for (AmqpResponseCode code : AmqpResponseCode.values()) {
            VALUE_MAP.put(code.value, code);
        }
    }

    private final int value;

    AmqpResponseCode(final int value) {
        this.value = value;
    }

    /**
     * Reverse lookup by numeric status code.
     * @return the matching constant, or null if the value is not a known code
     */
    public static AmqpResponseCode valueOf(final int value) {
        return VALUE_MAP.get(value);
    }

    /** @return the numeric status code for this constant */
    public int getValue() {
        return this.value;
    }
}
/*
 * Copyright (c) Microsoft. All rights reserved.
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
package com.microsoft.azure.eventhubs.impl;

import org.apache.qpid.proton.engine.Delivery;

/**
 * Send-side extension of {@link AmqpLink}: credit-flow and send-completion callbacks.
 */
public interface AmqpSender extends AmqpLink {
    /** Invoked when the peer issues link credit. */
    void onFlow(final int creditIssued);

    /** Invoked when a previously submitted delivery is settled/acknowledged. */
    void onSendComplete(final Delivery delivery);
}
/*
 * Copyright (c) Microsoft. All rights reserved.
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
package com.microsoft.azure.eventhubs.impl;

import org.apache.qpid.proton.amqp.Binary;
import org.apache.qpid.proton.amqp.Symbol;
import org.apache.qpid.proton.amqp.messaging.AmqpValue;
import org.apache.qpid.proton.amqp.messaging.ApplicationProperties;
import org.apache.qpid.proton.amqp.messaging.Data;
import org.apache.qpid.proton.amqp.messaging.MessageAnnotations;
import org.apache.qpid.proton.message.Message;

import java.util.Locale;

/**
 * Helpers for estimating the serialized size of an AMQP message, used for
 * batch-size accounting.
 */
public final class AmqpUtil {

    private AmqpUtil() {
    }

    /**
     * Estimates the body size of a message in bytes.
     * Data sections report the binary payload length; AmqpValue sections are
     * approximated as UTF-16 string length (2 bytes per char). Other body
     * sections, and absent bodies, count as 0.
     */
    private static int getPayloadSize(Message msg) {

        if (msg == null || msg.getBody() == null) {
            return 0;
        }

        // Fixed: removed dead null checks on the cast result — a successful
        // 'instanceof' guarantees the reference is non-null.
        if (msg.getBody() instanceof Data) {
            final Binary payloadBytes = ((Data) msg.getBody()).getValue();
            return payloadBytes == null ? 0 : payloadBytes.getLength();
        }

        if (msg.getBody() instanceof AmqpValue) {
            // Fixed: guard against a null wrapped value, which previously threw NPE.
            final Object wrapped = ((AmqpValue) msg.getBody()).getValue();
            return wrapped == null ? 0 : wrapped.toString().length() * 2;
        }

        return 0;
    }

    /**
     * Estimates the total serialized size of a message: body plus message
     * annotations plus application properties (headers/footers excluded).
     *
     * @param amqpMessage message to size; null yields 0
     * @return approximate size in bytes
     */
    public static int getDataSerializedSize(Message amqpMessage) {

        if (amqpMessage == null) {
            return 0;
        }

        int payloadSize = getPayloadSize(amqpMessage);

        // EventData - accepts only PartitionKey - which is a String & stuffed into MessageAnnotation
        final MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations();
        final ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties();

        int annotationsSize = 0;
        int applicationPropertiesSize = 0;

        if (messageAnnotations != null) {
            for (Symbol value : messageAnnotations.getValue().keySet()) {
                annotationsSize += sizeof(value);
            }

            for (Object value : messageAnnotations.getValue().values()) {
                annotationsSize += sizeof(value);
            }
        }

        if (applicationProperties != null) {
            for (Object value : applicationProperties.getValue().keySet()) {
                applicationPropertiesSize += sizeof(value);
            }

            for (Object value : applicationProperties.getValue().values()) {
                applicationPropertiesSize += sizeof(value);
            }
        }

        return annotationsSize + applicationPropertiesSize + payloadSize;
    }

    /**
     * Approximate encoded size of a supported scalar: strings/symbols as UTF-16
     * (2 bytes per char), primitives by their byte width.
     *
     * @throws IllegalArgumentException for any unsupported type
     */
    private static int sizeof(Object obj) {
        if (obj instanceof String) {
            return obj.toString().length() << 1;
        }

        if (obj instanceof Symbol) {
            return ((Symbol) obj).length() << 1;
        }

        if (obj instanceof Integer) {
            return Integer.BYTES;
        }

        if (obj instanceof Long) {
            return Long.BYTES;
        }

        if (obj instanceof Short) {
            return Short.BYTES;
        }

        if (obj instanceof Character) {
            return Character.BYTES;
        }

        if (obj instanceof Float) {
            return Float.BYTES;
        }

        if (obj instanceof Double) {
            return Double.BYTES;
        }

        throw new IllegalArgumentException(String.format(Locale.US, "Encoding Type: %s is not supported", obj.getClass()));
    }
}
/*
 * Copyright (c) Microsoft. All rights reserved.
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
package com.microsoft.azure.eventhubs.impl;

import org.apache.qpid.proton.amqp.transport.ErrorCondition;
import org.apache.qpid.proton.engine.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Base Proton event handler for AMQP links. Translates local/remote close and
 * detach events into {@link AmqpLink} callbacks and tears down the owning
 * session once the link is closed.
 */
public class BaseLinkHandler extends BaseHandler {
    protected static final Logger TRACE_LOGGER = LoggerFactory.getLogger(BaseLinkHandler.class);

    // The client-side link abstraction that receives onError/onClose callbacks.
    private final AmqpLink underlyingEntity;

    public BaseLinkHandler(final AmqpLink amqpLink) {
        this.underlyingEntity = amqpLink;
    }

    // Local close: log, then close the enclosing session with the link's own condition.
    @Override
    public void onLinkLocalClose(Event event) {
        final Link link = event.getLink();
        final ErrorCondition condition = link.getCondition();

        if (TRACE_LOGGER.isInfoEnabled()) {
            TRACE_LOGGER.info(String.format("onLinkLocalClose linkName[%s], errorCondition[%s], errorDescription[%s]",
                    link.getName(),
                    condition != null ? condition.getCondition() : "n/a",
                    condition != null ? condition.getDescription() : "n/a"));
        }

        closeSession(link, link.getCondition());
    }

    // Remote close: log the LOCAL condition (note handleRemoteLinkClosed uses the
    // remote condition for the actual callbacks — presumably intentional; verify).
    @Override
    public void onLinkRemoteClose(Event event) {
        final Link link = event.getLink();
        final ErrorCondition condition = link.getCondition();

        if (TRACE_LOGGER.isInfoEnabled()) {
            TRACE_LOGGER.info(String.format("onLinkRemoteClose linkName[%s], errorCondition[%s], errorDescription[%s]",
                    link.getName(),
                    condition != null ? condition.getCondition() : "n/a",
                    condition != null ? condition.getDescription() : "n/a"));
        }

        handleRemoteLinkClosed(event);
    }

    // Remote detach is treated identically to remote close.
    @Override
    public void onLinkRemoteDetach(Event event) {
        final Link link = event.getLink();
        final ErrorCondition condition = link.getCondition();

        if (TRACE_LOGGER.isInfoEnabled()) {
            TRACE_LOGGER.info(String.format("onLinkRemoteDetach linkName[%s], errorCondition[%s], errorDescription[%s]",
                    link.getName(),
                    condition != null ? condition.getCondition() : "n/a",
                    condition != null ? condition.getDescription() : "n/a"));
        }

        handleRemoteLinkClosed(event);
    }

    /** Forwards a close (with its AMQP condition) to the client-side link object. */
    public void processOnClose(Link link, ErrorCondition condition) {
        if (TRACE_LOGGER.isInfoEnabled()) {
            TRACE_LOGGER.info(String.format("processOnClose linkName[%s], errorCondition[%s], errorDescription[%s]",
                    link.getName(),
                    condition != null ? condition.getCondition() : "n/a",
                    condition != null ? condition.getDescription() : "n/a"));
        }

        this.underlyingEntity.onClose(condition);
    }

    /** Forwards a close caused by a local exception to the client-side link object. */
    public void processOnClose(Link link, Exception exception) {
        this.underlyingEntity.onError(exception);
    }

    // Closes the link's session (propagating the condition) unless it is already closed.
    private void closeSession(Link link, ErrorCondition condition) {
        final Session session = link.getSession();

        if (session != null && session.getLocalState() != EndpointState.CLOSED) {
            if (TRACE_LOGGER.isInfoEnabled()) {
                TRACE_LOGGER.info(String.format("closeSession for linkName[%s], errorCondition[%s], errorDescription[%s]",
                        link.getName(),
                        condition != null ? condition.getCondition() : "n/a",
                        condition != null ? condition.getDescription() : "n/a"));
            }

            session.setCondition(condition);
            session.close();
        }
    }

    // Remote-initiated close: mirror the remote condition onto the local end and
    // close it, then notify the client object, then tear down the session.
    // The order matters: callbacks fire before the session is destroyed.
    private void handleRemoteLinkClosed(final Event event) {
        final Link link = event.getLink();

        final ErrorCondition condition = link.getRemoteCondition();

        if (link.getLocalState() != EndpointState.CLOSED) {
            link.setCondition(condition);
            link.close();
        }

        this.processOnClose(link, condition);

        this.closeSession(link, condition);
    }
}
/*
 * Copyright (c) Microsoft. All rights reserved.
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
package com.microsoft.azure.eventhubs.impl;

import org.apache.qpid.proton.Proton;
import org.apache.qpid.proton.amqp.messaging.AmqpValue;
import org.apache.qpid.proton.amqp.messaging.ApplicationProperties;
import org.apache.qpid.proton.message.Message;

import java.util.HashMap;
import java.util.Map;

/**
 * Client for the service-side Claims Based Security ($cbs) node: sends SAS
 * tokens over a fault-tolerant request/response channel.
 */
final class CBSChannel {

    // Request/response channel to the $cbs node; re-creates itself on failure.
    final FaultTolerantObject<RequestResponseChannel> innerChannel;
    final SessionProvider sessionProvider;
    final AmqpConnection connectionEventDispatcher;

    public CBSChannel(
            final SessionProvider sessionProvider,
            final AmqpConnection connection) {

        this.sessionProvider = sessionProvider;
        this.connectionEventDispatcher = connection;

        // The closer needs a back-reference to the channel it closes, so it is
        // wired up after the FaultTolerantObject is constructed.
        RequestResponseCloser closer = new RequestResponseCloser();
        this.innerChannel = new FaultTolerantObject<>(
                new RequestResponseOpener(sessionProvider, "cbs-session", "cbs", ClientConstants.CBS_ADDRESS, connection),
                closer);
        closer.setInnerChannel(this.innerChannel);
    }

    /**
     * Sends a put-token request to the $cbs node.
     *
     * @param dispatcher        reactor dispatcher the request is scheduled on
     * @param token             SAS token to install
     * @param tokenAudience     resource the token applies to
     * @param sendTokenCallback completed with null on 2xx (ACCEPTED/OK), or with
     *                          the mapped exception otherwise
     */
    public void sendToken(
            final ReactorDispatcher dispatcher,
            final String token,
            final String tokenAudience,
            final OperationResult<Void, Exception> sendTokenCallback) {

        // Build the put-token message: operation metadata in application
        // properties, the token itself as the AmqpValue body.
        final Message request = Proton.message();
        final Map<String, Object> properties = new HashMap<>();
        properties.put(ClientConstants.PUT_TOKEN_OPERATION, ClientConstants.PUT_TOKEN_OPERATION_VALUE);
        properties.put(ClientConstants.PUT_TOKEN_TYPE, ClientConstants.SAS_TOKEN_TYPE);
        properties.put(ClientConstants.PUT_TOKEN_AUDIENCE, tokenAudience);
        final ApplicationProperties applicationProperties = new ApplicationProperties(properties);
        request.setApplicationProperties(applicationProperties);
        request.setBody(new AmqpValue(token));

        // Wait for the channel to be open, then issue the request; both stages
        // funnel failures into sendTokenCallback.onError.
        this.innerChannel.runOnOpenedObject(dispatcher,
                new OperationResult<RequestResponseChannel, Exception>() {
                    @Override
                    public void onComplete(final RequestResponseChannel result) {
                        result.request(request,
                                new OperationResult<Message, Exception>() {
                                    @Override
                                    public void onComplete(final Message response) {

                                        final int statusCode = (int) response.getApplicationProperties().getValue().get(ClientConstants.PUT_TOKEN_STATUS_CODE);
                                        final String statusDescription = (String) response.getApplicationProperties().getValue().get(ClientConstants.PUT_TOKEN_STATUS_DESCRIPTION);

                                        if (statusCode == AmqpResponseCode.ACCEPTED.getValue() || statusCode == AmqpResponseCode.OK.getValue()) {
                                            sendTokenCallback.onComplete(null);
                                        } else {
                                            // 'this' is the inner OperationResult: route the
                                            // failure through its own onError below.
                                            this.onError(ExceptionUtil.amqpResponseCodeToException(statusCode, statusDescription));
                                        }
                                    }

                                    @Override
                                    public void onError(final Exception error) {
                                        sendTokenCallback.onError(error);
                                    }
                                });
                    }

                    @Override
                    public void onError(Exception error) {
                        sendTokenCallback.onError(error);
                    }
                });
    }

    /** Closes the underlying request/response channel, signalling closeCallback when done. */
    public void close(
            final ReactorDispatcher reactorDispatcher,
            final OperationResult<Void, Exception> closeCallback) {

        this.innerChannel.close(reactorDispatcher, closeCallback);
    }
}
/*
 * Copyright (c) Microsoft. All rights reserved.
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
package com.microsoft.azure.eventhubs.impl;

import org.apache.qpid.proton.amqp.Symbol;

import java.time.Duration;

/**
 * Client-wide constants: ports, size limits, retry/token timings, CBS and
 * management-node protocol keys, and user-agent platform/framework strings.
 * (Modifier order normalized to the conventional {@code public static final}.)
 */
public final class ClientConstants {
    public static final int AMQPS_PORT = 5671;
    public static final int HTTPS_PORT = 443;
    public static final int MAX_PARTITION_KEY_LENGTH = 128;
    public static final Symbol SERVER_BUSY_ERROR = Symbol.getSymbol(AmqpConstants.VENDOR + ":server-busy");
    public static final Symbol ARGUMENT_ERROR = Symbol.getSymbol(AmqpConstants.VENDOR + ":argument-error");
    public static final Symbol ARGUMENT_OUT_OF_RANGE_ERROR = Symbol.getSymbol(AmqpConstants.VENDOR + ":argument-out-of-range");
    public static final Symbol ENTITY_DISABLED_ERROR = Symbol.getSymbol(AmqpConstants.VENDOR + ":entity-disabled");
    public static final Symbol PARTITION_NOT_OWNED_ERROR = Symbol.getSymbol(AmqpConstants.VENDOR + ":partition-not-owned");
    public static final Symbol STORE_LOCK_LOST_ERROR = Symbol.getSymbol(AmqpConstants.VENDOR + ":store-lock-lost");
    public static final Symbol PUBLISHER_REVOKED_ERROR = Symbol.getSymbol(AmqpConstants.VENDOR + ":publisher-revoked");
    public static final Symbol TIMEOUT_ERROR = Symbol.getSymbol(AmqpConstants.VENDOR + ":timeout");
    public static final Symbol TRACKING_ID_PROPERTY = Symbol.getSymbol(AmqpConstants.VENDOR + ":tracking-id");
    public static final int MAX_MESSAGE_LENGTH_BYTES = 256 * 1024;
    public static final int MAX_FRAME_SIZE_BYTES = 64 * 1024;
    public static final int MAX_EVENTHUB_AMQP_HEADER_SIZE_BYTES = 512;
    public static final Duration TIMER_TOLERANCE = Duration.ofSeconds(1);
    public static final Duration DEFAULT_RETRY_MIN_BACKOFF = Duration.ofSeconds(0);
    public static final Duration DEFAULT_RETRY_MAX_BACKOFF = Duration.ofSeconds(30);
    public static final Duration TOKEN_REFRESH_INTERVAL = Duration.ofMinutes(10); // renew every 10 mins, which expires 20 mins
    public static final Duration TOKEN_VALIDITY = Duration.ofMinutes(20);
    public static final int DEFAULT_MAX_RETRY_COUNT = 10;
    public static final boolean DEFAULT_IS_TRANSIENT = true;
    public static final int REACTOR_IO_POLL_TIMEOUT = 20;
    public static final int SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS = 4;
    public static final int MGMT_CHANNEL_MIN_RETRY_IN_MILLIS = 5;
    public static final String NO_RETRY = "NoRetry";
    public static final String DEFAULT_RETRY = "Default";
    public static final String PRODUCT_NAME = "MSJavaClient";
    public static final String CURRENT_JAVACLIENT_VERSION = "2.0.0";
    public static final String PLATFORM_INFO = getPlatformInfo();
    public static final String FRAMEWORK_INFO = getFrameworkInfo();
    public static final String CBS_ADDRESS = "$cbs";
    public static final String PUT_TOKEN_OPERATION = "operation";
    public static final String PUT_TOKEN_OPERATION_VALUE = "put-token";
    public static final String PUT_TOKEN_TYPE = "type";
    public static final String SAS_TOKEN_TYPE = "servicebus.windows.net:sastoken";
    public static final String PUT_TOKEN_AUDIENCE = "name";
    public static final String PUT_TOKEN_EXPIRY = "expiration";
    public static final String PUT_TOKEN_STATUS_CODE = "status-code";
    public static final String PUT_TOKEN_STATUS_DESCRIPTION = "status-description";
    public static final String MANAGEMENT_ADDRESS = "$management";
    public static final String MANAGEMENT_EVENTHUB_ENTITY_TYPE = AmqpConstants.VENDOR + ":eventhub";
    public static final String MANAGEMENT_PARTITION_ENTITY_TYPE = AmqpConstants.VENDOR + ":partition";
    public static final String MANAGEMENT_OPERATION_KEY = "operation";
    public static final String READ_OPERATION_VALUE = "READ";
    public static final String MANAGEMENT_ENTITY_TYPE_KEY = "type";
    public static final String MANAGEMENT_ENTITY_NAME_KEY = "name";
    public static final String MANAGEMENT_PARTITION_NAME_KEY = "partition";
    public static final String MANAGEMENT_SECURITY_TOKEN_KEY = "security_token";
    public static final String MANAGEMENT_RESULT_PARTITION_IDS = "partition_ids";
    public static final String MANAGEMENT_RESULT_PARTITION_COUNT = "partition_count";
    public static final String MANAGEMENT_RESULT_CREATED_AT = "created_at";
    public static final String MANAGEMENT_RESULT_BEGIN_SEQUENCE_NUMBER = "begin_sequence_number";
    public static final String MANAGEMENT_RESULT_LAST_ENQUEUED_SEQUENCE_NUMBER = "last_enqueued_sequence_number";
    public static final String MANAGEMENT_RESULT_LAST_ENQUEUED_OFFSET = "last_enqueued_offset";
    public static final String MANAGEMENT_RESULT_LAST_ENQUEUED_TIME_UTC = "last_enqueued_time_utc";
    public static final String MANAGEMENT_RESULT_PARTITION_IS_EMPTY = "is_partition_empty";
    public static final String MANAGEMENT_STATUS_CODE_KEY = "status-code";
    public static final String MANAGEMENT_STATUS_DESCRIPTION_KEY = "status-description";
    public static final String MANAGEMENT_RESPONSE_ERROR_CONDITION = "error-condition";
    public static final Symbol LAST_ENQUEUED_SEQUENCE_NUMBER = Symbol.valueOf(MANAGEMENT_RESULT_LAST_ENQUEUED_SEQUENCE_NUMBER);
    public static final Symbol LAST_ENQUEUED_OFFSET = Symbol.valueOf(MANAGEMENT_RESULT_LAST_ENQUEUED_OFFSET);
    public static final Symbol LAST_ENQUEUED_TIME_UTC = Symbol.valueOf(MANAGEMENT_RESULT_LAST_ENQUEUED_TIME_UTC);
    public static final String AMQP_REQUEST_FAILED_ERROR = "status-code: %s, status-description: %s";
    public static final String TOKEN_AUDIENCE_FORMAT = "amqp://%s/%s";
    public static final String HTTPS_URI_FORMAT = "https://%s:%s";
    public static final int MAX_RECEIVER_NAME_LENGTH = 64;

    /**
     * This is a constant defined to represent the start of a partition stream in EventHub.
     */
    public static final String START_OF_STREAM = "-1";
    /**
     * This is a constant defined to represent the current end of a partition stream in EventHub.
     * This can be used as an offset argument in receiver creation to start receiving from the latest
     * event, instead of a specific offset or point in time.
     */
    static final String END_OF_STREAM = "@latest";

    /** Utility holder — not instantiable. */
    private ClientConstants() {
    }

    /** Builds the "arch:…;os:…;os version:…" segment of the user-agent string. */
    private static String getPlatformInfo() {
        final StringBuilder platformInfo = new StringBuilder();
        platformInfo.append("arch:");
        platformInfo.append(System.getProperty("os.arch"));
        platformInfo.append(";os:");
        platformInfo.append(System.getProperty("os.name"));
        platformInfo.append(";os version:");
        platformInfo.append(System.getProperty("os.version"));

        return platformInfo.toString();
    }

    /** Builds the "jre:…;vendor:…;jvm:…" segment of the user-agent string. */
    private static String getFrameworkInfo() {
        final Package javaRuntimeClassPkg = Runtime.class.getPackage();
        final StringBuilder frameworkInfo = new StringBuilder();
        frameworkInfo.append("jre:");
        frameworkInfo.append(javaRuntimeClassPkg.getImplementationVersion());
        frameworkInfo.append(";vendor:");
        frameworkInfo.append(javaRuntimeClassPkg.getImplementationVendor());
        // Fixed: was ";jvm" — missing the ':' separator, breaking the key:value
        // pattern used by every other segment of this string.
        frameworkInfo.append(";jvm:");
        frameworkInfo.append(System.getProperty("java.vm.version"));

        return frameworkInfo.toString();
    }
}
+ */ +package com.microsoft.azure.eventhubs.impl; + +import com.microsoft.azure.eventhubs.EventHubException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Locale; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ScheduledExecutorService; + +/** + * Contract for all client entities with Open-Close/Abort state m/c + * main-purpose: closeAll related entities + * Internal-class + */ +abstract class ClientEntity { + + private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(ClientEntity.class); + protected final ScheduledExecutorService executor; + private final String clientId; + private final Object syncClose; + private final ClientEntity parent; + private CompletableFuture closeTask; + private boolean isClosing; + private boolean isClosed; + + protected ClientEntity(final String clientId, final ClientEntity parent, final ScheduledExecutorService executor) { + this.clientId = clientId; + this.parent = parent; + this.executor = executor; + + this.syncClose = new Object(); + } + + protected abstract CompletableFuture onClose(); + + public String getClientId() { + return this.clientId; + } + + boolean getIsClosed() { + final boolean isParentClosed = this.parent != null && this.parent.getIsClosed(); + synchronized (this.syncClose) { + return isParentClosed || this.isClosed; + } + } + + // returns true even if the Parent is (being) Closed + boolean getIsClosingOrClosed() { + final boolean isParentClosingOrClosed = this.parent != null && this.parent.getIsClosingOrClosed(); + synchronized (this.syncClose) { + return isParentClosingOrClosed || this.isClosing || this.isClosed; + } + } + + // used to force close when entity is faulted + protected final void setClosed() { + synchronized (this.syncClose) { + this.isClosed = true; + } + } + + public final CompletableFuture close() { + synchronized (this.syncClose) { + if (this.isClosed || this.isClosing) + return 
this.closeTask == null ? CompletableFuture.completedFuture(null) : this.closeTask; + + this.isClosing = true; + } + + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info("close: clientId[" + this.clientId + "]"); + } + + this.closeTask = this.onClose().thenRunAsync(new Runnable() { + @Override + public void run() { + synchronized (ClientEntity.this.syncClose) { + ClientEntity.this.isClosing = false; + ClientEntity.this.isClosed = true; + } + } + }, this.executor); + + return this.closeTask; + } + + public final void closeSync() throws EventHubException { + try { + this.close().get(); + } catch (InterruptedException | ExecutionException exception) { + if (exception instanceof InterruptedException) { + // Re-assert the thread's interrupted status + Thread.currentThread().interrupt(); + } + + final Throwable throwable = exception.getCause(); + if (throwable instanceof RuntimeException) { + throw (RuntimeException) throwable; + } else if (throwable instanceof EventHubException) { + throw (EventHubException) throwable; + } else { + throw new RuntimeException(throwable != null ? throwable : exception); + } + } + } + + protected final void throwIfClosed() { + if (this.getIsClosingOrClosed()) { + throw new IllegalStateException(String.format(Locale.US, "Operation not allowed after the %s instance is Closed.", this.getClass().getName()), this.getLastKnownError()); + } + } + + protected Exception getLastKnownError() { + return null; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ConnectionHandler.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ConnectionHandler.java new file mode 100644 index 0000000000000..1e13d52c6a658 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ConnectionHandler.java @@ -0,0 +1,280 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. 
See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.impl; + +import com.microsoft.azure.eventhubs.TransportType; +import org.apache.qpid.proton.Proton; +import org.apache.qpid.proton.amqp.Symbol; +import org.apache.qpid.proton.amqp.transport.ErrorCondition; +import org.apache.qpid.proton.engine.*; +import org.apache.qpid.proton.engine.impl.TransportInternal; +import org.apache.qpid.proton.reactor.Handshaker; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +// ServiceBus <-> ProtonReactor interaction handles all +// amqp_connection/transport related events from reactor +public class ConnectionHandler extends BaseHandler { + + private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(ConnectionHandler.class); + + private final AmqpConnection amqpConnection; + + protected ConnectionHandler(final AmqpConnection amqpConnection) { + + add(new Handshaker()); + this.amqpConnection = amqpConnection; + } + + static ConnectionHandler create(TransportType transportType, AmqpConnection amqpConnection) { + switch (transportType) { + case AMQP_WEB_SOCKETS: + if (WebSocketProxyConnectionHandler.shouldUseProxy(amqpConnection.getHostName())) { + return new WebSocketProxyConnectionHandler(amqpConnection); + } else { + return new WebSocketConnectionHandler(amqpConnection); + } + case AMQP: + default: + return new ConnectionHandler(amqpConnection); + } + } + + private static SslDomain makeDomain(SslDomain.Mode mode) { + + final SslDomain domain = Proton.sslDomain(); + domain.init(mode); + + // TODO: VERIFY_PEER_NAME support + domain.setPeerAuthentication(SslDomain.VerifyMode.ANONYMOUS_PEER); + return domain; + } + + protected AmqpConnection getAmqpConnection() { + return this.amqpConnection; + } + + @Override + public void onConnectionInit(Event event) { + if (TRACE_LOGGER.isInfoEnabled()) { + 
TRACE_LOGGER.info(String.format(Locale.US, "onConnectionInit hostname[%s]", this.amqpConnection.getHostName())); + } + + final Connection connection = event.getConnection(); + final String hostName = new StringBuilder(this.amqpConnection.getHostName()) + .append(":") + .append(String.valueOf(this.getProtocolPort())) + .toString(); + + connection.setHostname(hostName); + connection.setContainer(StringUtil.getRandomString()); + + final Map connectionProperties = new HashMap<>(); + connectionProperties.put(AmqpConstants.PRODUCT, ClientConstants.PRODUCT_NAME); + connectionProperties.put(AmqpConstants.VERSION, ClientConstants.CURRENT_JAVACLIENT_VERSION); + connectionProperties.put(AmqpConstants.PLATFORM, ClientConstants.PLATFORM_INFO); + connectionProperties.put(AmqpConstants.FRAMEWORK, ClientConstants.FRAMEWORK_INFO); + + final String userAgent = EventHubClientImpl.USER_AGENT; + if (userAgent != null) { + connectionProperties.put(AmqpConstants.USER_AGENT, userAgent.length() < AmqpConstants.MAX_USER_AGENT_LENGTH ? + userAgent : + userAgent.substring(0, AmqpConstants.MAX_USER_AGENT_LENGTH)); + } + + connection.setProperties(connectionProperties); + connection.open(); + } + + protected void addTransportLayers(final Event event, final TransportInternal transport) { + final SslDomain domain = makeDomain(SslDomain.Mode.CLIENT); + transport.ssl(domain); + } + + protected void notifyTransportErrors(final Event event) { + // no-op + } + + /** + * HostName to be used for socket creation. + * for ex: in case of proxy server - this could be proxy ip address + * + * @return host name + */ + public String getRemoteHostName() { + return amqpConnection.getHostName(); + } + + /** + * port used to create socket. 
+ * for ex: in case of talking to event hubs service via proxy - use proxy port + * + * @return port + */ + protected int getRemotePort() { + return this.getProtocolPort(); + } + + /** + * Port used on connection open frame + * + * @return port + */ + protected int getProtocolPort() { + return ClientConstants.AMQPS_PORT; + } + + protected int getMaxFrameSize() { + return AmqpConstants.MAX_FRAME_SIZE; + } + + @Override + public void onConnectionBound(Event event) { + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info(String.format(Locale.US, "onConnectionBound hostname[%s]", this.amqpConnection.getHostName())); + } + + final Transport transport = event.getTransport(); + + this.addTransportLayers(event, (TransportInternal) transport); + } + + @Override + public void onConnectionUnbound(Event event) { + + final Connection connection = event.getConnection(); + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info(String.format(Locale.US, "onConnectionUnbound: hostname[%s], state[%s], remoteState[%s]", + connection.getHostname(), connection.getLocalState(), connection.getRemoteState())); + } + + // if failure happened while establishing transport - nothing to free up. + if (connection.getRemoteState() != EndpointState.UNINITIALIZED) + connection.free(); + } + + @Override + public void onTransportError(Event event) { + + final Connection connection = event.getConnection(); + final Transport transport = event.getTransport(); + final ErrorCondition condition = transport.getCondition(); + + if (TRACE_LOGGER.isWarnEnabled()) { + TRACE_LOGGER.warn(String.format(Locale.US, "onTransportError: hostname[%s], error[%s]", + connection != null ? connection.getHostname() : "n/a", + condition != null ? 
condition.getDescription() : "n/a")); + } + + if (connection != null && connection.getRemoteState() != EndpointState.CLOSED) { + // if the remote-peer abruptly closes the connection without issuing close frame + // issue one + this.amqpConnection.onConnectionError(condition); + } + + // onTransportError event is not handled by the global IO Handler for cleanup + transport.unbind(); + + this.notifyTransportErrors(event); + } + + @Override + public void onTransportClosed(Event event) { + + final Connection connection = event.getConnection(); + final Transport transport = event.getTransport(); + final ErrorCondition condition = transport.getCondition(); + + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info(String.format(Locale.US, "onTransportClosed: hostname[%s], error[%s]", + connection != null ? connection.getHostname() : "n/a", (condition != null ? condition.getDescription() : "n/a"))); + } + + if (connection != null && connection.getRemoteState() != EndpointState.CLOSED) { + // if the remote-peer abruptly closes the connection without issuing close frame + // issue one + this.amqpConnection.onConnectionError(condition); + } + } + + @Override + public void onConnectionLocalOpen(Event event) { + final Connection connection = event.getConnection(); + final ErrorCondition error = connection.getCondition(); + + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info(String.format(Locale.US, "onConnectionLocalOpen: hostname[%s], errorCondition[%s], errorDescription[%s]", + connection.getHostname(), + error != null ? error.getCondition() : "n/a", + error != null ? 
error.getDescription() : "n/a")); + } + } + + @Override + public void onConnectionRemoteOpen(Event event) { + + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info(String.format(Locale.US, "onConnectionRemoteOpen: hostname[%s], remoteContainer[%s]", + event.getConnection().getHostname(), event.getConnection().getRemoteContainer())); + } + + this.amqpConnection.onOpenComplete(null); + } + + @Override + public void onConnectionLocalClose(Event event) { + + final Connection connection = event.getConnection(); + final ErrorCondition error = connection.getCondition(); + + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info(String.format(Locale.US, "onConnectionLocalClose: hostname[%s], errorCondition[%s], errorDescription[%s]", + connection.getHostname(), + error != null ? error.getCondition() : "n/a", + error != null ? error.getDescription() : "n/a")); + } + + if (connection.getRemoteState() == EndpointState.CLOSED) { + // This means that the CLOSE origin is Service + final Transport transport = connection.getTransport(); + if (transport != null) { + transport.unbind(); // we proactively dispose IO even if service fails to close + } + } + } + + @Override + public void onConnectionRemoteClose(Event event) { + + final Connection connection = event.getConnection(); + final ErrorCondition error = connection.getRemoteCondition(); + + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info(String.format(Locale.US, "onConnectionRemoteClose: hostname[%s], errorCondition[%s], errorDescription[%s]", + connection.getHostname(), + error != null ? error.getCondition() : "n/a", + error != null ? 
error.getDescription() : "n/a")); + } + + this.amqpConnection.onConnectionError(error); + } + + @Override + public void onConnectionFinal(Event event) { + final Connection connection = event.getConnection(); + final ErrorCondition error = connection.getCondition(); + + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info(String.format(Locale.US, "onConnectionFinal: hostname[%s], errorCondition[%s], errorDescription[%s]", + connection.getHostname(), + error != null ? error.getCondition() : "n/a", + error != null ? error.getDescription() : "n/a")); + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/CustomIOHandler.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/CustomIOHandler.java new file mode 100644 index 0000000000000..c091c00be1fcc --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/CustomIOHandler.java @@ -0,0 +1,30 @@ +package com.microsoft.azure.eventhubs.impl; + +import org.apache.qpid.proton.engine.Connection; +import org.apache.qpid.proton.engine.Event; +import org.apache.qpid.proton.engine.Transport; +import org.apache.qpid.proton.reactor.impl.IOHandler; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Locale; + +public class CustomIOHandler extends IOHandler { + + private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(CustomIOHandler.class); + + @Override + public void onTransportClosed(Event event) { + final Transport transport = event.getTransport(); + final Connection connection = event.getConnection(); + + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info(String.format(Locale.US, "onTransportClosed hostname[%s]", + (connection != null ? 
connection.getHostname() : "n/a"))); + } + + if (transport != null && connection != null && connection.getTransport() != null) { + transport.unbind(); + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/DispatchHandler.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/DispatchHandler.java new file mode 100644 index 0000000000000..67c84a0aee7fd --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/DispatchHandler.java @@ -0,0 +1,13 @@ +package com.microsoft.azure.eventhubs.impl; + +import org.apache.qpid.proton.engine.BaseHandler; +import org.apache.qpid.proton.engine.Event; + +public abstract class DispatchHandler extends BaseHandler { + @Override + public void onTimerTask(Event e) { + this.onEvent(); + } + + public abstract void onEvent(); +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ErrorContextProvider.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ErrorContextProvider.java new file mode 100644 index 0000000000000..95468a02a6f43 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ErrorContextProvider.java @@ -0,0 +1,11 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.impl; + +import com.microsoft.azure.eventhubs.ErrorContext; + +interface ErrorContextProvider { + ErrorContext getContext(); +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventDataBatchImpl.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventDataBatchImpl.java new file mode 100644 index 0000000000000..1f138df8d414e --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventDataBatchImpl.java @@ -0,0 +1,92 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.impl; + +import com.microsoft.azure.eventhubs.EventData; +import com.microsoft.azure.eventhubs.EventDataBatch; +import com.microsoft.azure.eventhubs.PayloadSizeExceededException; +import org.apache.qpid.proton.message.Message; + +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; + +final class EventDataBatchImpl implements EventDataBatch { + + private final int maxMessageSize; + private final String partitionKey; + private final List events; + private final byte[] eventBytes; + private int currentSize = 0; + + EventDataBatchImpl(final int maxMessageSize, final String partitionKey) { + + this.maxMessageSize = maxMessageSize; + this.partitionKey = partitionKey; + this.events = new LinkedList<>(); + this.currentSize = (maxMessageSize / 65536) * 1024; // reserve 1KB for every 64KB + this.eventBytes = new byte[maxMessageSize]; + } + + public final int getSize() { + return events.size(); + } + + public final boolean tryAdd(final EventData eventData) throws PayloadSizeExceededException { + + if (eventData == null) { + throw new IllegalArgumentException("eventData cannot be null"); + } + + final EventDataImpl eventDataImpl = (EventDataImpl) 
eventData; + + final int size; + try { + size = getSize(eventDataImpl, events.isEmpty()); + } catch (java.nio.BufferOverflowException exception) { + throw new PayloadSizeExceededException(String.format("Size of the payload exceeded Maximum message size: %s kb", this.maxMessageSize / 1024)); + } + + if (this.currentSize + size > this.maxMessageSize) + return false; + + this.events.add(eventDataImpl); + this.currentSize += size; + return true; + } + + public Iterator iterator() { + + return this.events.iterator(); + } + + Iterable getInternalIterable() { + + return this.events; + } + + String getPartitionKey() { + + return this.partitionKey; + } + + private int getSize(final EventDataImpl eventData, final boolean isFirst) { + + final Message amqpMessage = this.partitionKey != null ? eventData.toAmqpMessage(this.partitionKey) : eventData.toAmqpMessage(); + int eventSize = amqpMessage.encode(this.eventBytes, 0, maxMessageSize); // actual encoded bytes size + eventSize += 16; // data section overhead + + if (isFirst) { + amqpMessage.setBody(null); + amqpMessage.setApplicationProperties(null); + amqpMessage.setProperties(null); + amqpMessage.setDeliveryAnnotations(null); + + eventSize += amqpMessage.encode(this.eventBytes, 0, maxMessageSize); + } + + return eventSize; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventDataImpl.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventDataImpl.java new file mode 100755 index 0000000000000..69986268623c9 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventDataImpl.java @@ -0,0 +1,269 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.impl; + +import com.microsoft.azure.eventhubs.EventData; +import org.apache.qpid.proton.Proton; +import org.apache.qpid.proton.amqp.Binary; +import org.apache.qpid.proton.amqp.Symbol; +import org.apache.qpid.proton.amqp.messaging.*; +import org.apache.qpid.proton.message.Message; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public final class EventDataImpl implements EventData { + private static final long serialVersionUID = -5631628195600014255L; + private static final int BODY_DATA_NULL = -1; + + transient private Binary bodyData; + transient private Object amqpBody; + + private Map properties; + private SystemProperties systemProperties; + + private EventDataImpl() { + } + + @SuppressWarnings("unchecked") + EventDataImpl(Message amqpMessage) { + if (amqpMessage == null) { + throw new IllegalArgumentException("amqpMessage cannot be null"); + } + + final Map messageAnnotations = amqpMessage.getMessageAnnotations().getValue(); + final HashMap receiveProperties = new HashMap<>(); + + for (Map.Entry annotation : messageAnnotations.entrySet()) { + receiveProperties.put(annotation.getKey().toString(), annotation.getValue() != null ? 
annotation.getValue() : null); + } + + if (amqpMessage.getProperties() != null) { + if (amqpMessage.getMessageId() != null) + receiveProperties.put(AmqpConstants.AMQP_PROPERTY_MESSAGE_ID, amqpMessage.getMessageId()); + if (amqpMessage.getUserId() != null) + receiveProperties.put(AmqpConstants.AMQP_PROPERTY_USER_ID, amqpMessage.getUserId()); + if (amqpMessage.getAddress() != null) + receiveProperties.put(AmqpConstants.AMQP_PROPERTY_TO, amqpMessage.getAddress()); + if (amqpMessage.getSubject() != null) + receiveProperties.put(AmqpConstants.AMQP_PROPERTY_SUBJECT, amqpMessage.getSubject()); + if (amqpMessage.getReplyTo() != null) + receiveProperties.put(AmqpConstants.AMQP_PROPERTY_REPLY_TO, amqpMessage.getReplyTo()); + if (amqpMessage.getCorrelationId() != null) + receiveProperties.put(AmqpConstants.AMQP_PROPERTY_CORRELATION_ID, amqpMessage.getCorrelationId()); + if (amqpMessage.getContentType() != null) + receiveProperties.put(AmqpConstants.AMQP_PROPERTY_CONTENT_TYPE, amqpMessage.getContentType()); + if (amqpMessage.getContentEncoding() != null) + receiveProperties.put(AmqpConstants.AMQP_PROPERTY_CONTENT_ENCODING, amqpMessage.getContentEncoding()); + if (amqpMessage.getProperties().getAbsoluteExpiryTime() != null) + receiveProperties.put(AmqpConstants.AMQP_PROPERTY_ABSOLUTE_EXPRITY_TIME, amqpMessage.getExpiryTime()); + if (amqpMessage.getProperties().getCreationTime() != null) + receiveProperties.put(AmqpConstants.AMQP_PROPERTY_CREATION_TIME, amqpMessage.getCreationTime()); + if (amqpMessage.getGroupId() != null) + receiveProperties.put(AmqpConstants.AMQP_PROPERTY_GROUP_ID, amqpMessage.getGroupId()); + if (amqpMessage.getProperties().getGroupSequence() != null) + receiveProperties.put(AmqpConstants.AMQP_PROPERTY_GROUP_SEQUENCE, amqpMessage.getGroupSequence()); + if (amqpMessage.getReplyToGroupId() != null) + receiveProperties.put(AmqpConstants.AMQP_PROPERTY_REPLY_TO_GROUP_ID, amqpMessage.getReplyToGroupId()); + } + + this.systemProperties = new 
SystemProperties(receiveProperties); + this.properties = amqpMessage.getApplicationProperties() == null ? null : amqpMessage.getApplicationProperties().getValue(); + + final Section bodySection = amqpMessage.getBody(); + if (bodySection != null) { + if (bodySection instanceof Data) { + this.bodyData = ((Data) bodySection).getValue(); + this.amqpBody = this.bodyData; + } else if (bodySection instanceof AmqpValue) { + this.amqpBody = ((AmqpValue) bodySection).getValue(); + } else if (bodySection instanceof AmqpSequence) { + this.amqpBody = ((AmqpSequence) bodySection).getValue(); + } + } + + amqpMessage.clear(); + } + + public EventDataImpl(byte[] data) { + this(); + + if (data == null) { + throw new IllegalArgumentException("data cannot be null"); + } + + this.bodyData = new Binary(data); + } + + public EventDataImpl(byte[] data, final int offset, final int length) { + this(); + + if (data == null) { + throw new IllegalArgumentException("data cannot be null"); + } + + this.bodyData = new Binary(data, offset, length); + } + + public EventDataImpl(ByteBuffer buffer) { + this(); + + if (buffer == null) { + throw new IllegalArgumentException("data cannot be null"); + } + + this.bodyData = Binary.create(buffer); + } + + public Object getObject() { + return this.amqpBody; + } + + public byte[] getBytes() { + + if (this.bodyData == null) + return null; + + return this.bodyData.getArray(); + } + + public Map getProperties() { + if (this.properties == null) { + this.properties = new HashMap<>(); + } + + return this.properties; + } + + public SystemProperties getSystemProperties() { + return this.systemProperties; + } + + // This is intended to be used while sending EventData - so EventData.SystemProperties will not be copied over to the AmqpMessage + Message toAmqpMessage() { + final Message amqpMessage = Proton.message(); + + if (this.properties != null && !this.properties.isEmpty()) { + final ApplicationProperties applicationProperties = new 
ApplicationProperties(this.properties); + amqpMessage.setApplicationProperties(applicationProperties); + } + + if (this.systemProperties != null && !this.systemProperties.isEmpty()) { + for (Map.Entry systemProperty : this.systemProperties.entrySet()) { + final String propertyName = systemProperty.getKey(); + if (!EventDataUtil.RESERVED_SYSTEM_PROPERTIES.contains(propertyName)) { + if (AmqpConstants.RESERVED_PROPERTY_NAMES.contains(propertyName)) + switch (propertyName) { + case AmqpConstants.AMQP_PROPERTY_MESSAGE_ID: + amqpMessage.setMessageId(systemProperty.getValue()); + break; + case AmqpConstants.AMQP_PROPERTY_USER_ID: + amqpMessage.setUserId((byte[]) systemProperty.getValue()); + break; + case AmqpConstants.AMQP_PROPERTY_TO: + amqpMessage.setAddress((String) systemProperty.getValue()); + break; + case AmqpConstants.AMQP_PROPERTY_SUBJECT: + amqpMessage.setSubject((String) systemProperty.getValue()); + break; + case AmqpConstants.AMQP_PROPERTY_REPLY_TO: + amqpMessage.setReplyTo((String) systemProperty.getValue()); + break; + case AmqpConstants.AMQP_PROPERTY_CORRELATION_ID: + amqpMessage.setCorrelationId(systemProperty.getValue()); + break; + case AmqpConstants.AMQP_PROPERTY_CONTENT_TYPE: + amqpMessage.setContentType((String) systemProperty.getValue()); + break; + case AmqpConstants.AMQP_PROPERTY_CONTENT_ENCODING: + amqpMessage.setContentEncoding((String) systemProperty.getValue()); + break; + case AmqpConstants.AMQP_PROPERTY_ABSOLUTE_EXPRITY_TIME: + amqpMessage.setExpiryTime((long) systemProperty.getValue()); + break; + case AmqpConstants.AMQP_PROPERTY_CREATION_TIME: + amqpMessage.setCreationTime((long) systemProperty.getValue()); + break; + case AmqpConstants.AMQP_PROPERTY_GROUP_ID: + amqpMessage.setGroupId((String) systemProperty.getValue()); + break; + case AmqpConstants.AMQP_PROPERTY_GROUP_SEQUENCE: + amqpMessage.setGroupSequence((long) systemProperty.getValue()); + break; + case AmqpConstants.AMQP_PROPERTY_REPLY_TO_GROUP_ID: + 
amqpMessage.setReplyToGroupId((String) systemProperty.getValue()); + break; + default: + throw new RuntimeException("unreachable"); + } + else { + final MessageAnnotations messageAnnotations = (amqpMessage.getMessageAnnotations() == null) + ? new MessageAnnotations(new HashMap<>()) + : amqpMessage.getMessageAnnotations(); + messageAnnotations.getValue().put(Symbol.getSymbol(systemProperty.getKey()), systemProperty.getValue()); + amqpMessage.setMessageAnnotations(messageAnnotations); + } + } + } + } + + if (this.bodyData != null) { + amqpMessage.setBody(new Data(this.bodyData)); + } else if (this.amqpBody != null) { + if (this.amqpBody instanceof List) { + amqpMessage.setBody(new AmqpSequence((List) this.amqpBody)); + } else { + amqpMessage.setBody(new AmqpValue(this.amqpBody)); + } + } + + return amqpMessage; + } + + Message toAmqpMessage(final String partitionKey) { + final Message amqpMessage = this.toAmqpMessage(); + + final MessageAnnotations messageAnnotations = (amqpMessage.getMessageAnnotations() == null) + ? new MessageAnnotations(new HashMap<>()) + : amqpMessage.getMessageAnnotations(); + messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); + amqpMessage.setMessageAnnotations(messageAnnotations); + + return amqpMessage; + } + + private void writeObject(ObjectOutputStream out) throws IOException { + out.defaultWriteObject(); + + out.writeInt(this.bodyData == null ? 
BODY_DATA_NULL : this.bodyData.getLength()); + if (this.bodyData != null) + out.write(this.bodyData.getArray(), this.bodyData.getArrayOffset(), this.bodyData.getLength()); + } + + private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { + in.defaultReadObject(); + + final int length = in.readInt(); + if (length != BODY_DATA_NULL) { + + final byte[] data = new byte[length]; + in.readFully(data, 0, length); + this.bodyData = new Binary(data, 0, length); + } + } + + @Override + public int compareTo(EventData other) { + return Long.compare( + this.getSystemProperties().getSequenceNumber(), + other.getSystemProperties().getSequenceNumber() + ); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventDataUtil.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventDataUtil.java new file mode 100644 index 0000000000000..491ee6b6e60ee --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventDataUtil.java @@ -0,0 +1,70 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.impl; + +import com.microsoft.azure.eventhubs.EventData; +import com.microsoft.azure.eventhubs.EventPosition; +import org.apache.qpid.proton.message.Message; + +import java.util.*; +import java.util.function.Consumer; + +/* + * Internal utility class for EventData + */ +final class EventDataUtil { + + @SuppressWarnings("serial") + static final Set RESERVED_SYSTEM_PROPERTIES = Collections.unmodifiableSet(new HashSet() {{ + add(AmqpConstants.OFFSET_ANNOTATION_NAME); + add(AmqpConstants.PARTITION_KEY_ANNOTATION_NAME); + add(AmqpConstants.SEQUENCE_NUMBER_ANNOTATION_NAME); + add(AmqpConstants.ENQUEUED_TIME_UTC_ANNOTATION_NAME); + add(AmqpConstants.PUBLISHER_ANNOTATION_NAME); + }}); + + private EventDataUtil() { + } + + static LinkedList toEventDataCollection(final Collection messages, final PassByRef lastMessageRef) { + + if (messages == null) { + return null; + } + + LinkedList events = new LinkedList<>(); + for (Message message : messages) { + EventData eventData = new EventDataImpl(message); + events.add(eventData); + + if (lastMessageRef != null) { + lastMessageRef.set(new MessageWrapper(message, + EventPosition.fromSequenceNumber(eventData.getSystemProperties().getSequenceNumber(), true))); + } + } + + return events; + } + + static Iterable toAmqpMessages(final Iterable eventDatas, final String partitionKey) { + + final LinkedList messages = new LinkedList<>(); + eventDatas.forEach(new Consumer() { + @Override + public void accept(EventData eventData) { + EventDataImpl eventDataImpl = (EventDataImpl) eventData; + Message amqpMessage = partitionKey == null ? 
eventDataImpl.toAmqpMessage() : eventDataImpl.toAmqpMessage(partitionKey); + messages.add(amqpMessage); + } + }); + + return messages; + } + + static Iterable toAmqpMessages(final Iterable eventDatas) { + + return EventDataUtil.toAmqpMessages(eventDatas, null); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventHubClientImpl.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventHubClientImpl.java new file mode 100644 index 0000000000000..4dc5e927bb7d1 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventHubClientImpl.java @@ -0,0 +1,412 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.impl; + +import com.microsoft.azure.eventhubs.*; + +import java.io.IOException; +import java.security.InvalidKeyException; +import java.security.NoSuchAlgorithmException; +import java.time.Duration; +import java.util.Date; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ScheduledExecutorService; +import java.util.function.Consumer; +import java.util.function.Function; + +public final class EventHubClientImpl extends ClientEntity implements EventHubClient { + + /** + * It will be truncated to 128 characters + */ + public static String USER_AGENT = null; + + private final String eventHubName; + private final Object senderCreateSync; + private volatile boolean isSenderCreateStarted; + private volatile MessagingFactory underlyingFactory; + private volatile MessageSender sender; + private volatile Timer timer; + + private CompletableFuture createSender; + + private 
EventHubClientImpl(final ConnectionStringBuilder connectionString, final ScheduledExecutorService executor) { + super("EventHubClientImpl".concat(StringUtil.getRandomString()), null, executor); + + this.eventHubName = connectionString.getEventHubName(); + this.senderCreateSync = new Object(); + } + + public static CompletableFuture create( + final String connectionString, final RetryPolicy retryPolicy, final ScheduledExecutorService executor) + throws EventHubException, IOException { + final ConnectionStringBuilder connStr = new ConnectionStringBuilder(connectionString); + final EventHubClientImpl eventHubClient = new EventHubClientImpl(connStr, executor); + + return MessagingFactory.createFromConnectionString(connectionString.toString(), retryPolicy, executor) + .thenApplyAsync(new Function() { + @Override + public EventHubClient apply(MessagingFactory factory) { + eventHubClient.underlyingFactory = factory; + eventHubClient.timer = new Timer(factory); + return eventHubClient; + } + }, executor); + } + + public String getEventHubName() { + return eventHubName; + } + + public final EventDataBatch createBatch(BatchOptions options) throws EventHubException { + + return ExceptionUtil.sync(() -> { + int maxSize = this.createInternalSender().thenApplyAsync( + (aVoid) -> this.sender.getMaxMessageSize(), + this.executor).get(); + if (options.maxMessageSize == null) { + return new EventDataBatchImpl(maxSize, options.partitionKey); + } + + if (options.maxMessageSize > maxSize) { + throw new IllegalArgumentException("The maxMessageSize set in BatchOptions is too large. You set a maxMessageSize of " + + options.maxMessageSize + ". 
The maximum allowed size is " + maxSize + "."); + } + + return new EventDataBatchImpl(options.maxMessageSize, options.partitionKey); + } + ); + } + + @Override + public final CompletableFuture send(final EventData data) { + if (data == null) { + throw new IllegalArgumentException("EventData cannot be empty."); + } + + return this.createInternalSender().thenComposeAsync(new Function>() { + @Override + public CompletableFuture apply(Void voidArg) { + return EventHubClientImpl.this.sender.send(((EventDataImpl) data).toAmqpMessage()); + } + }, this.executor); + } + + @Override + public final CompletableFuture send(final Iterable eventDatas) { + if (eventDatas == null || IteratorUtil.sizeEquals(eventDatas, 0)) { + throw new IllegalArgumentException("Empty batch of EventData cannot be sent."); + } + + return this.createInternalSender().thenComposeAsync(new Function>() { + @Override + public CompletableFuture apply(Void voidArg) { + return EventHubClientImpl.this.sender.send(EventDataUtil.toAmqpMessages(eventDatas)); + } + }, this.executor); + } + + @Override + public final CompletableFuture send(final EventDataBatch eventDatas) { + if (eventDatas == null || Integer.compare(eventDatas.getSize(), 0) == 0) { + throw new IllegalArgumentException("Empty batch of EventData cannot be sent."); + } + + final EventDataBatchImpl eventDataBatch = (EventDataBatchImpl) eventDatas; + return eventDataBatch.getPartitionKey() != null ? 
+ this.send(eventDataBatch.getInternalIterable(), eventDataBatch.getPartitionKey()) : + this.send(eventDataBatch.getInternalIterable()); + } + + @Override + public final CompletableFuture send(final EventData eventData, final String partitionKey) { + if (eventData == null) { + throw new IllegalArgumentException("EventData cannot be null."); + } + + if (partitionKey == null) { + throw new IllegalArgumentException("partitionKey cannot be null"); + } + + return this.createInternalSender().thenComposeAsync(new Function>() { + @Override + public CompletableFuture apply(Void voidArg) { + return EventHubClientImpl.this.sender.send(((EventDataImpl) eventData).toAmqpMessage(partitionKey)); + } + }, this.executor); + } + + @Override + public final CompletableFuture send(final Iterable eventDatas, final String partitionKey) { + if (eventDatas == null || IteratorUtil.sizeEquals(eventDatas, 0)) { + throw new IllegalArgumentException("Empty batch of EventData cannot be sent."); + } + + if (partitionKey == null) { + throw new IllegalArgumentException("partitionKey cannot be null"); + } + + if (partitionKey.length() > ClientConstants.MAX_PARTITION_KEY_LENGTH) { + throw new IllegalArgumentException( + String.format(Locale.US, "PartitionKey exceeds the maximum allowed length of partitionKey: %s", ClientConstants.MAX_PARTITION_KEY_LENGTH)); + } + + return this.createInternalSender().thenComposeAsync(new Function>() { + @Override + public CompletableFuture apply(Void voidArg) { + return EventHubClientImpl.this.sender.send(EventDataUtil.toAmqpMessages(eventDatas, partitionKey)); + } + }, this.executor); + } + + @Override + public final CompletableFuture createPartitionSender(final String partitionId) + throws EventHubException { + return PartitionSenderImpl.Create(this.underlyingFactory, this.eventHubName, partitionId, this.executor); + } + + @Override + public final CompletableFuture createReceiver(final String consumerGroupName, final String partitionId, final EventPosition 
eventPosition) + throws EventHubException { + return this.createReceiver(consumerGroupName, partitionId, eventPosition, null); + } + + @Override + public final CompletableFuture createReceiver(final String consumerGroupName, final String partitionId, final EventPosition eventPosition, final ReceiverOptions receiverOptions) + throws EventHubException { + return PartitionReceiverImpl.create(this.underlyingFactory, this.eventHubName, consumerGroupName, partitionId, eventPosition, PartitionReceiverImpl.NULL_EPOCH, false, receiverOptions, this.executor); + } + + @Override + public final CompletableFuture createEpochReceiver(final String consumerGroupName, final String partitionId, final EventPosition eventPosition, final long epoch) + throws EventHubException { + return this.createEpochReceiver(consumerGroupName, partitionId, eventPosition, epoch, null); + } + + @Override + public final CompletableFuture createEpochReceiver(final String consumerGroupName, final String partitionId, final EventPosition eventPosition, final long epoch, final ReceiverOptions receiverOptions) + throws EventHubException { + return PartitionReceiverImpl.create(this.underlyingFactory, this.eventHubName, consumerGroupName, partitionId, eventPosition, epoch, true, receiverOptions, this.executor); + } + + @Override + public CompletableFuture onClose() { + if (this.underlyingFactory != null) { + synchronized (this.senderCreateSync) { + final CompletableFuture internalSenderClose = this.sender != null + ? 
this.sender.close().thenComposeAsync(new Function>() { + @Override + public CompletableFuture apply(Void voidArg) { + return EventHubClientImpl.this.underlyingFactory.close(); + } + }, this.executor) + : this.underlyingFactory.close(); + + return internalSenderClose; + } + } + + return CompletableFuture.completedFuture(null); + } + + private CompletableFuture createInternalSender() { + if (!this.isSenderCreateStarted) { + synchronized (this.senderCreateSync) { + if (!this.isSenderCreateStarted) { + this.createSender = MessageSender.create(this.underlyingFactory, this.getClientId().concat("-InternalSender"), this.eventHubName) + .thenAcceptAsync(new Consumer() { + public void accept(MessageSender a) { + EventHubClientImpl.this.sender = a; + } + }, this.executor); + + this.isSenderCreateStarted = true; + } + } + } + + return this.createSender; + } + + @Override + public CompletableFuture getRuntimeInformation() { + CompletableFuture future1 = null; + + throwIfClosed(); + + Map request = new HashMap(); + request.put(ClientConstants.MANAGEMENT_ENTITY_TYPE_KEY, ClientConstants.MANAGEMENT_EVENTHUB_ENTITY_TYPE); + request.put(ClientConstants.MANAGEMENT_ENTITY_NAME_KEY, this.eventHubName); + request.put(ClientConstants.MANAGEMENT_OPERATION_KEY, ClientConstants.READ_OPERATION_VALUE); + future1 = this.addManagementToken(request); + + if (future1 == null) { + future1 = managementWithRetry(request).thenComposeAsync(new Function, CompletableFuture>() { + @Override + public CompletableFuture apply(Map rawdata) { + CompletableFuture future2 = new CompletableFuture(); + future2.complete(new EventHubRuntimeInformation( + (String) rawdata.get(ClientConstants.MANAGEMENT_ENTITY_NAME_KEY), + ((Date) rawdata.get(ClientConstants.MANAGEMENT_RESULT_CREATED_AT)).toInstant(), + (int) rawdata.get(ClientConstants.MANAGEMENT_RESULT_PARTITION_COUNT), + (String[]) rawdata.get(ClientConstants.MANAGEMENT_RESULT_PARTITION_IDS))); + return future2; + } + }, this.executor); + } + + return future1; + } 
+ + @Override + public CompletableFuture getPartitionRuntimeInformation(String partitionId) { + CompletableFuture future1 = null; + + throwIfClosed(); + + Map request = new HashMap(); + request.put(ClientConstants.MANAGEMENT_ENTITY_TYPE_KEY, ClientConstants.MANAGEMENT_PARTITION_ENTITY_TYPE); + request.put(ClientConstants.MANAGEMENT_ENTITY_NAME_KEY, this.eventHubName); + request.put(ClientConstants.MANAGEMENT_PARTITION_NAME_KEY, partitionId); + request.put(ClientConstants.MANAGEMENT_OPERATION_KEY, ClientConstants.READ_OPERATION_VALUE); + future1 = this.addManagementToken(request); + + if (future1 == null) { + future1 = managementWithRetry(request).thenComposeAsync(new Function, CompletableFuture>() { + @Override + public CompletableFuture apply(Map rawData) { + CompletableFuture future2 = new CompletableFuture(); + future2.complete(new PartitionRuntimeInformation( + (String) rawData.get(ClientConstants.MANAGEMENT_ENTITY_NAME_KEY), + (String) rawData.get(ClientConstants.MANAGEMENT_PARTITION_NAME_KEY), + (long) rawData.get(ClientConstants.MANAGEMENT_RESULT_BEGIN_SEQUENCE_NUMBER), + (long) rawData.get(ClientConstants.MANAGEMENT_RESULT_LAST_ENQUEUED_SEQUENCE_NUMBER), + (String) rawData.get(ClientConstants.MANAGEMENT_RESULT_LAST_ENQUEUED_OFFSET), + ((Date) rawData.get(ClientConstants.MANAGEMENT_RESULT_LAST_ENQUEUED_TIME_UTC)).toInstant(), + (boolean) rawData.get(ClientConstants.MANAGEMENT_RESULT_PARTITION_IS_EMPTY))); + return future2; + } + }, this.executor); + } + + return future1; + } + + private CompletableFuture addManagementToken(Map request) { + CompletableFuture retval = null; + try { + String audience = String.format("amqp://%s/%s", this.underlyingFactory.getHostName(), this.eventHubName); + String token = this.underlyingFactory.getTokenProvider().getToken(audience, ClientConstants.TOKEN_REFRESH_INTERVAL); + request.put(ClientConstants.MANAGEMENT_SECURITY_TOKEN_KEY, token); + } catch (InvalidKeyException | NoSuchAlgorithmException | IOException e) { + retval = 
new CompletableFuture(); + retval.completeExceptionally(e); + } + return retval; + } + + private CompletableFuture> managementWithRetry(Map request) { + final CompletableFuture> rawdataFuture = new CompletableFuture>(); + + final ManagementRetry retrier = new ManagementRetry( + rawdataFuture, + new TimeoutTracker(this.underlyingFactory.getOperationTimeout(), true), + this.underlyingFactory, + request); + + final CompletableFuture scheduledTask = this.timer.schedule(retrier, Duration.ZERO); + if (scheduledTask.isCompletedExceptionally()) { + rawdataFuture.completeExceptionally(ExceptionUtil.getExceptionFromCompletedFuture(scheduledTask)); + } + + return rawdataFuture; + } + + private class ManagementRetry implements Runnable { + private final CompletableFuture> finalFuture; + private final TimeoutTracker timeoutTracker; + private final MessagingFactory mf; + private final Map request; + + ManagementRetry(final CompletableFuture> future, + final TimeoutTracker timeoutTracker, + final MessagingFactory mf, + final Map request) { + this.finalFuture = future; + this.timeoutTracker = timeoutTracker; + this.mf = mf; + this.request = request; + } + + @Override + public void run() { + final long timeLeft = this.timeoutTracker.remaining().toMillis(); + final CompletableFuture> intermediateFuture = this.mf.getManagementChannel() + .request(this.mf.getReactorDispatcher(), + this.request, + timeLeft > 0 ? timeLeft : 0); + + intermediateFuture.whenComplete((final Map result, final Throwable error) -> { + if ((result != null) && (error == null)) { + // Success! + ManagementRetry.this.finalFuture.complete(result); + } else { + final Exception lastException; + final Throwable completeWith; + if (error == null) { + // Timeout, so fake up an exception to keep getNextRetryInternal happy. + // It has to be a EventHubException that is set to retryable or getNextRetryInterval will halt the retries. 
+ lastException = new EventHubException(true, "timed out"); + completeWith = null; + } else if (error instanceof Exception) { + if (error instanceof EventHubException) { + lastException = (EventHubException) error; + } else if (error instanceof AmqpException) { + lastException = ExceptionUtil.toException(((AmqpException) error).getError()); + } else if (error instanceof CompletionException || error instanceof ExecutionException) { + lastException = ExceptionUtil.stripOuterException((Exception) error); + } else { + lastException = (Exception) error; + } + completeWith = lastException; + } else { + lastException = new Exception("got a throwable: " + error.toString()); + completeWith = error; + } + + if (ManagementRetry.this.mf.getIsClosingOrClosed()) { + ManagementRetry.this.finalFuture.completeExceptionally( + new OperationCancelledException( + "OperationCancelled as the underlying client instance was closed.", + lastException)); + } else { + final Duration waitTime = ManagementRetry.this.mf.getRetryPolicy().getNextRetryInterval( + ManagementRetry.this.mf.getClientId(), lastException, this.timeoutTracker.remaining()); + if (waitTime == null) { + // Do not retry again, give up and report error. + if (completeWith == null) { + ManagementRetry.this.finalFuture.complete(null); + } else { + ManagementRetry.this.finalFuture.completeExceptionally(completeWith); + } + } else { + // The only thing needed here is to schedule a new attempt. Even if the RequestResponseChannel has croaked, + // ManagementChannel uses FaultTolerantObject, so the underlying RequestResponseChannel will be recreated + // the next time it is needed. 
+ final ManagementRetry retrier = new ManagementRetry(ManagementRetry.this.finalFuture, ManagementRetry.this.timeoutTracker, + ManagementRetry.this.mf, ManagementRetry.this.request); + EventHubClientImpl.this.timer.schedule(retrier, waitTime); + } + } + } + }); + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventPositionImpl.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventPositionImpl.java new file mode 100644 index 0000000000000..898975d5b0e22 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/EventPositionImpl.java @@ -0,0 +1,111 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.impl; + +import com.microsoft.azure.eventhubs.EventPosition; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.time.Instant; + +public final class EventPositionImpl implements EventPosition { + + private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(EventPositionImpl.class); + + private final String offset; + private final Long sequenceNumber; + private final Instant dateTime; + private final Boolean inclusiveFlag; + + private EventPositionImpl(String o, Long s, Instant d, Boolean i) { + this.offset = o; + this.sequenceNumber = s; + this.dateTime = d; + this.inclusiveFlag = i; + } + + public static EventPositionImpl fromOffset(String offset) { + return EventPositionImpl.fromOffset(offset, false); + } + + public static EventPositionImpl fromOffset(String offset, boolean inclusiveFlag) { + return new EventPositionImpl(offset, null, null, inclusiveFlag); + } + + public static EventPositionImpl fromSequenceNumber(Long sequenceNumber) { + return EventPositionImpl.fromSequenceNumber(sequenceNumber, false); + } + + public static 
EventPositionImpl fromSequenceNumber(Long sequenceNumber, boolean inclusiveFlag) { + return new EventPositionImpl(null, sequenceNumber, null, inclusiveFlag); + } + + public static EventPositionImpl fromEnqueuedTime(Instant dateTime) { + return new EventPositionImpl(null, null, dateTime, null); + } + + public static EventPositionImpl fromStartOfStream() { + return new EventPositionImpl(ClientConstants.START_OF_STREAM, null, null, true); + } + + public static EventPositionImpl fromEndOfStream() { + return new EventPositionImpl(ClientConstants.END_OF_STREAM, null, null, false); + } + + public Long getSequenceNumber() { + return this.sequenceNumber; + } + + public Instant getEnqueuedTime() { + return this.dateTime; + } + + public String getOffset() { + return this.offset; + } + + public boolean getInclusiveFlag() { + return this.inclusiveFlag; + } + + String getExpression() { + // order of preference + if (this.offset != null) { + return this.inclusiveFlag ? + String.format(AmqpConstants.AMQP_ANNOTATION_FORMAT, AmqpConstants.OFFSET_ANNOTATION_NAME, "=", this.offset) : + String.format(AmqpConstants.AMQP_ANNOTATION_FORMAT, AmqpConstants.OFFSET_ANNOTATION_NAME, StringUtil.EMPTY, this.offset); + } + + if (this.sequenceNumber != null) { + return this.inclusiveFlag ? 
+ String.format(AmqpConstants.AMQP_ANNOTATION_FORMAT, AmqpConstants.SEQUENCE_NUMBER_ANNOTATION_NAME, "=", this.sequenceNumber) : + String.format(AmqpConstants.AMQP_ANNOTATION_FORMAT, AmqpConstants.SEQUENCE_NUMBER_ANNOTATION_NAME, StringUtil.EMPTY, this.sequenceNumber); + } + + if (this.dateTime != null) { + String ms; + try { + ms = Long.toString(this.dateTime.toEpochMilli()); + } catch (ArithmeticException ex) { + ms = Long.toString(Long.MAX_VALUE); + if (TRACE_LOGGER.isWarnEnabled()) { + TRACE_LOGGER.warn( + "receiver not yet created, action[createReceiveLink], warning[starting receiver from epoch+Long.Max]"); + } + } + return String.format(AmqpConstants.AMQP_ANNOTATION_FORMAT, AmqpConstants.ENQUEUED_TIME_UTC_ANNOTATION_NAME, StringUtil.EMPTY, ms); + } + + throw new IllegalArgumentException("No starting position was set."); + } + + @Override + public String toString() { + return String.format("offset[%s], sequenceNumber[%s], enqueuedTime[%s], inclusiveFlag[%s]", + this.offset, this.sequenceNumber, + (this.dateTime != null) ? this.dateTime.toEpochMilli() : "null", + this.inclusiveFlag); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ExceptionUtil.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ExceptionUtil.java new file mode 100644 index 0000000000000..0814f2ceeeb9e --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ExceptionUtil.java @@ -0,0 +1,243 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.impl; + +import com.microsoft.azure.eventhubs.*; +import org.apache.qpid.proton.amqp.Symbol; +import org.apache.qpid.proton.amqp.transport.ErrorCondition; + +import java.io.IOException; +import java.time.ZonedDateTime; +import java.util.Locale; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public final class ExceptionUtil { + static Exception toException(ErrorCondition errorCondition) { + if (errorCondition == null) { + throw new IllegalArgumentException("'null' errorCondition cannot be translated to EventHubException"); + } + + if (errorCondition.getCondition() == ClientConstants.TIMEOUT_ERROR) { + return new EventHubException(ClientConstants.DEFAULT_IS_TRANSIENT, new TimeoutException(errorCondition.getDescription())); + } else if (errorCondition.getCondition() == ClientConstants.SERVER_BUSY_ERROR) { + return new ServerBusyException(errorCondition.getDescription()); + } else if (errorCondition.getCondition() == AmqpErrorCode.NotFound) { + return ExceptionUtil.distinguishNotFound(errorCondition.getDescription()); + } else if (errorCondition.getCondition() == ClientConstants.ENTITY_DISABLED_ERROR) { + return new IllegalEntityException(errorCondition.getDescription()); + } else if (errorCondition.getCondition() == AmqpErrorCode.Stolen) { + return new ReceiverDisconnectedException(errorCondition.getDescription()); + } else if (errorCondition.getCondition() == AmqpErrorCode.UnauthorizedAccess) { + return new AuthorizationFailedException(errorCondition.getDescription()); + } else if (errorCondition.getCondition() == AmqpErrorCode.PayloadSizeExceeded) { + return new PayloadSizeExceededException(errorCondition.getDescription()); + } else if (errorCondition.getCondition() == AmqpErrorCode.InternalError) { + return new EventHubException(true, new AmqpException(errorCondition)); + } else if 
(errorCondition.getCondition() == ClientConstants.ARGUMENT_ERROR) { + return new EventHubException(false, errorCondition.getDescription(), new AmqpException(errorCondition)); + } else if (errorCondition.getCondition() == ClientConstants.ARGUMENT_OUT_OF_RANGE_ERROR) { + return new EventHubException(false, errorCondition.getDescription(), new AmqpException(errorCondition)); + } else if (errorCondition.getCondition() == AmqpErrorCode.NotImplemented) { + return new UnsupportedOperationException(errorCondition.getDescription()); + } else if (errorCondition.getCondition() == AmqpErrorCode.NotAllowed) { + return new UnsupportedOperationException(errorCondition.getDescription()); + } else if (errorCondition.getCondition() == ClientConstants.PARTITION_NOT_OWNED_ERROR) { + return new EventHubException(false, errorCondition.getDescription()); + } else if (errorCondition.getCondition() == ClientConstants.STORE_LOCK_LOST_ERROR) { + return new EventHubException(false, errorCondition.getDescription()); + } else if (errorCondition.getCondition() == AmqpErrorCode.AmqpLinkDetachForced) { + return new EventHubException(true, new AmqpException(errorCondition)); + } else if (errorCondition.getCondition() == AmqpErrorCode.ResourceLimitExceeded) { + return new QuotaExceededException(new AmqpException(errorCondition)); + } + + return new EventHubException(ClientConstants.DEFAULT_IS_TRANSIENT, errorCondition.getDescription()); + } + + static Exception amqpResponseCodeToException(final int statusCode, final String statusDescription) { + final AmqpResponseCode amqpResponseCode = AmqpResponseCode.valueOf(statusCode); + if (amqpResponseCode == null) + return new EventHubException(true, String.format(ClientConstants.AMQP_REQUEST_FAILED_ERROR, statusCode, statusDescription)); + + switch (amqpResponseCode) { + case BAD_REQUEST: + return new IllegalArgumentException(String.format(ClientConstants.AMQP_REQUEST_FAILED_ERROR, statusCode, statusDescription)); + case NOT_FOUND: + return 
ExceptionUtil.distinguishNotFound(statusDescription); + case FORBIDDEN: + return new QuotaExceededException(String.format(ClientConstants.AMQP_REQUEST_FAILED_ERROR, statusCode, statusDescription)); + case UNAUTHORIZED: + return new AuthorizationFailedException(String.format(ClientConstants.AMQP_REQUEST_FAILED_ERROR, statusCode, statusDescription)); + default: + return new EventHubException(true, String.format(ClientConstants.AMQP_REQUEST_FAILED_ERROR, statusCode, statusDescription)); + } + } + + static Exception distinguishNotFound(final String message) { + Pattern p = Pattern.compile("The messaging entity .* could not be found"); + Matcher m = p.matcher(message); + if (m.find()) { + return new IllegalEntityException(message); + } else { + return new EventHubException(true, String.format(ClientConstants.AMQP_REQUEST_FAILED_ERROR, AmqpResponseCode.NOT_FOUND, message)); + } + } + + static void completeExceptionally(CompletableFuture future, Exception exception, ErrorContextProvider contextProvider) { + if (exception != null && exception instanceof EventHubException) { + final ErrorContext errorContext = contextProvider.getContext(); + ((EventHubException) exception).setContext(errorContext); + } + + future.completeExceptionally(exception); + } + + // not a specific message related error + static boolean isGeneralSendError(Symbol amqpError) { + return (amqpError == ClientConstants.SERVER_BUSY_ERROR + || amqpError == ClientConstants.TIMEOUT_ERROR + || amqpError == AmqpErrorCode.ResourceLimitExceeded); + } + + static String getTrackingIDAndTimeToLog() { + return String.format(Locale.US, "TrackingId: %s, at: %s", UUID.randomUUID().toString(), ZonedDateTime.now()); + } + + public static String toStackTraceString(final Throwable exception, final String customErrorMessage) { + final StringBuilder builder = new StringBuilder(); + + if (!StringUtil.isNullOrEmpty(customErrorMessage)) { + builder.append(customErrorMessage); + builder.append(System.lineSeparator()); + } + + 
builder.append(exception.getMessage()); + final StackTraceElement[] stackTraceElements = exception.getStackTrace(); + if (stackTraceElements != null) { + for (final StackTraceElement ste : stackTraceElements) { + builder.append(System.lineSeparator()); + builder.append(ste.toString()); + } + } + + final Throwable innerException = exception.getCause(); + if (innerException != null) { + builder.append("Cause: " + innerException.getMessage()); + final StackTraceElement[] innerStackTraceElements = innerException.getStackTrace(); + if (innerStackTraceElements != null) { + for (final StackTraceElement ste : innerStackTraceElements) { + builder.append(System.lineSeparator()); + builder.append(ste.toString()); + } + } + } + + return builder.toString(); + } + + public static Throwable getExceptionFromCompletedFuture( + final CompletableFuture exceptionallyCompletedFuture) { + try { + exceptionallyCompletedFuture.get(); + } catch (ExecutionException | InterruptedException exception) { + final Throwable cause = exception.getCause(); + return (cause == null ? 
exception : cause); + } catch (Exception exception) { + return exception; + } + + return null; + } + + static Exception stripOuterException(final Exception exception) { + Throwable throwable = exception.getCause(); + if (throwable instanceof EventHubException) { + return (EventHubException) throwable; + } else if (throwable instanceof RuntimeException) { + return (RuntimeException) throwable; + } else if (throwable != null) { + return new RuntimeException(throwable); + } else { + return new RuntimeException(exception); + } + } + + private static void handle(final Exception exception) throws EventHubException { + if (exception instanceof InterruptedException) { + // Re-assert the thread's interrupted status + Thread.currentThread().interrupt(); + } + + Throwable throwable = exception.getCause(); + if (throwable instanceof EventHubException) { + throw (EventHubException) throwable; + } else if (throwable instanceof RuntimeException) { + throw (RuntimeException) throwable; + } else if (throwable != null) { + throw new RuntimeException(throwable); + } else { + throw new RuntimeException(exception); + } + } + + public static T sync(final SyncFactory factory) throws EventHubException { + try { + return factory.execute(); + } catch (InterruptedException | ExecutionException exception) { + handle(exception); + return null; + } + } + + public static T syncWithIOException(final SyncFactoryWithIOException factory) throws IOException, EventHubException { + try { + return factory.execute(); + } catch (InterruptedException | ExecutionException exception) { + handle(exception); + return null; + } + } + + public static void syncVoid(final SyncFactoryVoid factory) throws EventHubException { + try { + factory.execute(); + } catch (InterruptedException | ExecutionException exception) { + handle(exception); + } + } + + public static T syncWithIllegalArgException(final SyncFactoryWithIllegalArgException factory) throws EventHubException { + try { + return factory.execute(); + } catch 
(InterruptedException | ExecutionException exception) { + handle(exception); + return null; + } + } + + @FunctionalInterface + public interface SyncFactory { + T execute() throws EventHubException, ExecutionException, InterruptedException; + } + + @FunctionalInterface + public interface SyncFactoryWithIOException { + T execute() throws IOException, EventHubException, ExecutionException, InterruptedException; + } + + @FunctionalInterface + public interface SyncFactoryVoid { + void execute() throws EventHubException, ExecutionException, InterruptedException; + } + + @FunctionalInterface + public interface SyncFactoryWithIllegalArgException { + T execute() throws IllegalArgumentException, EventHubException, ExecutionException, InterruptedException; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/FaultTolerantObject.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/FaultTolerantObject.java new file mode 100644 index 0000000000000..9aefd3889b46f --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/FaultTolerantObject.java @@ -0,0 +1,130 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.impl; + +import java.io.IOException; +import java.util.Queue; +import java.util.concurrent.ConcurrentLinkedQueue; + +public class FaultTolerantObject { + + private final Operation openTask; + private final Operation closeTask; + private final Queue> openCallbacks; + private final Queue> closeCallbacks; + + private T innerObject; + private boolean creatingNewInnerObject; + private boolean closingInnerObject; + + public FaultTolerantObject( + final Operation openAsync, + final Operation closeAsync) { + + this.openTask = openAsync; + this.closeTask = closeAsync; + this.openCallbacks = new ConcurrentLinkedQueue<>(); + this.closeCallbacks = new ConcurrentLinkedQueue<>(); + } + + // should be invoked from reactor thread + T unsafeGetIfOpened() { + + if (innerObject != null && innerObject.getState() == IOObject.IOObjectState.OPENED) + return innerObject; + + return null; + } + + public void runOnOpenedObject( + final ReactorDispatcher dispatcher, + final OperationResult openCallback) { + + try { + dispatcher.invoke(new DispatchHandler() { + @Override + public void onEvent() { + if (!creatingNewInnerObject + && (innerObject == null || innerObject.getState() == IOObject.IOObjectState.CLOSED || + innerObject.getState() == IOObject.IOObjectState.CLOSING)) { + creatingNewInnerObject = true; + + try { + openCallbacks.offer(openCallback); + openTask.run(new OperationResult() { + @Override + public void onComplete(T result) { + innerObject = result; + for (OperationResult callback : openCallbacks) + callback.onComplete(result); + + openCallbacks.clear(); + } + + @Override + public void onError(Exception error) { + for (OperationResult callback : openCallbacks) + callback.onError(error); + + openCallbacks.clear(); + } + }); + } finally { + creatingNewInnerObject = false; + } + } else if (innerObject != null && innerObject.getState() == IOObject.IOObjectState.OPENED) { + openCallback.onComplete(innerObject); + } else { + 
openCallbacks.offer(openCallback); + } + } + }); + } catch (IOException ioException) { + openCallback.onError(ioException); + } + } + + public void close( + final ReactorDispatcher dispatcher, + final OperationResult closeCallback) { + + try { + dispatcher.invoke(new DispatchHandler() { + @Override + public void onEvent() { + if (innerObject == null || innerObject.getState() == IOObject.IOObjectState.CLOSED) { + closeCallback.onComplete(null); + } else if (!closingInnerObject && (innerObject.getState() == IOObject.IOObjectState.OPENED || innerObject.getState() == IOObject.IOObjectState.OPENING)) { + closingInnerObject = true; + closeCallbacks.offer(closeCallback); + closeTask.run(new OperationResult() { + @Override + public void onComplete(Void result) { + closingInnerObject = false; + for (OperationResult callback : closeCallbacks) + callback.onComplete(result); + + closeCallbacks.clear(); + } + + @Override + public void onError(Exception error) { + closingInnerObject = false; + for (OperationResult callback : closeCallbacks) + callback.onError(error); + + closeCallbacks.clear(); + } + }); + } else { + closeCallbacks.offer(closeCallback); + } + } + }); + } catch (IOException ioException) { + closeCallback.onError(ioException); + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/IOObject.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/IOObject.java new file mode 100644 index 0000000000000..5c9b2e0d07ea6 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/IOObject.java @@ -0,0 +1,18 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.impl; + +public interface IOObject { + + // should be run on reactor thread + public IOObjectState getState(); + + public static enum IOObjectState { + OPENING, + OPENED, + CLOSED, + CLOSING + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/IteratorUtil.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/IteratorUtil.java new file mode 100644 index 0000000000000..46e74b4ef70c7 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/IteratorUtil.java @@ -0,0 +1,51 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.impl; + +import java.util.Iterator; + +public final class IteratorUtil { + private IteratorUtil() { + } + + public static boolean sizeEquals(Iterable iterable, int expectedSize) { + Iterator iterator = iterable.iterator(); + + int currentSize = 0; + while (iterator.hasNext()) { + if (expectedSize > currentSize) { + currentSize++; + iterator.next(); + continue; + } else { + return false; + } + } + + return true; + } + + public static T getLast(Iterator iterator) { + T last = null; + while (iterator.hasNext()) { + last = iterator.next(); + } + + return last; + } + + public static T getFirst(final Iterable iterable) { + if (iterable == null) { + return null; + } + + final Iterator iterator = iterable.iterator(); + if (iterator == null) { + return null; + } + + return iterator.hasNext() ? 
iterator.next() : null; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ManagementChannel.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ManagementChannel.java new file mode 100644 index 0000000000000..8941ddb01c545 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ManagementChannel.java @@ -0,0 +1,124 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.impl; + +import org.apache.qpid.proton.Proton; +import org.apache.qpid.proton.amqp.messaging.AmqpValue; +import org.apache.qpid.proton.amqp.messaging.ApplicationProperties; +import org.apache.qpid.proton.message.Message; + +import java.io.IOException; +import java.util.Map; +import java.util.concurrent.CompletableFuture; + +import com.microsoft.azure.eventhubs.OperationCancelledException; +import com.microsoft.azure.eventhubs.TimeoutException; + +final class ManagementChannel { + + final FaultTolerantObject innerChannel; + final SessionProvider sessionProvider; + final AmqpConnection connectionEventDispatcher; + + public ManagementChannel(final SessionProvider sessionProvider, final AmqpConnection connection) { + this.sessionProvider = sessionProvider; + this.connectionEventDispatcher = connection; + + final RequestResponseCloser closer = new RequestResponseCloser(); + this.innerChannel = new FaultTolerantObject<>( + new RequestResponseOpener( + sessionProvider, + "mgmt-session", + "mgmt", + ClientConstants.MANAGEMENT_ADDRESS, + connection), + closer); + closer.setInnerChannel(this.innerChannel); + } + + public CompletableFuture> request( + final ReactorDispatcher dispatcher, + final Map request, + final long timeoutInMillis) { + // no body required + final Message requestMessage = Proton.message(); + final 
ApplicationProperties applicationProperties = new ApplicationProperties(request); + requestMessage.setApplicationProperties(applicationProperties); + final CompletableFuture> resultFuture = new CompletableFuture>(); + try { + // schedule client-timeout on the request + dispatcher.invoke((int) timeoutInMillis, + new DispatchHandler() { + @Override + public void onEvent() { + final RequestResponseChannel channel = innerChannel.unsafeGetIfOpened(); + final String errorMessage; + if (channel != null && channel.getState() == IOObject.IOObjectState.OPENED) { + final String remoteContainerId = channel.getSendLink().getSession().getConnection().getRemoteContainer(); + errorMessage = String.format("Management request timed out (%sms), after not receiving response from service. TrackingId: %s", + timeoutInMillis, StringUtil.isNullOrEmpty(remoteContainerId) ? "n/a" : remoteContainerId); + } else { + errorMessage = "Management request timed out on the client - enable info level tracing to diagnose."; + } + + resultFuture.completeExceptionally(new TimeoutException(errorMessage)); + } + }); + } catch (final IOException ioException) { + resultFuture.completeExceptionally( + new OperationCancelledException( + "Sending request failed while dispatching to Reactor, see cause for more details.", + ioException)); + + return resultFuture; + } + + // if there isn't even 5 millis left - request will not make the round-trip + // to the event hubs service. 
so don't schedule the request - let it timeout + if (timeoutInMillis > ClientConstants.MGMT_CHANNEL_MIN_RETRY_IN_MILLIS) { + this.innerChannel.runOnOpenedObject(dispatcher, + new OperationResult() { + @Override + public void onComplete(final RequestResponseChannel result) { + result.request(requestMessage, + new OperationResult() { + @Override + public void onComplete(final Message response) { + final int statusCode = (int) response.getApplicationProperties().getValue() + .get(ClientConstants.PUT_TOKEN_STATUS_CODE); + final String statusDescription = (String) response.getApplicationProperties().getValue() + .get(ClientConstants.PUT_TOKEN_STATUS_DESCRIPTION); + + if (statusCode == AmqpResponseCode.ACCEPTED.getValue() + || statusCode == AmqpResponseCode.OK.getValue()) { + if (response.getBody() != null) { + resultFuture.complete((Map) ((AmqpValue) response.getBody()).getValue()); + } + } else { + this.onError(ExceptionUtil.amqpResponseCodeToException(statusCode, statusDescription)); + } + } + + @Override + public void onError(final Exception error) { + resultFuture.completeExceptionally(error); + } + }); + } + + @Override + public void onError(Exception error) { + resultFuture.completeExceptionally(error); + } + }); + } + + return resultFuture; + } + + public void close(final ReactorDispatcher reactorDispatcher, final OperationResult closeCallback) { + this.innerChannel.close(reactorDispatcher, closeCallback); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessageReceiver.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessageReceiver.java new file mode 100644 index 0000000000000..7b43796e6256b --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessageReceiver.java @@ -0,0 +1,821 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. 
See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.impl; + +import com.microsoft.azure.eventhubs.ErrorContext; +import com.microsoft.azure.eventhubs.EventHubException; +import com.microsoft.azure.eventhubs.TimeoutException; +import org.apache.qpid.proton.Proton; +import org.apache.qpid.proton.amqp.Symbol; +import org.apache.qpid.proton.amqp.UnknownDescribedType; +import org.apache.qpid.proton.amqp.messaging.Source; +import org.apache.qpid.proton.amqp.messaging.Target; +import org.apache.qpid.proton.amqp.transport.ErrorCondition; +import org.apache.qpid.proton.amqp.transport.ReceiverSettleMode; +import org.apache.qpid.proton.amqp.transport.SenderSettleMode; +import org.apache.qpid.proton.engine.*; +import org.apache.qpid.proton.message.Message; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.security.InvalidKeyException; +import java.security.NoSuchAlgorithmException; +import java.time.Duration; +import java.time.Instant; +import java.time.ZonedDateTime; +import java.util.*; +import java.util.concurrent.CancellationException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.BiConsumer; +import java.util.function.Consumer; + +/** + * Common Receiver that abstracts all amqp related details + * translates event-driven reactor model into async receive Api + */ +public final class MessageReceiver extends ClientEntity implements AmqpReceiver, ErrorContextProvider { + private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(MessageReceiver.class); + private static final int MIN_TIMEOUT_DURATION_MILLIS = 20; + private static final int MAX_OPERATION_TIMEOUT_SCHEDULED = 2; + // TestHooks for code injection + private static volatile Consumer onOpenRetry = null; + private 
final AtomicInteger operationTimeoutScheduled = new AtomicInteger(0); + private final ConcurrentLinkedQueue pendingReceives; + private final MessagingFactory underlyingFactory; + private final String receivePath; + private final Runnable onOperationTimedout; + private final Duration operationTimeout; + private final CompletableFuture linkClose; + private final ReceiverSettingsProvider settingsProvider; + private final String tokenAudience; + private final ActiveClientTokenManager activeClientTokenManager; + private final WorkItem linkOpen; + private final ConcurrentLinkedQueue prefetchedMessages; + private final ReceiveWork receiveWork; + private final CreateAndReceive createAndReceive; + private final Object errorConditionLock; + private final Timer timer; + private volatile int nextCreditToFlow; + private volatile Receiver receiveLink; + private volatile Duration receiveTimeout; + private volatile Message lastReceivedMessage; + private volatile boolean creatingLink; + private volatile CompletableFuture openTimer; + private volatile CompletableFuture closeTimer; + private int prefetchCount; + private Exception lastKnownLinkError; + private String linkCreationTime; + + private MessageReceiver(final MessagingFactory factory, + final String name, + final String recvPath, + final int prefetchCount, + final ReceiverSettingsProvider settingsProvider) { + super(name, factory, factory.executor); + + this.underlyingFactory = factory; + this.operationTimeout = factory.getOperationTimeout(); + this.receivePath = recvPath; + this.prefetchCount = prefetchCount; + this.prefetchedMessages = new ConcurrentLinkedQueue<>(); + this.linkClose = new CompletableFuture<>(); + this.lastKnownLinkError = null; + this.receiveTimeout = factory.getOperationTimeout(); + this.settingsProvider = settingsProvider; + this.linkOpen = new WorkItem<>(new CompletableFuture<>(), factory.getOperationTimeout()); + this.timer = new Timer(factory); + + this.pendingReceives = new ConcurrentLinkedQueue<>(); 
+ this.errorConditionLock = new Object(); + + // onOperationTimeout delegate - per receive call + this.onOperationTimedout = new Runnable() { + public void run() { + MessageReceiver.this.operationTimeoutTimerFired(); + + WorkItem> topWorkItem = null; + while ((topWorkItem = MessageReceiver.this.pendingReceives.peek()) != null) { + if (topWorkItem.getTimeoutTracker().remaining().toMillis() <= MessageReceiver.MIN_TIMEOUT_DURATION_MILLIS) { + WorkItem> dequedWorkItem = MessageReceiver.this.pendingReceives.poll(); + if (dequedWorkItem != null && dequedWorkItem.getWork() != null && !dequedWorkItem.getWork().isDone()) { + dequedWorkItem.getWork().complete(null); + } else { + break; + } + } else { + if (MessageReceiver.this.shouldScheduleOperationTimeoutTimer()) { + TimeoutTracker timeoutTracker = topWorkItem.getTimeoutTracker(); + + if (TRACE_LOGGER.isDebugEnabled()) { + TRACE_LOGGER.debug( + String.format(Locale.US, + "clientId[%s], path[%s], linkName[%s] - Reschedule operation timer, current: [%s], remaining: [%s] secs", + getClientId(), + receivePath, + receiveLink.getName(), + Instant.now(), + timeoutTracker.remaining().getSeconds())); + } + + MessageReceiver.this.scheduleOperationTimer(timeoutTracker); + } + + break; + } + } + } + }; + + this.receiveWork = new ReceiveWork(); + this.createAndReceive = new CreateAndReceive(); + + this.tokenAudience = String.format(ClientConstants.TOKEN_AUDIENCE_FORMAT, underlyingFactory.getHostName(), receivePath); + + this.activeClientTokenManager = new ActiveClientTokenManager( + this, + new Runnable() { + @Override + public void run() { + try { + underlyingFactory.getCBSChannel().sendToken( + underlyingFactory.getReactorDispatcher(), + underlyingFactory.getTokenProvider().getToken(tokenAudience, ClientConstants.TOKEN_VALIDITY), + tokenAudience, + new OperationResult() { + @Override + public void onComplete(Void result) { + if (TRACE_LOGGER.isDebugEnabled()) { + TRACE_LOGGER.debug( + String.format(Locale.US, + "clientId[%s], 
path[%s], linkName[%s] - token renewed", + getClientId(), receivePath, receiveLink.getName())); + } + } + + @Override + public void onError(Exception error) { + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info( + String.format(Locale.US, + "clientId[%s], path[%s], linkName[%s], tokenRenewalFailure[%s]", + getClientId(), receivePath, receiveLink.getName(), error.getMessage())); + } + } + }); + } catch (IOException | NoSuchAlgorithmException | InvalidKeyException | RuntimeException exception) { + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info( + String.format(Locale.US, + "clientId[%s], path[%s], linkName[%s], tokenRenewalScheduleFailure[%s]", + getClientId(), receivePath, receiveLink.getName(), exception.getMessage())); + } + } + } + }, + ClientConstants.TOKEN_REFRESH_INTERVAL, + this.underlyingFactory); + } + + // @param connection Connection on which the MessageReceiver's receive AMQP link need to be created on. + // Connection has to be associated with Reactor before Creating a receiver on it. 
+ public static CompletableFuture create( + final MessagingFactory factory, + final String name, + final String recvPath, + final int prefetchCount, + final ReceiverSettingsProvider settingsProvider) { + MessageReceiver msgReceiver = new MessageReceiver( + factory, + name, + recvPath, + prefetchCount, + settingsProvider); + return msgReceiver.createLink(); + } + + public String getReceivePath() { + return this.receivePath; + } + + private CompletableFuture createLink() { + try { + this.underlyingFactory.scheduleOnReactorThread(new DispatchHandler() { + @Override + public void onEvent() { + MessageReceiver.this.createReceiveLink(); + } + }); + } catch (IOException | RejectedExecutionException schedulerException) { + this.linkOpen.getWork().completeExceptionally(schedulerException); + } + + return this.linkOpen.getWork(); + } + + private List receiveCore(final int messageCount) { + List returnMessages = null; + Message currentMessage; + + while ((currentMessage = this.pollPrefetchQueue()) != null) { + if (returnMessages == null) { + returnMessages = new LinkedList<>(); + } + + returnMessages.add(currentMessage); + if (returnMessages.size() >= messageCount) { + break; + } + } + + return returnMessages; + } + + public Duration getReceiveTimeout() { + return this.receiveTimeout; + } + + public void setReceiveTimeout(final Duration value) { + this.receiveTimeout = value; + } + + public CompletableFuture> receive(final int maxMessageCount) { + this.throwIfClosed(); + + final CompletableFuture> onReceive = new CompletableFuture<>(); + if (maxMessageCount <= 0 || maxMessageCount > this.prefetchCount) { + onReceive.completeExceptionally(new IllegalArgumentException(String.format( + Locale.US, + "Entity(%s): maxEventCount(%s) should be a positive number and should be less than prefetchCount(%s)", + this.receivePath, maxMessageCount, this.prefetchCount))); + return onReceive; + } + + if (this.shouldScheduleOperationTimeoutTimer()) { + if (TRACE_LOGGER.isDebugEnabled()) { + 
TRACE_LOGGER.debug( + String.format(Locale.US, + "clientId[%s], path[%s], linkName[%s] - schedule operation timer, current: [%s], remaining: [%s] secs", + this.getClientId(), + this.receivePath, + this.receiveLink.getName(), + Instant.now(), + this.receiveTimeout.getSeconds())); + } + + timer.schedule(this.onOperationTimedout, this.receiveTimeout); + } + + pendingReceives.offer(new ReceiveWorkItem(onReceive, receiveTimeout, maxMessageCount)); + + try { + this.underlyingFactory.scheduleOnReactorThread(this.createAndReceive); + } catch (IOException | RejectedExecutionException schedulerException) { + onReceive.completeExceptionally(schedulerException); + } + + return onReceive; + } + + @Override + public void onOpenComplete(Exception exception) { + this.creatingLink = false; + + if (exception == null) { + if (this.linkOpen != null && !this.linkOpen.getWork().isDone()) { + this.linkOpen.getWork().complete(this); + } + + this.cancelOpenTimer(); + + if (this.getIsClosingOrClosed()) { + return; + } + + synchronized (this.errorConditionLock) { + this.lastKnownLinkError = null; + } + + this.underlyingFactory.getRetryPolicy().resetRetryCount(this.underlyingFactory.getClientId()); + + this.nextCreditToFlow = 0; + this.sendFlow(this.prefetchCount - this.prefetchedMessages.size()); + + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info(String.format("onOpenComplete - clientId[%s], receiverPath[%s], linkName[%s], updated-link-credit[%s], sentCredits[%s]", + this.getClientId(), this.receivePath, this.receiveLink.getName(), this.receiveLink.getCredit(), this.prefetchCount)); + } + } else { + synchronized (this.errorConditionLock) { + this.lastKnownLinkError = exception; + } + + if (this.linkOpen != null && !this.linkOpen.getWork().isDone()) { + final Duration nextRetryInterval = this.underlyingFactory.getRetryPolicy().getNextRetryInterval( + this.getClientId(), exception, this.linkOpen.getTimeoutTracker().remaining()); + if (nextRetryInterval != null) { + if (onOpenRetry != 
null) { + onOpenRetry.accept(this); + } + + try { + this.underlyingFactory.scheduleOnReactorThread((int) nextRetryInterval.toMillis(), new DispatchHandler() { + @Override + public void onEvent() { + if (!MessageReceiver.this.getIsClosingOrClosed() + && (receiveLink == null || receiveLink.getLocalState() == EndpointState.CLOSED || receiveLink.getRemoteState() == EndpointState.CLOSED)) { + createReceiveLink(); + underlyingFactory.getRetryPolicy().incrementRetryCount(getClientId()); + } + } + }); + } catch (IOException | RejectedExecutionException schedulerException) { + if (TRACE_LOGGER.isWarnEnabled()) { + TRACE_LOGGER.warn( + String.format(Locale.US, "clientId[%s], receiverPath[%s], scheduling createLink encountered error: %s", + this.getClientId(), this.receivePath, schedulerException.getLocalizedMessage())); + } + + this.cancelOpen(schedulerException); + } + } else if (exception instanceof EventHubException && !((EventHubException) exception).getIsTransient()) { + this.cancelOpen(exception); + } + } else { + this.cancelOpenTimer(); + } + } + } + + private void cancelOpen(final Exception completionException) { + this.setClosed(); + ExceptionUtil.completeExceptionally(this.linkOpen.getWork(), completionException, this); + this.cancelOpenTimer(); + } + + private void cancelOpenTimer() { + if (this.openTimer != null && !this.openTimer.isCancelled()) { + this.openTimer.cancel(false); + } + } + + @Override + public void onReceiveComplete(Delivery delivery) { + int msgSize = delivery.pending(); + byte[] buffer = new byte[msgSize]; + + int read = receiveLink.recv(buffer, 0, msgSize); + + Message message = Proton.message(); + message.decode(buffer, 0, read); + + delivery.settle(); + + this.prefetchedMessages.add(message); + this.underlyingFactory.getRetryPolicy().resetRetryCount(this.getClientId()); + + this.receiveWork.onEvent(); + } + + @Override + public void onError(final Exception exception) { + this.prefetchedMessages.clear(); + + if (this.getIsClosingOrClosed()) { 
+ if (this.closeTimer != null) + this.closeTimer.cancel(false); + + this.drainPendingReceives(exception); + this.linkClose.complete(null); + } else { + synchronized (this.errorConditionLock) { + this.lastKnownLinkError = exception == null ? this.lastKnownLinkError : exception; + } + + final Exception completionException = exception == null + ? new EventHubException(true, String.format(Locale.US, + "Entity(%s): client encountered transient error for unknown reasons, please retry the operation.", this.receivePath)) + : exception; + + if (TRACE_LOGGER.isWarnEnabled()) { + TRACE_LOGGER.warn( + String.format(Locale.US, "clientId[%s], receiverPath[%s], linkName[%s], onError: %s", + this.getClientId(), + this.receivePath, + this.receiveLink.getName(), + completionException)); + } + + this.onOpenComplete(completionException); + + final WorkItem> workItem = this.pendingReceives.peek(); + final Duration nextRetryInterval = workItem != null && workItem.getTimeoutTracker() != null + ? this.underlyingFactory.getRetryPolicy().getNextRetryInterval(this.getClientId(), completionException, workItem.getTimeoutTracker().remaining()) + : null; + + boolean recreateScheduled = true; + if (nextRetryInterval != null) { + try { + this.underlyingFactory.scheduleOnReactorThread((int) nextRetryInterval.toMillis(), new DispatchHandler() { + @Override + public void onEvent() { + if (!MessageReceiver.this.getIsClosingOrClosed() + && (receiveLink.getLocalState() == EndpointState.CLOSED || receiveLink.getRemoteState() == EndpointState.CLOSED)) { + createReceiveLink(); + underlyingFactory.getRetryPolicy().incrementRetryCount(getClientId()); + } + } + }); + } catch (IOException | RejectedExecutionException ignore) { + recreateScheduled = false; + if (TRACE_LOGGER.isWarnEnabled()) { + TRACE_LOGGER.warn( + String.format(Locale.US, "clientId[%s], receiverPath[%s], linkName[%s], scheduling createLink encountered error: %s", + this.getClientId(), + this.receivePath, + this.receiveLink.getName(), 
ignore.getLocalizedMessage())); + } + } + } + + if (nextRetryInterval == null || !recreateScheduled) { + this.drainPendingReceives(completionException); + } + } + } + + private void drainPendingReceives(final Exception exception) { + WorkItem> workItem; + final boolean shouldReturnNull = (exception == null + || (exception instanceof EventHubException && ((EventHubException) exception).getIsTransient())); + + while ((workItem = this.pendingReceives.poll()) != null) { + final CompletableFuture> future = workItem.getWork(); + if (shouldReturnNull) { + future.complete(null); + } else { + ExceptionUtil.completeExceptionally(future, exception, this); + } + } + } + + private void scheduleOperationTimer(final TimeoutTracker tracker) { + if (tracker != null) { + timer.schedule(this.onOperationTimedout, tracker.remaining()); + } + } + + private void createReceiveLink() { + synchronized (this.errorConditionLock) { + if (this.creatingLink) { + return; + } + + this.creatingLink = true; + } + + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info( + String.format(Locale.US, + "clientId[%s], path[%s], operationTimeout[%s], creating a receive link", + this.getClientId(), this.receivePath, this.operationTimeout)); + } + + this.linkCreationTime = Instant.now().toString(); + + this.scheduleLinkOpenTimeout(TimeoutTracker.create(this.operationTimeout)); + + final Consumer onSessionOpen = new Consumer() { + @Override + public void accept(Session session) { + // if the MessageReceiver is closed - we no-longer need to create the link + if (MessageReceiver.this.getIsClosingOrClosed()) { + + session.close(); + return; + } + + final Source source = new Source(); + source.setAddress(receivePath); + + final Map filterMap = MessageReceiver.this.settingsProvider.getFilter(MessageReceiver.this.lastReceivedMessage); + if (filterMap != null) + source.setFilter(filterMap); + + final Receiver receiver = session.receiver(TrackingUtil.getLinkName(session)); + receiver.setSource(source); + + final 
Target target = new Target(); + + receiver.setTarget(target); + + // use explicit settlement via dispositions (not pre-settled) + receiver.setSenderSettleMode(SenderSettleMode.UNSETTLED); + receiver.setReceiverSettleMode(ReceiverSettleMode.SECOND); + + final Map linkProperties = MessageReceiver.this.settingsProvider.getProperties(); + if (linkProperties != null) + receiver.setProperties(linkProperties); + + final Symbol[] desiredCapabilities = MessageReceiver.this.settingsProvider.getDesiredCapabilities(); + if (desiredCapabilities != null) + receiver.setDesiredCapabilities(desiredCapabilities); + + final ReceiveLinkHandler handler = new ReceiveLinkHandler(MessageReceiver.this); + BaseHandler.setHandler(receiver, handler); + + if (MessageReceiver.this.receiveLink != null) { + MessageReceiver.this.underlyingFactory.deregisterForConnectionError(MessageReceiver.this.receiveLink); + } + + MessageReceiver.this.underlyingFactory.registerForConnectionError(receiver); + + receiver.open(); + + synchronized (MessageReceiver.this.errorConditionLock) { + MessageReceiver.this.receiveLink = receiver; + } + } + }; + + final BiConsumer onSessionOpenFailed = new BiConsumer() { + @Override + public void accept(ErrorCondition t, Exception u) { + if (t != null) { + onError((t.getCondition() != null) ? 
ExceptionUtil.toException(t) : null); + } else if (u != null) { + onError(u); + } + } + }; + + try { + this.underlyingFactory.getCBSChannel().sendToken( + this.underlyingFactory.getReactorDispatcher(), + this.underlyingFactory.getTokenProvider().getToken(tokenAudience, ClientConstants.TOKEN_VALIDITY), + tokenAudience, + new OperationResult() { + @Override + public void onComplete(Void result) { + if (MessageReceiver.this.getIsClosingOrClosed()) + return; + + underlyingFactory.getSession( + receivePath, + onSessionOpen, + onSessionOpenFailed); + } + + @Override + public void onError(Exception error) { + final Exception completionException; + if (error != null && error instanceof AmqpException) { + completionException = ExceptionUtil.toException(((AmqpException) error).getError()); + if (completionException != error && completionException.getCause() == null) { + completionException.initCause(error); + } + } else { + completionException = error; + } + + MessageReceiver.this.onError(completionException); + } + }); + } catch (IOException | NoSuchAlgorithmException | InvalidKeyException | RuntimeException exception) { + MessageReceiver.this.onError(exception); + } + } + + // CONTRACT: message should be delivered to the caller of MessageReceiver.receive() only via Poll on prefetchqueue + private Message pollPrefetchQueue() { + final Message message = this.prefetchedMessages.poll(); + if (message != null) { + // message lastReceivedOffset should be up-to-date upon each poll - as recreateLink will depend on this + this.lastReceivedMessage = message; + this.sendFlow(1); + } + + return message; + } + + private void sendFlow(final int credits) { + // slow down sending the flow - to make the protocol less-chat'y + this.nextCreditToFlow += credits; + if (this.nextCreditToFlow >= this.prefetchCount || this.nextCreditToFlow >= 100) { + final int tempFlow = this.nextCreditToFlow; + this.receiveLink.flow(tempFlow); + this.nextCreditToFlow = 0; + + if (TRACE_LOGGER.isDebugEnabled()) 
{ + TRACE_LOGGER.debug(String.format("clientId[%s], receiverPath[%s], linkName[%s], updated-link-credit[%s], sentCredits[%s], ThreadId[%s]", + this.getClientId(), this.receivePath, this.receiveLink.getName(), this.receiveLink.getCredit(), tempFlow, Thread.currentThread().getId())); + } + } + } + + private void scheduleLinkOpenTimeout(final TimeoutTracker timeout) { + // timer to signal a timeout if exceeds the operationTimeout on MessagingFactory + this.openTimer = timer.schedule( + new Runnable() { + public void run() { + creatingLink = false; + + if (!linkOpen.getWork().isDone()) { + final Receiver link; + final Exception lastReportedLinkError; + synchronized (errorConditionLock) { + link = MessageReceiver.this.receiveLink; + lastReportedLinkError = MessageReceiver.this.lastKnownLinkError; + } + + final Exception operationTimedout = new TimeoutException( + String.format(Locale.US, "Open operation on entity(%s) timed out at %s.", + MessageReceiver.this.receivePath, ZonedDateTime.now()), + lastReportedLinkError); + + if (TRACE_LOGGER.isWarnEnabled()) { + TRACE_LOGGER.warn( + String.format(Locale.US, "clientId[%s], receiverPath[%s], Open call timed out", + MessageReceiver.this.getClientId(), MessageReceiver.this.receivePath), operationTimedout); + } + + ExceptionUtil.completeExceptionally(linkOpen.getWork(), operationTimedout, MessageReceiver.this); + setClosed(); + } + } + } + , timeout.remaining()); + + this.openTimer.handleAsync( + (unUsed, exception) -> { + if (exception != null + && exception instanceof Exception + && !(exception instanceof CancellationException)) { + ExceptionUtil.completeExceptionally(linkOpen.getWork(), (Exception) exception, MessageReceiver.this); + } + + return null; + }, this.executor); + } + + private void scheduleLinkCloseTimeout(final TimeoutTracker timeout) { + // timer to signal a timeout if exceeds the operationTimeout on MessagingFactory + this.closeTimer = timer.schedule( + new Runnable() { + public void run() { + if 
(!linkClose.isDone()) { + final Receiver link; + synchronized (errorConditionLock) { + link = MessageReceiver.this.receiveLink; + } + + final Exception operationTimedout = new TimeoutException(String.format(Locale.US, "Close operation on Receive Link(%s) timed out at %s", + link.getName(), ZonedDateTime.now())); + + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info( + String.format(Locale.US, "clientId[%s], receiverPath[%s], linkName[%s], Close call timed out", + MessageReceiver.this.getClientId(), MessageReceiver.this.receivePath, link.getName()), + operationTimedout); + } + + ExceptionUtil.completeExceptionally(linkClose, operationTimedout, MessageReceiver.this); + MessageReceiver.this.onError((Exception) null); + } + } + } + , timeout.remaining()); + + this.closeTimer.handleAsync( + (unUsed, exception) -> { + if (exception != null && exception instanceof Exception && !(exception instanceof CancellationException)) { + ExceptionUtil.completeExceptionally(linkClose, (Exception) exception, MessageReceiver.this); + } + + return null; + }, this.executor); + } + + private boolean shouldScheduleOperationTimeoutTimer() { + boolean scheduleTimer = this.operationTimeoutScheduled.getAndIncrement() < MAX_OPERATION_TIMEOUT_SCHEDULED; + if (!scheduleTimer) { + this.operationTimeoutScheduled.decrementAndGet(); + } + + return scheduleTimer; + } + + private void operationTimeoutTimerFired() { + MessageReceiver.this.operationTimeoutScheduled.decrementAndGet(); + } + + @Override + public void onClose(ErrorCondition condition) { + if (this.receiveLink != null) { + this.underlyingFactory.deregisterForConnectionError(MessageReceiver.this.receiveLink); + } + + final Exception completionException = (condition != null && condition.getCondition() != null) ? 
ExceptionUtil.toException(condition) : null; + this.onError(completionException); + } + + @Override + public ErrorContext getContext() { + final Receiver link; + synchronized (this.errorConditionLock) { + link = this.receiveLink; + } + + final boolean isLinkOpened = this.linkOpen != null && this.linkOpen.getWork().isDone(); + final String referenceId = link != null && link.getRemoteProperties() != null && link.getRemoteProperties().containsKey(ClientConstants.TRACKING_ID_PROPERTY) + ? link.getRemoteProperties().get(ClientConstants.TRACKING_ID_PROPERTY).toString() + : ((link != null) ? link.getName() : null); + + final ReceiverContext errorContext = new ReceiverContext(this.underlyingFactory != null ? this.underlyingFactory.getHostName() : null, + this.receivePath, + referenceId, + isLinkOpened ? this.prefetchCount : null, + isLinkOpened && link != null ? link.getCredit() : null, + isLinkOpened && this.prefetchedMessages != null ? this.prefetchedMessages.size() : null); + + return errorContext; + } + + @Override + protected CompletableFuture onClose() { + if (!this.getIsClosed()) { + try { + this.activeClientTokenManager.cancel(); + scheduleLinkCloseTimeout(TimeoutTracker.create(operationTimeout)); + + this.underlyingFactory.scheduleOnReactorThread(new DispatchHandler() { + @Override + public void onEvent() { + if (receiveLink != null && receiveLink.getLocalState() != EndpointState.CLOSED) { + receiveLink.close(); + } else if (receiveLink == null || receiveLink.getRemoteState() == EndpointState.CLOSED) { + if (closeTimer != null && !closeTimer.isCancelled()) { + closeTimer.cancel(false); + } + + linkClose.complete(null); + } + } + }); + } catch (IOException | RejectedExecutionException schedulerException) { + this.linkClose.completeExceptionally(schedulerException); + } + } + + return this.linkClose; + } + + @Override + protected Exception getLastKnownError() { + synchronized (this.errorConditionLock) { + return this.lastKnownLinkError; + } + } + + private static 
class ReceiveWorkItem extends WorkItem> { + private final int maxMessageCount; + + public ReceiveWorkItem(CompletableFuture> completableFuture, Duration timeout, final int maxMessageCount) { + super(completableFuture, timeout); + this.maxMessageCount = maxMessageCount; + } + } + + private final class ReceiveWork extends DispatchHandler { + + @Override + public void onEvent() { + + ReceiveWorkItem pendingReceive; + while (!prefetchedMessages.isEmpty() && (pendingReceive = pendingReceives.poll()) != null) { + if (pendingReceive.getWork() != null && !pendingReceive.getWork().isDone()) { + Collection receivedMessages = receiveCore(pendingReceive.maxMessageCount); + pendingReceive.getWork().complete(receivedMessages); + } + } + } + } + + private final class CreateAndReceive extends DispatchHandler { + + @Override + public void onEvent() { + receiveWork.onEvent(); + + if (!MessageReceiver.this.getIsClosingOrClosed() + && (receiveLink.getLocalState() == EndpointState.CLOSED || receiveLink.getRemoteState() == EndpointState.CLOSED)) { + createReceiveLink(); + } + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessageSender.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessageSender.java new file mode 100644 index 0000000000000..6f67323ce9970 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessageSender.java @@ -0,0 +1,1009 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.impl; + +import com.microsoft.azure.eventhubs.*; +import org.apache.qpid.proton.Proton; +import org.apache.qpid.proton.amqp.Binary; +import org.apache.qpid.proton.amqp.UnsignedLong; +import org.apache.qpid.proton.amqp.messaging.*; +import org.apache.qpid.proton.amqp.transport.DeliveryState; +import org.apache.qpid.proton.amqp.transport.ErrorCondition; +import org.apache.qpid.proton.amqp.transport.SenderSettleMode; +import org.apache.qpid.proton.engine.*; +import org.apache.qpid.proton.engine.impl.DeliveryImpl; +import org.apache.qpid.proton.message.Message; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.BufferOverflowException; +import java.security.InvalidKeyException; +import java.security.NoSuchAlgorithmException; +import java.time.Duration; +import java.time.Instant; +import java.time.ZonedDateTime; +import java.util.*; +import java.util.concurrent.CancellationException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.RejectedExecutionException; +import java.util.function.BiConsumer; +import java.util.function.Consumer; + +/** + * Abstracts all amqp related details + * translates event-driven reactor model into async send Api + */ +public final class MessageSender extends ClientEntity implements AmqpSender, ErrorContextProvider { + private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(MessageSender.class); + private static final String SEND_TIMED_OUT = "Send operation timed out"; + // TestHooks for code injection + private static volatile Consumer onOpenRetry = null; + private final MessagingFactory underlyingFactory; + private final String sendPath; + private final Duration operationTimeout; + private final RetryPolicy retryPolicy; + private final CompletableFuture linkClose; + private final Object pendingSendLock; + private final ConcurrentHashMap> pendingSendsData; + 
private final PriorityQueue pendingSends; + private final DispatchHandler sendWork; + private final ActiveClientTokenManager activeClientTokenManager; + private final String tokenAudience; + private final Object errorConditionLock; + private final Timer timer; + private volatile int maxMessageSize; + private volatile Sender sendLink; + private volatile CompletableFuture linkFirstOpen; + private volatile TimeoutTracker openLinkTracker; + private volatile boolean creatingLink; + private volatile CompletableFuture closeTimer; + private volatile CompletableFuture openTimer; + private Exception lastKnownLinkError; + private Instant lastKnownErrorReportedAt; + private String linkCreationTime; + + private MessageSender(final MessagingFactory factory, final String sendLinkName, final String senderPath) { + super(sendLinkName, factory, factory.executor); + + this.sendPath = senderPath; + this.underlyingFactory = factory; + this.operationTimeout = factory.getOperationTimeout(); + this.timer = new Timer(factory); + this.lastKnownLinkError = null; + this.lastKnownErrorReportedAt = Instant.EPOCH; + this.retryPolicy = factory.getRetryPolicy(); + this.maxMessageSize = ClientConstants.MAX_MESSAGE_LENGTH_BYTES; + this.errorConditionLock = new Object(); + this.pendingSendLock = new Object(); + this.pendingSendsData = new ConcurrentHashMap<>(); + this.pendingSends = new PriorityQueue<>(1000, new DeliveryTagComparator()); + this.linkClose = new CompletableFuture<>(); + this.linkFirstOpen = new CompletableFuture<>(); + this.openLinkTracker = TimeoutTracker.create(factory.getOperationTimeout()); + this.sendWork = new DispatchHandler() { + @Override + public void onEvent() { + MessageSender.this.processSendWork(); + } + }; + this.tokenAudience = String.format(ClientConstants.TOKEN_AUDIENCE_FORMAT, underlyingFactory.getHostName(), sendPath); + this.activeClientTokenManager = new ActiveClientTokenManager( + this, + new Runnable() { + @Override + public void run() { + try { + 
underlyingFactory.getCBSChannel().sendToken( + underlyingFactory.getReactorDispatcher(), + underlyingFactory.getTokenProvider().getToken(tokenAudience, ClientConstants.TOKEN_VALIDITY), + tokenAudience, + new OperationResult() { + @Override + public void onComplete(Void result) { + if (TRACE_LOGGER.isDebugEnabled()) { + TRACE_LOGGER.debug(String.format(Locale.US, + "clientId[%s], path[%s], linkName[%s] - token renewed", + getClientId(), sendPath, sendLink.getName())); + } + } + + @Override + public void onError(Exception error) { + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info(String.format(Locale.US, + "clientId[%s], path[%s], linkName[%s] - tokenRenewalFailure[%s]", + getClientId(), sendPath, sendLink.getName(), error.getMessage())); + } + } + }); + } catch (IOException | NoSuchAlgorithmException | InvalidKeyException | RuntimeException exception) { + if (TRACE_LOGGER.isWarnEnabled()) { + TRACE_LOGGER.warn(String.format(Locale.US, + "clientId[%s], path[%s], linkName[%s] - tokenRenewalScheduleFailure[%s]", + getClientId(), sendPath, sendLink.getName(), exception.getMessage())); + } + } + } + }, + ClientConstants.TOKEN_REFRESH_INTERVAL, + this.underlyingFactory); + } + + public static CompletableFuture create( + final MessagingFactory factory, + final String sendLinkName, + final String senderPath) { + final MessageSender msgSender = new MessageSender(factory, sendLinkName, senderPath); + + try { + msgSender.underlyingFactory.scheduleOnReactorThread(new DispatchHandler() { + @Override + public void onEvent() { + msgSender.createSendLink(); + } + }); + } catch (IOException | RejectedExecutionException schedulerException) { + msgSender.linkFirstOpen.completeExceptionally(schedulerException); + } + + return msgSender.linkFirstOpen; + } + + public String getSendPath() { + return this.sendPath; + } + + public int getMaxMessageSize() { + return this.maxMessageSize; + } + + private CompletableFuture send(byte[] bytes, int arrayOffset, int messageFormat) { + 
return this.send(bytes, arrayOffset, messageFormat, null, null); + } + + private CompletableFuture sendCore( + final byte[] bytes, + final int arrayOffset, + final int messageFormat, + final CompletableFuture onSend, + final TimeoutTracker tracker, + final Exception lastKnownError, + final CompletableFuture timeoutTask) { + this.throwIfClosed(); + + final boolean isRetrySend = (onSend != null); + + final CompletableFuture onSendFuture = (onSend == null) ? new CompletableFuture<>() : onSend; + + final ReplayableWorkItem sendWaiterData = (tracker == null) ? + new ReplayableWorkItem<>(bytes, arrayOffset, messageFormat, onSendFuture, this.operationTimeout) : + new ReplayableWorkItem<>(bytes, arrayOffset, messageFormat, onSendFuture, tracker); + + final TimeoutTracker currentSendTracker = sendWaiterData.getTimeoutTracker(); + final String deliveryTag = UUID.randomUUID().toString().replace("-", StringUtil.EMPTY) + "_" + currentSendTracker.elapsed().getSeconds(); + + if (lastKnownError != null) { + sendWaiterData.setLastKnownException(lastKnownError); + } + + if (timeoutTask != null) + timeoutTask.cancel(false); + + final CompletableFuture timeoutTimerTask = this.timer.schedule( + new SendTimeout(deliveryTag, sendWaiterData), + currentSendTracker.remaining()); + + // if the timeoutTask completed with scheduling error - notify sender + if (timeoutTimerTask.isCompletedExceptionally()) { + timeoutTimerTask.handleAsync( + (unUsed, exception) -> { + if (exception != null && !(exception instanceof CancellationException)) + onSendFuture.completeExceptionally( + new OperationCancelledException(String.format(Locale.US, + "Entity(%s): send failed while dispatching to Reactor, see cause for more details.", + this.sendPath), exception)); + + return null; + }, this.executor); + + return onSendFuture; + } + + sendWaiterData.setTimeoutTask(timeoutTimerTask); + + synchronized (this.pendingSendLock) { + this.pendingSendsData.put(deliveryTag, sendWaiterData); + this.pendingSends.offer(new 
WeightedDeliveryTag(deliveryTag, isRetrySend ? 1 : 0)); + } + + try { + this.underlyingFactory.scheduleOnReactorThread(this.sendWork); + } catch (IOException | RejectedExecutionException schedulerException) { + onSendFuture.completeExceptionally( + new OperationCancelledException(String.format(Locale.US, + "Entity(%s): send failed while dispatching to Reactor, see cause for more details.", + this.sendPath), schedulerException)); + } + + return onSendFuture; + } + + private CompletableFuture send( + final byte[] bytes, + final int arrayOffset, + final int messageFormat, + final CompletableFuture onSend, + final TimeoutTracker tracker) { + return this.sendCore(bytes, arrayOffset, messageFormat, onSend, tracker, null, null); + } + + public CompletableFuture send(final Iterable messages) { + if (messages == null || IteratorUtil.sizeEquals(messages, 0)) { + throw new IllegalArgumentException(String.format(Locale.US, + "Entity[%s}: sending Empty batch of messages is not allowed.", this.sendPath)); + } + + final Message firstMessage = messages.iterator().next(); + if (IteratorUtil.sizeEquals(messages, 1)) { + return this.send(firstMessage); + } + + // proton-j doesn't support multiple dataSections to be part of AmqpMessage + // here's the alternate approach provided by them: https://github.com/apache/qpid-proton/pull/54 + final Message batchMessage = Proton.message(); + batchMessage.setMessageAnnotations(firstMessage.getMessageAnnotations()); + + final int maxMessageSizeTemp = this.maxMessageSize; + + final byte[] bytes = new byte[maxMessageSizeTemp]; + int encodedSize = batchMessage.encode(bytes, 0, maxMessageSizeTemp); + int byteArrayOffset = encodedSize; + + for (final Message amqpMessage : messages) { + final Message messageWrappedByData = Proton.message(); + + int payloadSize = AmqpUtil.getDataSerializedSize(amqpMessage); + int allocationSize = Math.min(payloadSize + ClientConstants.MAX_EVENTHUB_AMQP_HEADER_SIZE_BYTES, maxMessageSizeTemp); + + byte[] 
messageBytes = new byte[allocationSize]; + int messageSizeBytes = amqpMessage.encode(messageBytes, 0, allocationSize); + messageWrappedByData.setBody(new Data(new Binary(messageBytes, 0, messageSizeBytes))); + + try { + encodedSize = messageWrappedByData.encode(bytes, byteArrayOffset, maxMessageSizeTemp - byteArrayOffset - 1); + } catch (BufferOverflowException exception) { + final CompletableFuture sendTask = new CompletableFuture<>(); + sendTask.completeExceptionally(new PayloadSizeExceededException(String.format(Locale.US, + "Entity(%s): size of the payload exceeded Maximum message size: %s kb", + this.sendPath, maxMessageSizeTemp / 1024), exception)); + return sendTask; + } + + byteArrayOffset = byteArrayOffset + encodedSize; + } + + return this.send(bytes, byteArrayOffset, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT); + } + + public CompletableFuture send(Message msg) { + int payloadSize = AmqpUtil.getDataSerializedSize(msg); + + final int maxMessageSizeTemp = this.maxMessageSize; + int allocationSize = Math.min(payloadSize + ClientConstants.MAX_EVENTHUB_AMQP_HEADER_SIZE_BYTES, maxMessageSizeTemp); + + final byte[] bytes = new byte[allocationSize]; + int encodedSize = 0; + try { + encodedSize = msg.encode(bytes, 0, allocationSize); + } catch (BufferOverflowException exception) { + final CompletableFuture sendTask = new CompletableFuture(); + sendTask.completeExceptionally(new PayloadSizeExceededException(String.format(Locale.US, + "Entity(%s): size of the payload exceeded Maximum message size: %s kb", + this.sendPath, maxMessageSizeTemp / 1024), exception)); + return sendTask; + } + + return this.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT); + } + + @Override + public void onOpenComplete(Exception completionException) { + this.creatingLink = false; + + if (completionException == null) { + if (this.getIsClosingOrClosed()) { + this.sendLink.close(); + return; + } + + synchronized (this.errorConditionLock) { + this.lastKnownLinkError = null; + } + + 
this.retryPolicy.resetRetryCount(this.getClientId()); + + final UnsignedLong remoteMaxMessageSize = this.sendLink.getRemoteMaxMessageSize(); + if (remoteMaxMessageSize != null) { + this.maxMessageSize = remoteMaxMessageSize.intValue(); + } + + this.cancelOpenTimer(); + + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info(String.format("onOpenComplete - clientId[%s], sendPath[%s], linkName[%s]", + this.getClientId(), this.sendPath, this.sendLink.getName())); + } + + if (!this.linkFirstOpen.isDone()) { + this.linkFirstOpen.complete(this); + } else { + synchronized (this.pendingSendLock) { + if (!this.pendingSendsData.isEmpty()) { + final List unacknowledgedSends = new LinkedList<>(); + unacknowledgedSends.addAll(this.pendingSendsData.keySet()); + + if (unacknowledgedSends.size() > 0) { + final Iterator reverseReader = unacknowledgedSends.iterator(); + while (reverseReader.hasNext()) { + final String unacknowledgedSend = reverseReader.next(); + if (this.pendingSendsData.get(unacknowledgedSend).isWaitingForAck()) { + this.pendingSends.offer(new WeightedDeliveryTag(unacknowledgedSend, 1)); + } + } + } + + unacknowledgedSends.clear(); + } + } + } + } else { + if (!this.linkFirstOpen.isDone()) { + final Duration nextRetryInterval = this.retryPolicy.getNextRetryInterval( + this.getClientId(), completionException, this.openLinkTracker.remaining()); + + if (nextRetryInterval != null) { + if (onOpenRetry != null) { + onOpenRetry.accept(this); + } + + try { + this.underlyingFactory.scheduleOnReactorThread((int) nextRetryInterval.toMillis(), new DispatchHandler() { + @Override + public void onEvent() { + if (!MessageSender.this.getIsClosingOrClosed() + && (sendLink == null || sendLink.getLocalState() == EndpointState.CLOSED || sendLink.getRemoteState() == EndpointState.CLOSED)) { + recreateSendLink(); + } + } + }); + } catch (IOException | RejectedExecutionException schedulerException) { + if (TRACE_LOGGER.isWarnEnabled()) { + TRACE_LOGGER.warn( + String.format(Locale.US, 
"clientId[%s], senderPath[%s], scheduling createLink encountered error: %s", + this.getClientId(), this.sendPath, schedulerException.getLocalizedMessage())); + } + + this.cancelOpen(schedulerException); + } + } else if (completionException instanceof EventHubException + && !((EventHubException) completionException).getIsTransient()) { + this.cancelOpen(completionException); + } + } else { + this.cancelOpenTimer(); + } + } + } + + private void cancelOpen(final Exception completionException) { + this.setClosed(); + ExceptionUtil.completeExceptionally(this.linkFirstOpen, completionException, this); + this.cancelOpenTimer(); + } + + private void cancelOpenTimer() { + if (this.openTimer != null && !this.openTimer.isCancelled()) { + this.openTimer.cancel(false); + } + } + + @Override + public void onClose(final ErrorCondition condition) { + if (this.sendLink != null) { + this.underlyingFactory.deregisterForConnectionError(this.sendLink); + } + + final Exception completionException = (condition != null && condition.getCondition() != null) ? ExceptionUtil.toException(condition) : null; + this.onError(completionException); + } + + @Override + public void onError(final Exception completionException) { + if (this.getIsClosingOrClosed()) { + if (this.closeTimer != null && !this.closeTimer.isDone()) + this.closeTimer.cancel(false); + + synchronized (this.pendingSendLock) { + for (Map.Entry> pendingSend : this.pendingSendsData.entrySet()) { + ExceptionUtil.completeExceptionally(pendingSend.getValue().getWork(), + completionException == null + ? new OperationCancelledException(String.format(Locale.US, + "Entity(%s): send cancelled as the Sender instance is Closed before the sendOperation completed.", + this.sendPath)) + : completionException, + this); + } + + this.pendingSendsData.clear(); + this.pendingSends.clear(); + } + + this.linkClose.complete(null); + + return; + } else { + synchronized (this.errorConditionLock) { + this.lastKnownLinkError = completionException == null ? 
this.lastKnownLinkError : completionException; + this.lastKnownErrorReportedAt = Instant.now(); + } + + final Exception finalCompletionException = completionException == null + ? new EventHubException(true, String.format(Locale.US, + "Entity(%s): client encountered transient error for unknown reasons, please retry the operation.", + this.sendPath)) : completionException; + + this.onOpenComplete(finalCompletionException); + + final Map.Entry> pendingSendEntry = IteratorUtil.getFirst(this.pendingSendsData.entrySet()); + if (pendingSendEntry != null && pendingSendEntry.getValue() != null) { + final TimeoutTracker tracker = pendingSendEntry.getValue().getTimeoutTracker(); + if (tracker != null) { + final Duration nextRetryInterval = this.retryPolicy.getNextRetryInterval(this.getClientId(), finalCompletionException, tracker.remaining()); + boolean scheduledRecreate = true; + + if (nextRetryInterval != null) { + try { + this.underlyingFactory.scheduleOnReactorThread((int) nextRetryInterval.toMillis(), new DispatchHandler() { + @Override + public void onEvent() { + if (!MessageSender.this.getIsClosingOrClosed() + && (sendLink.getLocalState() == EndpointState.CLOSED || sendLink.getRemoteState() == EndpointState.CLOSED)) { + recreateSendLink(); + } + } + }); + } catch (IOException | RejectedExecutionException ignore) { + scheduledRecreate = false; + } + } + + if (nextRetryInterval == null || !scheduledRecreate) { + synchronized (this.pendingSendLock) { + for (Map.Entry> pendingSend : this.pendingSendsData.entrySet()) { + this.cleanupFailedSend(pendingSend.getValue(), finalCompletionException); + } + + this.pendingSendsData.clear(); + this.pendingSends.clear(); + } + } + } + } + } + } + + @Override + public void onSendComplete(final Delivery delivery) { + final DeliveryState outcome = delivery.getRemoteState(); + final String deliveryTag = new String(delivery.getTag()); + + if (TRACE_LOGGER.isTraceEnabled()) + TRACE_LOGGER.trace( + String.format( + Locale.US, + 
"clientId[%s], path[%s], linkName[%s], deliveryTag[%s]", + this.getClientId(), this.sendPath, this.sendLink.getName(), deliveryTag)); + + final ReplayableWorkItem pendingSendWorkItem = this.pendingSendsData.remove(deliveryTag); + + if (pendingSendWorkItem != null) { + if (outcome instanceof Accepted) { + synchronized (this.errorConditionLock) { + this.lastKnownLinkError = null; + } + + this.retryPolicy.resetRetryCount(this.getClientId()); + + pendingSendWorkItem.getTimeoutTask().cancel(false); + pendingSendWorkItem.clearMessage(); + pendingSendWorkItem.getWork().complete(null); + } else if (outcome instanceof Rejected) { + final Rejected rejected = (Rejected) outcome; + final ErrorCondition error = rejected.getError(); + + final Exception exception = ExceptionUtil.toException(error); + + if (ExceptionUtil.isGeneralSendError(error.getCondition())) { + synchronized (this.errorConditionLock) { + this.lastKnownLinkError = exception; + this.lastKnownErrorReportedAt = Instant.now(); + } + } + + final Duration retryInterval = this.retryPolicy.getNextRetryInterval( + this.getClientId(), exception, pendingSendWorkItem.getTimeoutTracker().remaining()); + if (retryInterval == null) { + this.cleanupFailedSend(pendingSendWorkItem, exception); + } else { + pendingSendWorkItem.setLastKnownException(exception); + try { + this.underlyingFactory.scheduleOnReactorThread((int) retryInterval.toMillis(), + new DispatchHandler() { + @Override + public void onEvent() { + MessageSender.this.sendCore( + pendingSendWorkItem.getMessage(), + pendingSendWorkItem.getEncodedMessageSize(), + pendingSendWorkItem.getMessageFormat(), + pendingSendWorkItem.getWork(), + pendingSendWorkItem.getTimeoutTracker(), + pendingSendWorkItem.getLastKnownException(), + pendingSendWorkItem.getTimeoutTask()); + } + }); + } catch (IOException | RejectedExecutionException schedulerException) { + exception.initCause(schedulerException); + this.cleanupFailedSend( + pendingSendWorkItem, + new EventHubException(false, 
String.format(Locale.US, + "Entity(%s): send operation failed while scheduling a retry on Reactor, see cause for more details.", + this.sendPath), + schedulerException)); + } + } + } else if (outcome instanceof Released) { + this.cleanupFailedSend(pendingSendWorkItem, new OperationCancelledException(outcome.toString())); + } else { + this.cleanupFailedSend(pendingSendWorkItem, new EventHubException(false, outcome.toString())); + } + } else { + if (TRACE_LOGGER.isDebugEnabled()) + TRACE_LOGGER.debug( + String.format(Locale.US, "clientId[%s]. path[%s], linkName[%s], delivery[%s] - mismatch (or send timed out)", + this.getClientId(), this.sendPath, this.sendLink.getName(), deliveryTag)); + } + } + + private void cleanupFailedSend(final ReplayableWorkItem failedSend, final Exception exception) { + if (failedSend.getTimeoutTask() != null) + failedSend.getTimeoutTask().cancel(false); + + ExceptionUtil.completeExceptionally(failedSend.getWork(), exception, this); + } + + private void createSendLink() { + synchronized (this.errorConditionLock) { + if (this.creatingLink) { + return; + } + + this.creatingLink = true; + } + + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info( + String.format(Locale.US, + "clientId[%s], path[%s], operationTimeout[%s], creating a send link", + this.getClientId(), this.sendPath, this.operationTimeout)); + } + + this.linkCreationTime = Instant.now().toString(); + + this.scheduleLinkOpenTimeout(TimeoutTracker.create(this.operationTimeout)); + + final Consumer onSessionOpen = new Consumer() { + @Override + public void accept(Session session) { + if (MessageSender.this.getIsClosingOrClosed()) { + + session.close(); + return; + } + + final Sender sender = session.sender(TrackingUtil.getLinkName(session)); + final Target target = new Target(); + target.setAddress(sendPath); + sender.setTarget(target); + + final Source source = new Source(); + sender.setSource(source); + + sender.setSenderSettleMode(SenderSettleMode.UNSETTLED); + + final 
SendLinkHandler handler = new SendLinkHandler(MessageSender.this); + BaseHandler.setHandler(sender, handler); + + if (MessageSender.this.sendLink != null) { + MessageSender.this.underlyingFactory.deregisterForConnectionError(MessageSender.this.sendLink); + } + + MessageSender.this.underlyingFactory.registerForConnectionError(sender); + sender.open(); + + synchronized (MessageSender.this.errorConditionLock) { + MessageSender.this.sendLink = sender; + } + } + }; + + final BiConsumer onSessionOpenError = new BiConsumer() { + @Override + public void accept(ErrorCondition t, Exception u) { + if (t != null) + MessageSender.this.onError((t != null && t.getCondition() != null) ? ExceptionUtil.toException(t) : null); + else if (u != null) + MessageSender.this.onError(u); + } + }; + + try { + this.underlyingFactory.getCBSChannel().sendToken( + this.underlyingFactory.getReactorDispatcher(), + this.underlyingFactory.getTokenProvider().getToken(tokenAudience, ClientConstants.TOKEN_VALIDITY), + tokenAudience, + new OperationResult() { + @Override + public void onComplete(Void result) { + if (MessageSender.this.getIsClosingOrClosed()) + return; + + underlyingFactory.getSession( + sendPath, + onSessionOpen, + onSessionOpenError); + } + + @Override + public void onError(Exception error) { + final Exception completionException; + if (error != null && error instanceof AmqpException) { + completionException = ExceptionUtil.toException(((AmqpException) error).getError()); + if (completionException != error && completionException.getCause() == null) { + completionException.initCause(error); + } + } else { + completionException = error; + } + + MessageSender.this.onError(completionException); + } + }); + } catch (IOException | NoSuchAlgorithmException | InvalidKeyException | RuntimeException exception) { + MessageSender.this.onError(exception); + } + } + + private void scheduleLinkOpenTimeout(TimeoutTracker timeout) { + // timer to signal a timeout if exceeds the operationTimeout on 
MessagingFactory + this.openTimer = this.timer.schedule( + new Runnable() { + public void run() { + creatingLink = false; + + if (!MessageSender.this.linkFirstOpen.isDone()) { + final Exception lastReportedError; + final Sender link; + synchronized (MessageSender.this.errorConditionLock) { + lastReportedError = MessageSender.this.lastKnownLinkError; + link = MessageSender.this.sendLink; + } + + final Exception operationTimedout = new TimeoutException( + String.format(Locale.US, "Open operation on entity(%s) timed out at %s.", + MessageSender.this.getSendPath(), ZonedDateTime.now().toString()), + lastReportedError); + + if (TRACE_LOGGER.isWarnEnabled()) { + TRACE_LOGGER.warn( + String.format(Locale.US, "clientId[%s], path[%s], open call timed out", + MessageSender.this.getClientId(), MessageSender.this.sendPath), + operationTimedout); + } + + ExceptionUtil.completeExceptionally(MessageSender.this.linkFirstOpen, operationTimedout, MessageSender.this); + setClosed(); + } + } + } + , timeout.remaining()); + + this.openTimer.handleAsync( + (unUsed, exception) -> { + if (exception != null + && exception instanceof Exception + && !(exception instanceof CancellationException)) { + ExceptionUtil.completeExceptionally(linkFirstOpen, (Exception) exception, this); + } + + return null; + }, this.executor); + } + + @Override + public ErrorContext getContext() { + final Sender link; + synchronized (this.errorConditionLock) { + link = this.sendLink; + } + + final boolean isLinkOpened = this.linkFirstOpen != null && this.linkFirstOpen.isDone(); + final String referenceId = link != null && link.getRemoteProperties() != null && link.getRemoteProperties().containsKey(ClientConstants.TRACKING_ID_PROPERTY) + ? link.getRemoteProperties().get(ClientConstants.TRACKING_ID_PROPERTY).toString() + : ((link != null) ? link.getName() : null); + + final SenderContext errorContext = new SenderContext( + this.underlyingFactory != null ? 
this.underlyingFactory.getHostName() : null, + this.sendPath, + referenceId, + isLinkOpened && link != null ? link.getCredit() : null); + return errorContext; + } + + @Override + public void onFlow(final int creditIssued) { + synchronized (this.errorConditionLock) { + this.lastKnownLinkError = null; + } + + if (creditIssued <= 0) + return; + + if (TRACE_LOGGER.isDebugEnabled()) { + int numberOfSendsWaitingforCredit = this.pendingSends.size(); + TRACE_LOGGER.debug(String.format(Locale.US, + "clientId[%s], path[%s], linkName[%s], remoteLinkCredit[%s], pendingSendsWaitingForCredit[%s], pendingSendsWaitingDelivery[%s]", + this.getClientId(), this.sendPath, this.sendLink.getName(), creditIssued, numberOfSendsWaitingforCredit, this.pendingSendsData.size() - numberOfSendsWaitingforCredit)); + } + + this.sendWork.onEvent(); + } + + private void recreateSendLink() { + this.createSendLink(); + this.retryPolicy.incrementRetryCount(this.getClientId()); + } + + // actual send on the SenderLink should happen only in this method & should run on Reactor Thread + private void processSendWork() { + if (this.sendLink.getLocalState() == EndpointState.CLOSED || this.sendLink.getRemoteState() == EndpointState.CLOSED) { + if (!this.getIsClosingOrClosed()) + this.recreateSendLink(); + + return; + } + + while (this.sendLink.getLocalState() == EndpointState.ACTIVE && this.sendLink.getRemoteState() == EndpointState.ACTIVE + && this.sendLink.getCredit() > 0) { + final WeightedDeliveryTag weightedDelivery; + final ReplayableWorkItem sendData; + final String deliveryTag; + synchronized (this.pendingSendLock) { + weightedDelivery = this.pendingSends.poll(); + if (weightedDelivery != null) { + deliveryTag = weightedDelivery.getDeliveryTag(); + sendData = this.pendingSendsData.get(deliveryTag); + } else { + sendData = null; + deliveryTag = null; + } + } + + if (sendData != null) { + if (sendData.getWork() != null && sendData.getWork().isDone()) { + // CoreSend could enque Sends into PendingSends 
Queue and can fail the SendCompletableFuture + // (when It fails to schedule the ProcessSendWork on reactor Thread) + this.pendingSendsData.remove(deliveryTag); + continue; + } + + Delivery delivery = null; + boolean linkAdvance = false; + int sentMsgSize = 0; + Exception sendException = null; + + try { + delivery = this.sendLink.delivery(deliveryTag.getBytes()); + delivery.setMessageFormat(sendData.getMessageFormat()); + + sentMsgSize = this.sendLink.send(sendData.getMessage(), 0, sendData.getEncodedMessageSize()); + assert sentMsgSize == sendData.getEncodedMessageSize() : "Contract of the ProtonJ library for Sender.Send API changed"; + + linkAdvance = this.sendLink.advance(); + } catch (Exception exception) { + sendException = exception; + } + + if (linkAdvance) { + sendData.setWaitingForAck(); + } else { + if (TRACE_LOGGER.isDebugEnabled()) { + TRACE_LOGGER.debug( + String.format(Locale.US, "clientId[%s], path[%s], linkName[%s], deliveryTag[%s], sentMessageSize[%s], payloadActualSize[%s] - sendlink advance failed", + this.getClientId(), this.sendPath, this.sendLink.getName(), deliveryTag, sentMsgSize, sendData.getEncodedMessageSize())); + } + + if (delivery != null) { + delivery.free(); + } + + sendData.getWork().completeExceptionally(sendException != null + ? new OperationCancelledException(String.format(Locale.US, + "Entity(%s): send operation failed. 
Please see cause for more details", this.sendPath), sendException) + : new OperationCancelledException( + String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", this.sendPath, deliveryTag))); + } + } else { + if (deliveryTag != null) { + if (TRACE_LOGGER.isDebugEnabled()) { + TRACE_LOGGER.debug( + String.format(Locale.US, "clientId[%s], path[%s], linkName[%s], deliveryTag[%s] - sendData not found for this delivery.", + this.getClientId(), this.sendPath, this.sendLink.getName(), deliveryTag)); + } + } + + break; + } + } + } + + private void throwSenderTimeout(final CompletableFuture pendingSendWork, final Exception lastKnownException) { + + Exception cause = lastKnownException; + if (lastKnownException == null) { + final Exception lastReportedLinkLevelError; + final Instant lastLinkErrorReportedAt; + synchronized (this.errorConditionLock) { + lastReportedLinkLevelError = this.lastKnownLinkError; + lastLinkErrorReportedAt = this.lastKnownErrorReportedAt; + } + + if (lastReportedLinkLevelError != null) { + boolean isServerBusy = ((lastReportedLinkLevelError instanceof ServerBusyException) + && (lastLinkErrorReportedAt.isAfter(Instant.now().minusSeconds(ClientConstants.SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS)))); + cause = isServerBusy || (lastLinkErrorReportedAt.isAfter(Instant.now().minusMillis(this.operationTimeout.toMillis()))) + ? lastReportedLinkLevelError + : null; + } + } + + final boolean isClientSideTimeout = (cause == null || !(cause instanceof EventHubException)); + final EventHubException exception = isClientSideTimeout + ? 
new TimeoutException(String.format(Locale.US, "Entity(%s): %s at %s.", + this.sendPath, MessageSender.SEND_TIMED_OUT, ZonedDateTime.now()), cause) + : (EventHubException) cause; + + ExceptionUtil.completeExceptionally(pendingSendWork, exception, this); + } + + private void scheduleLinkCloseTimeout(final TimeoutTracker timeout) { + // timer to signal a timeout if exceeds the operationTimeout on MessagingFactory + this.closeTimer = this.timer.schedule( + new Runnable() { + public void run() { + if (!linkClose.isDone()) { + final Sender link; + synchronized (MessageSender.this.errorConditionLock) { + link = MessageSender.this.sendLink; + } + + final Exception operationTimedout = new TimeoutException(String.format(Locale.US, + "Entity(%s): close operation timed out at %s", MessageSender.this.sendPath, ZonedDateTime.now())); + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info( + String.format(Locale.US, "clientId[%s], message sender(linkName: %s, path: %s) close call timed out", + MessageSender.this.getClientId(), link.getName(), MessageSender.this.sendPath), + operationTimedout); + } + + ExceptionUtil.completeExceptionally(linkClose, operationTimedout, MessageSender.this); + MessageSender.this.onError((Exception) null); + } + } + } + , timeout.remaining()); + + this.closeTimer.handleAsync( + (unUsed, exception) -> { + if (exception != null && exception instanceof Exception && !(exception instanceof CancellationException)) { + ExceptionUtil.completeExceptionally(linkClose, (Exception) exception, MessageSender.this); + } + + return null; + }, this.executor); + } + + @Override + protected CompletableFuture onClose() { + if (!this.getIsClosed()) { + try { + this.activeClientTokenManager.cancel(); + scheduleLinkCloseTimeout(TimeoutTracker.create(operationTimeout)); + this.underlyingFactory.scheduleOnReactorThread(new DispatchHandler() { + @Override + public void onEvent() { + if (sendLink != null && sendLink.getLocalState() != EndpointState.CLOSED) { + 
sendLink.close(); + } else if (sendLink == null || sendLink.getRemoteState() == EndpointState.CLOSED) { + if (closeTimer != null && !closeTimer.isCancelled()) { + closeTimer.cancel(false); + } + + linkClose.complete(null); + } + } + }); + + } catch (IOException | RejectedExecutionException schedulerException) { + this.linkClose.completeExceptionally(schedulerException); + } + } + + return this.linkClose; + } + + @Override + protected Exception getLastKnownError() { + synchronized (this.errorConditionLock) { + return this.lastKnownLinkError; + } + } + + private static class WeightedDeliveryTag { + private final String deliveryTag; + private final int priority; + + WeightedDeliveryTag(final String deliveryTag, final int priority) { + this.deliveryTag = deliveryTag; + this.priority = priority; + } + + public String getDeliveryTag() { + return this.deliveryTag; + } + + public int getPriority() { + return this.priority; + } + } + + private static class DeliveryTagComparator implements Comparator { + @Override + public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { + return deliveryTag1.getPriority() - deliveryTag0.getPriority(); + } + } + + private class SendTimeout implements Runnable { + private final String deliveryTag; + private final ReplayableWorkItem sendWaiterData; + + public SendTimeout( + final String deliveryTag, + final ReplayableWorkItem sendWaiterData) { + this.sendWaiterData = sendWaiterData; + this.deliveryTag = deliveryTag; + } + + @Override + public void run() { + if (!sendWaiterData.getWork().isDone()) { + MessageSender.this.pendingSendsData.remove(deliveryTag); + MessageSender.this.throwSenderTimeout(sendWaiterData.getWork(), sendWaiterData.getLastKnownException()); + } + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessageWrapper.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessageWrapper.java new file 
mode 100644 index 0000000000000..f2388659bb5c5 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessageWrapper.java @@ -0,0 +1,26 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.impl; + +import com.microsoft.azure.eventhubs.EventPosition; +import org.apache.qpid.proton.message.Message; + +final class MessageWrapper { + private final Message message; + private final EventPosition eventPosition; + + MessageWrapper(Message message, EventPosition eventPosition) { + this.message = message; + this.eventPosition = eventPosition; + } + + Message getMessage() { + return this.message; + } + + EventPosition getEventPosition() { + return this.eventPosition; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessagingFactory.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessagingFactory.java new file mode 100644 index 0000000000000..24031a7c74ca9 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/MessagingFactory.java @@ -0,0 +1,610 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.impl; + + +import com.microsoft.azure.eventhubs.*; +import com.microsoft.azure.eventhubs.TimeoutException; +import org.apache.qpid.proton.amqp.transport.ErrorCondition; +import org.apache.qpid.proton.engine.*; +import org.apache.qpid.proton.reactor.Reactor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.channels.UnresolvedAddressException; +import java.time.Duration; +import java.time.Instant; +import java.util.LinkedList; +import java.util.List; +import java.util.Locale; +import java.util.concurrent.*; +import java.util.function.BiConsumer; +import java.util.function.Consumer; + +/** + * Abstracts all amqp related details and exposes AmqpConnection object + * Manages connection life-cycle + */ +public final class MessagingFactory extends ClientEntity implements AmqpConnection, SessionProvider, SchedulerProvider { + public static final Duration DefaultOperationTimeout = Duration.ofSeconds(60); + + private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(MessagingFactory.class); + private final String hostName; + private final CompletableFuture closeTask; + private final ConnectionHandler connectionHandler; + private final LinkedList registeredLinks; + private final Object reactorLock; + private final Object cbsChannelCreateLock; + private final Object mgmtChannelCreateLock; + private final SharedAccessSignatureTokenProvider tokenProvider; + private final ReactorFactory reactorFactory; + + private Reactor reactor; + private ReactorDispatcher reactorDispatcher; + private Connection connection; + private CBSChannel cbsChannel; + private ManagementChannel mgmtChannel; + private Duration operationTimeout; + private RetryPolicy retryPolicy; + private CompletableFuture open; + private CompletableFuture openTimer; + private CompletableFuture closeTimer; + private String reactorCreationTime; + + MessagingFactory(final ConnectionStringBuilder builder, + final 
RetryPolicy retryPolicy, + final ScheduledExecutorService executor, + final ReactorFactory reactorFactory) { + super("MessagingFactory".concat(StringUtil.getRandomString()), null, executor); + + this.hostName = builder.getEndpoint().getHost(); + this.reactorFactory = reactorFactory; + this.operationTimeout = builder.getOperationTimeout(); + this.retryPolicy = retryPolicy; + this.registeredLinks = new LinkedList<>(); + this.reactorLock = new Object(); + this.connectionHandler = ConnectionHandler.create(builder.getTransportType(), this); + this.cbsChannelCreateLock = new Object(); + this.mgmtChannelCreateLock = new Object(); + this.tokenProvider = builder.getSharedAccessSignature() == null + ? new SharedAccessSignatureTokenProvider(builder.getSasKeyName(), builder.getSasKey()) + : new SharedAccessSignatureTokenProvider(builder.getSharedAccessSignature()); + + this.closeTask = new CompletableFuture<>(); + } + + public static CompletableFuture createFromConnectionString(final String connectionString, final ScheduledExecutorService executor) throws IOException { + return createFromConnectionString(connectionString, RetryPolicy.getDefault(), executor); + } + + public static CompletableFuture createFromConnectionString( + final String connectionString, + final RetryPolicy retryPolicy, + final ScheduledExecutorService executor) throws IOException { + return createFromConnectionString(connectionString, retryPolicy, executor, new ReactorFactory()); + } + + public static CompletableFuture createFromConnectionString( + final String connectionString, + final RetryPolicy retryPolicy, + final ScheduledExecutorService executor, + final ReactorFactory reactorFactory) throws IOException { + final ConnectionStringBuilder builder = new ConnectionStringBuilder(connectionString); + final MessagingFactory messagingFactory = new MessagingFactory(builder, + (retryPolicy != null) ? 
retryPolicy : RetryPolicy.getDefault(), + executor, + reactorFactory); + + messagingFactory.createConnection(); + + final Timer timer = new Timer(messagingFactory); + messagingFactory.openTimer = timer.schedule( + new Runnable() { + @Override + public void run() { + if (!messagingFactory.open.isDone()) { + messagingFactory.open.completeExceptionally(new TimeoutException("Opening MessagingFactory timed out.")); + messagingFactory.getReactor().stop(); + } + } + }, + messagingFactory.getOperationTimeout()); + + // if scheduling messagingfactory openTimer fails - notify user and stop + messagingFactory.openTimer.handleAsync( + (unUsed, exception) -> { + if (exception != null && !(exception instanceof CancellationException)) { + messagingFactory.open.completeExceptionally(exception); + messagingFactory.getReactor().stop(); + } + + return null; + }, messagingFactory.executor); + + return messagingFactory.open; + } + + @Override + public String getHostName() { + return this.hostName; + } + + private Reactor getReactor() { + synchronized (this.reactorLock) { + return this.reactor; + } + } + + public ReactorDispatcher getReactorDispatcher() { + synchronized (this.reactorLock) { + return this.reactorDispatcher; + } + } + + public SharedAccessSignatureTokenProvider getTokenProvider() { + return this.tokenProvider; + } + + private void createConnection() throws IOException { + this.open = new CompletableFuture<>(); + this.startReactor(new ReactorHandlerWithConnection()); + } + + private void startReactor(final ReactorHandler reactorHandler) throws IOException { + final Reactor newReactor = this.reactorFactory.create(reactorHandler, this.connectionHandler.getMaxFrameSize()); + synchronized (this.reactorLock) { + this.reactor = newReactor; + this.reactorDispatcher = new ReactorDispatcher(newReactor); + reactorHandler.unsafeSetReactorDispatcher(this.reactorDispatcher); + } + + this.reactorCreationTime = Instant.now().toString(); + + executor.execute(new RunReactor(newReactor, 
executor)); + } + + public CBSChannel getCBSChannel() { + synchronized (this.cbsChannelCreateLock) { + if (this.cbsChannel == null) { + this.cbsChannel = new CBSChannel(this, this); + } + } + + return this.cbsChannel; + } + + public ManagementChannel getManagementChannel() { + synchronized (this.mgmtChannelCreateLock) { + if (this.mgmtChannel == null) { + this.mgmtChannel = new ManagementChannel(this, this); + } + } + + return this.mgmtChannel; + } + + @Override + public Session getSession(final String path, final Consumer onRemoteSessionOpen, final BiConsumer onRemoteSessionOpenError) { + if (this.getIsClosingOrClosed()) { + + onRemoteSessionOpenError.accept(null, new OperationCancelledException("underlying messagingFactory instance is closed")); + return null; + } + + if (this.connection == null || this.connection.getLocalState() == EndpointState.CLOSED || this.connection.getRemoteState() == EndpointState.CLOSED) { + this.connection = this.getReactor().connectionToHost( + this.connectionHandler.getRemoteHostName(), + this.connectionHandler.getRemotePort(), + this.connectionHandler); + } + + final Session session = this.connection.session(); + BaseHandler.setHandler(session, new SessionHandler(path, onRemoteSessionOpen, onRemoteSessionOpenError, this.operationTimeout)); + session.open(); + + return session; + } + + public Duration getOperationTimeout() { + return this.operationTimeout; + } + + public RetryPolicy getRetryPolicy() { + return this.retryPolicy; + } + + @Override + public void onOpenComplete(Exception exception) { + if (exception == null) { + this.open.complete(this); + + // if connection creation is in progress and then msgFactory.close call came thru + if (this.getIsClosingOrClosed()) + this.connection.close(); + } else { + this.open.completeExceptionally(exception); + } + + if (this.openTimer != null) + this.openTimer.cancel(false); + } + + @Override + public void onConnectionError(ErrorCondition error) { + if (TRACE_LOGGER.isWarnEnabled()) { + 
TRACE_LOGGER.warn(String.format(Locale.US, "onConnectionError: messagingFactory[%s], hostname[%s], error[%s]", + this.getClientId(), + this.hostName, + error != null ? error.getDescription() : "n/a")); + } + + if (!this.open.isDone()) { + if (TRACE_LOGGER.isWarnEnabled()) { + TRACE_LOGGER.warn(String.format(Locale.US, "onConnectionError: messagingFactory[%s], hostname[%s], open hasn't complete, stopping the reactor", + this.getClientId(), + this.hostName)); + } + + this.getReactor().stop(); + this.onOpenComplete(ExceptionUtil.toException(error)); + } else { + final Connection oldConnection = this.connection; + final List oldRegisteredLinksCopy = new LinkedList<>(this.registeredLinks); + final List closedLinks = new LinkedList<>(); + + for (Link link : oldRegisteredLinksCopy) { + if (link.getLocalState() != EndpointState.CLOSED && link.getRemoteState() != EndpointState.CLOSED) { + if (TRACE_LOGGER.isWarnEnabled()) { + TRACE_LOGGER.warn(String.format(Locale.US, "onConnectionError: messagingFactory[%s], hostname[%s], closing link [%s]", + this.getClientId(), + this.hostName, link.getName())); + } + + link.setCondition(error); + link.close(); + closedLinks.add(link); + } + } + + // if proton-j detects transport error - onConnectionError is invoked, but, the connection state is not set to closed + // in connection recreation we depend on currentConnection state to evaluate need for recreation + if (oldConnection.getLocalState() != EndpointState.CLOSED) { + if (TRACE_LOGGER.isWarnEnabled()) { + TRACE_LOGGER.warn(String.format(Locale.US, "onConnectionError: messagingFactory[%s], hostname[%s], closing current connection", + this.getClientId(), + this.hostName)); + } + + // this should ideally be done in Connectionhandler + // - but, since proton doesn't automatically emit close events + // for all child objects (links & sessions) we are doing it here + oldConnection.setCondition(error); + oldConnection.close(); + } + + for (Link link : closedLinks) { + final Handler 
handler = BaseHandler.getHandler(link); + if (handler != null && handler instanceof BaseLinkHandler) { + final BaseLinkHandler linkHandler = (BaseLinkHandler) handler; + linkHandler.processOnClose(link, error); + } + } + } + + if (this.getIsClosingOrClosed() && !this.closeTask.isDone()) { + this.getReactor().stop(); + } + } + + private void onReactorError(Exception cause) { + if (!this.open.isDone()) { + this.onOpenComplete(cause); + } else { + if (this.getIsClosingOrClosed()) { + return; + } + + TRACE_LOGGER.warn(String.format(Locale.US, "onReactorError messagingFactory[%s], hostName[%s], error[%s]", + this.getClientId(), this.getHostName(), + cause.getMessage())); + + final Connection oldConnection = this.connection; + final List oldRegisteredLinksCopy = new LinkedList<>(this.registeredLinks); + + try { + TRACE_LOGGER.info(String.format(Locale.US, "onReactorError messagingFactory[%s], hostName[%s], message[%s]", + this.getClientId(), this.getHostName(), + "starting new reactor")); + + this.startReactor(new ReactorHandlerWithConnection()); + } catch (IOException e) { + TRACE_LOGGER.error(String.format(Locale.US, "messagingFactory[%s], hostName[%s], error[%s]", + this.getClientId(), this.getHostName(), + ExceptionUtil.toStackTraceString(e, "Re-starting reactor failed with error"))); + + // TODO - stop retrying on the error after multiple attempts. + this.onReactorError(cause); + } + + // when the instance of the reactor itself faults - Connection and Links will not be cleaned up even after the + // below .close() calls (local closes). + // But, we still need to change the states of these to Closed - so that subsequent retries - will + // treat the links and connection as closed and re-establish them and continue running on new Reactor instance. 
+ if (oldConnection.getLocalState() != EndpointState.CLOSED && oldConnection.getRemoteState() != EndpointState.CLOSED) { + oldConnection.close(); + } + + for (final Link link : oldRegisteredLinksCopy) { + if (link.getLocalState() != EndpointState.CLOSED && link.getRemoteState() != EndpointState.CLOSED) { + link.close(); + } + + final Handler handler = BaseHandler.getHandler(link); + if (handler != null && handler instanceof BaseLinkHandler) { + final BaseLinkHandler linkHandler = (BaseLinkHandler) handler; + linkHandler.processOnClose(link, cause); + } + } + } + } + + @Override + protected CompletableFuture onClose() { + if (!this.getIsClosed()) { + final Timer timer = new Timer(this); + this.closeTimer = timer.schedule(new Runnable() { + @Override + public void run() { + if (!closeTask.isDone()) { + closeTask.completeExceptionally(new TimeoutException("Closing MessagingFactory timed out.")); + getReactor().stop(); + } + } + }, + operationTimeout); + + if (this.closeTimer.isCompletedExceptionally()) { + this.closeTask.completeExceptionally(ExceptionUtil.getExceptionFromCompletedFuture(this.closeTimer)); + } else { + try { + this.scheduleOnReactorThread(new CloseWork()); + } catch (IOException | RejectedExecutionException schedulerException) { + this.closeTask.completeExceptionally(schedulerException); + } + } + } + + return this.closeTask; + } + + @Override + public void registerForConnectionError(Link link) { + this.registeredLinks.add(link); + } + + @Override + public void deregisterForConnectionError(Link link) { + this.registeredLinks.remove(link); + } + + public void scheduleOnReactorThread(final DispatchHandler handler) throws IOException, RejectedExecutionException { + this.getReactorDispatcher().invoke(handler); + } + + public void scheduleOnReactorThread(final int delay, final DispatchHandler handler) throws IOException, RejectedExecutionException { + this.getReactorDispatcher().invoke(delay, handler); + } + + public static class ReactorFactory { + + 
public Reactor create(final ReactorHandler reactorHandler, final int maxFrameSize) throws IOException { + return ProtonUtil.reactor(reactorHandler, maxFrameSize); + } + } + + private class CloseWork extends DispatchHandler { + @Override + public void onEvent() { + final ReactorDispatcher dispatcher = getReactorDispatcher(); + synchronized (cbsChannelCreateLock) { + if (cbsChannel != null) { + cbsChannel.close( + dispatcher, + new OperationResult() { + @Override + public void onComplete(Void result) { + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info( + String.format(Locale.US, "messagingFactory[%s], hostName[%s], info[%s]", + getClientId(), getHostName(), "cbsChannel closed")); + } + } + + @Override + public void onError(Exception error) { + if (TRACE_LOGGER.isWarnEnabled()) { + TRACE_LOGGER.warn(String.format(Locale.US, + "messagingFactory[%s], hostName[%s], cbsChannelCloseError[%s]", + getClientId(), getHostName(), error.getMessage())); + } + } + }); + } + } + + synchronized (mgmtChannelCreateLock) { + if (mgmtChannel != null) { + mgmtChannel.close( + dispatcher, + new OperationResult() { + @Override + public void onComplete(Void result) { + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info( + String.format(Locale.US, "messagingFactory[%s], hostName[%s], info[%s]", + getClientId(), getHostName(), "mgmtChannel closed")); + } + } + + @Override + public void onError(Exception error) { + + if (TRACE_LOGGER.isWarnEnabled()) { + TRACE_LOGGER.warn(String.format(Locale.US, + "messagingFactory[%s], hostName[%s], mgmtChannelCloseError[%s]", + getClientId(), getHostName(), error.getMessage())); + } + } + }); + } + } + + if (connection != null && connection.getRemoteState() != EndpointState.CLOSED && connection.getLocalState() != EndpointState.CLOSED) { + connection.close(); + } + } + } + + private class RunReactor implements Runnable { + final private Reactor rctr; + final private ScheduledExecutorService executor; + + volatile boolean hasStarted; + + public 
RunReactor(final Reactor reactor, final ScheduledExecutorService executor) { + this.rctr = reactor; + this.executor = executor; + this.hasStarted = false; + } + + public void run() { + boolean reScheduledReactor = false; + + try { + if (!this.hasStarted) { + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info(String.format(Locale.US, "messagingFactory[%s], hostName[%s], info[%s]", + getClientId(), getHostName(), "starting reactor instance.")); + } + + this.rctr.start(); + this.hasStarted = true; + } + + if (!Thread.interrupted() && this.rctr.process()) { + try { + this.executor.execute(this); + reScheduledReactor = true; + } catch (RejectedExecutionException exception) { + if (TRACE_LOGGER.isWarnEnabled()) { + TRACE_LOGGER.warn(String.format(Locale.US, "messagingFactory[%s], hostName[%s], error[%s]", + getClientId(), getHostName(), + ExceptionUtil.toStackTraceString(exception, "scheduling reactor failed because the executor has been shut down"))); + } + + this.rctr.attachments().set(RejectedExecutionException.class, RejectedExecutionException.class, exception); + } + + return; + } + + if (TRACE_LOGGER.isWarnEnabled()) { + TRACE_LOGGER.warn(String.format(Locale.US, "messagingFactory[%s], hostName[%s], message[%s]", + getClientId(), getHostName(), + "stopping the reactor because thread was interrupted or the reactor has no more events to process.")); + } + + this.rctr.stop(); + } catch (HandlerException handlerException) { + Throwable cause = handlerException.getCause(); + if (cause == null) { + cause = handlerException; + } + + if (TRACE_LOGGER.isWarnEnabled()) { + TRACE_LOGGER.warn(String.format(Locale.US, "messagingFactory[%s], hostName[%s], error[%s]", + getClientId(), getHostName(), ExceptionUtil.toStackTraceString(handlerException, + "Unhandled exception while processing events in reactor, report this error."))); + } + + final String message = !StringUtil.isNullOrEmpty(cause.getMessage()) ? 
+ cause.getMessage() : + !StringUtil.isNullOrEmpty(handlerException.getMessage()) ? + handlerException.getMessage() : + "Reactor encountered unrecoverable error"; + + final EventHubException sbException; + + if (cause instanceof UnresolvedAddressException) { + sbException = new CommunicationException( + String.format(Locale.US, "%s. This is usually caused by incorrect hostname or network configuration. Check correctness of namespace information. %s", + message, ExceptionUtil.getTrackingIDAndTimeToLog()), + cause); + } else { + sbException = new EventHubException( + true, + String.format(Locale.US, "%s, %s", message, ExceptionUtil.getTrackingIDAndTimeToLog()), + cause); + } + + MessagingFactory.this.onReactorError(sbException); + } finally { + if (reScheduledReactor) { + return; + } + + if (getIsClosingOrClosed() && !closeTask.isDone()) { + this.rctr.free(); + closeTask.complete(null); + if (closeTimer != null) { + closeTimer.cancel(false); + } + } else { + scheduleCompletePendingTasks(); + } + } + } + + private void scheduleCompletePendingTasks() { + this.executor.schedule(new Runnable() { + @Override + public void run() { + if (TRACE_LOGGER.isWarnEnabled()) { + TRACE_LOGGER.warn(String.format(Locale.US, "messagingFactory[%s], hostName[%s], message[%s]", + getClientId(), getHostName(), + "Processing all pending tasks and closing old reactor.")); + } + + try { + rctr.stop(); + rctr.process(); + } catch (HandlerException e) { + if (TRACE_LOGGER.isWarnEnabled()) { + TRACE_LOGGER.warn(String.format(Locale.US, "messagingFactory[%s], hostName[%s], error[%s]", + getClientId(), getHostName(), ExceptionUtil.toStackTraceString(e, + "scheduleCompletePendingTasks - exception occurred while processing events."))); + } + } finally { + rctr.free(); + } + } + }, MessagingFactory.this.getOperationTimeout().getSeconds(), TimeUnit.SECONDS); + } + } + + private class ReactorHandlerWithConnection extends ReactorHandler { + @Override + public void onReactorInit(Event e) { + 
super.onReactorInit(e); + + final Reactor r = e.getReactor(); + connection = r.connectionToHost( + connectionHandler.getRemoteHostName(), + connectionHandler.getRemotePort(), + connectionHandler); + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/Operation.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/Operation.java new file mode 100644 index 0000000000000..2eee91a38426f --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/Operation.java @@ -0,0 +1,10 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.impl; + +public interface Operation { + + void run(OperationResult operationCallback); +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/OperationResult.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/OperationResult.java new file mode 100644 index 0000000000000..100760a2791f4 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/OperationResult.java @@ -0,0 +1,13 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
/**
 * Completion contract for asynchronous low-level operations: the implementation
 * invokes exactly one of {@code onComplete} / {@code onError} per operation.
 *
 * NOTE(review): the type parameters were garbled in extraction; restored as
 * {@code <T, E extends Exception>} from call-site usage - confirm against the original file.
 */
public interface OperationResult<T, E extends Exception> {

    void onComplete(T result);

    void onError(E error);
}

/**
 * Receiver bound to a single Event Hubs partition.
 *
 * Wraps an AMQP {@link MessageReceiver} (created asynchronously) and optionally an
 * internal {@link ReceivePump} when the caller registers a {@link PartitionReceiveHandler}.
 * Also acts as the {@link ReceiverSettingsProvider} that supplies the link filter
 * (starting offset/position), link properties (epoch, identifier) and desired
 * capabilities when the underlying receive link is (re)created.
 */
final class PartitionReceiverImpl extends ClientEntity implements ReceiverSettingsProvider, PartitionReceiver {
    private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(PartitionReceiverImpl.class);

    private final String partitionId;
    private final MessagingFactory underlyingFactory;
    private final String eventHubName;
    private final String consumerGroupName;
    // Guards receivePump lifecycle: setReceiveHandler() vs. onClose() races.
    private final Object receiveHandlerLock;
    // Position requested by the caller; used for the link filter on first open.
    private final EventPositionImpl eventPosition;
    private final Long epoch;
    private final boolean isEpochReceiver;
    private final ReceiverOptions receiverOptions;
    // Non-null only when receiver runtime metrics are enabled in ReceiverOptions.
    private final ReceiverRuntimeInformation runtimeInformation;

    // Assigned asynchronously once the AMQP link is created; volatile because it is
    // read from caller/executor threads while written from the create() continuation.
    private volatile MessageReceiver internalReceiver;

    private ReceivePump receivePump;
    // Tracks the position of the last event handed to the caller (used on link recreate).
    private EventPosition currentEventPosition;

    private PartitionReceiverImpl(MessagingFactory factory,
                                  final String eventHubName,
                                  final String consumerGroupName,
                                  final String partitionId,
                                  final EventPositionImpl eventPosition,
                                  final Long epoch,
                                  final boolean isEpochReceiver,
                                  final ReceiverOptions receiverOptions,
                                  final ScheduledExecutorService executor) {
        super("PartitionReceiverImpl".concat(StringUtil.getRandomString()), null, executor);

        this.underlyingFactory = factory;
        this.eventHubName = eventHubName;
        this.consumerGroupName = consumerGroupName;
        this.partitionId = partitionId;
        this.eventPosition = eventPosition;
        this.epoch = epoch;
        this.isEpochReceiver = isEpochReceiver;
        this.receiveHandlerLock = new Object();
        this.receiverOptions = receiverOptions;
        this.runtimeInformation = (this.receiverOptions != null && this.receiverOptions.getReceiverRuntimeMetricEnabled())
                ? new ReceiverRuntimeInformation(partitionId)
                : null;
        this.currentEventPosition = EventPosition.fromStartOfStream();
    }

    /**
     * Factory entry point. Validates arguments, constructs the receiver, and resolves
     * the returned future only after the underlying AMQP receive link has been created.
     *
     * @throws IllegalArgumentException when epoch is negative or consumerGroupName is blank.
     */
    static CompletableFuture<PartitionReceiver> create(MessagingFactory factory,
                                                       final String eventHubName,
                                                       final String consumerGroupName,
                                                       final String partitionId,
                                                       final EventPosition eventPosition,
                                                       final long epoch,
                                                       final boolean isEpochReceiver,
                                                       ReceiverOptions receiverOptions,
                                                       final ScheduledExecutorService executor) {
        if (epoch < NULL_EPOCH) {
            throw new IllegalArgumentException("epoch cannot be a negative value. Please specify a zero or positive long value.");
        }

        if (StringUtil.isNullOrWhiteSpace(consumerGroupName)) {
            throw new IllegalArgumentException("specify valid string for argument - 'consumerGroupName'");
        }

        if (receiverOptions == null) {
            receiverOptions = new ReceiverOptions();
        }

        final PartitionReceiverImpl receiver = new PartitionReceiverImpl(factory, eventHubName, consumerGroupName, partitionId, (EventPositionImpl) eventPosition, epoch, isEpochReceiver, receiverOptions, executor);
        return receiver.createInternalReceiver().thenApplyAsync(new Function<Void, PartitionReceiver>() {
            public PartitionReceiver apply(Void a) {
                return receiver;
            }
        }, executor);
    }

    // Creates the AMQP MessageReceiver on the partition's consumer-group path and
    // stashes it into the volatile field when done.
    private CompletableFuture<Void> createInternalReceiver() {
        return MessageReceiver.create(this.underlyingFactory,
                this.getClientId().concat("-InternalReceiver"),
                String.format("%s/ConsumerGroups/%s/Partitions/%s", this.eventHubName, this.consumerGroupName, this.partitionId),
                this.receiverOptions.getPrefetchCount(), this)
                .thenAcceptAsync(new Consumer<MessageReceiver>() {
                    public void accept(MessageReceiver r) {
                        PartitionReceiverImpl.this.internalReceiver = r;
                    }
                }, this.executor);
    }

    // Position originally requested by the caller (not the current position).
    final EventPosition getStartingPosition() {
        return this.eventPosition;
    }

    public final String getPartitionId() {
        return this.partitionId;
    }

    public final Duration getReceiveTimeout() {
        return this.internalReceiver.getReceiveTimeout();
    }

    public void setReceiveTimeout(Duration value) {
        this.internalReceiver.setReceiveTimeout(value);
    }

    public final long getEpoch() {
        return this.epoch;
    }

    public final ReceiverRuntimeInformation getRuntimeInformation() {
        return this.runtimeInformation;
    }

    public final EventPosition getEventPosition() {
        return this.currentEventPosition;
    }

    /**
     * Receives up to maxEventCount events. When runtime metrics are enabled, the last
     * message's delivery annotations are mined for partition runtime information, and
     * currentEventPosition is advanced to the last delivered event.
     */
    public CompletableFuture<Iterable<EventData>> receive(final int maxEventCount) {
        return this.internalReceiver.receive(maxEventCount).thenApplyAsync(new Function<Collection<Message>, Iterable<EventData>>() {
            @Override
            public Iterable<EventData> apply(Collection<Message> amqpMessages) {
                // Only track the last message when metrics are requested; PassByRef lets
                // the conversion helper surface it without changing its return type.
                PassByRef<MessageWrapper> lastMessageRef = null;
                if (PartitionReceiverImpl.this.receiverOptions != null && PartitionReceiverImpl.this.receiverOptions.getReceiverRuntimeMetricEnabled())
                    lastMessageRef = new PassByRef<>();

                final Iterable<EventData> events = EventDataUtil.toEventDataCollection(amqpMessages, lastMessageRef);

                if (lastMessageRef != null && lastMessageRef.get() != null) {
                    // Delivery annotations carry the service-stamped "last enqueued" facts.
                    final DeliveryAnnotations deliveryAnnotations = lastMessageRef.get().getMessage().getDeliveryAnnotations();
                    if (deliveryAnnotations != null && deliveryAnnotations.getValue() != null) {
                        final Map<Symbol, Object> deliveryAnnotationsMap = deliveryAnnotations.getValue();
                        PartitionReceiverImpl.this.runtimeInformation.setRuntimeInformation(
                                (long) deliveryAnnotationsMap.get(ClientConstants.LAST_ENQUEUED_SEQUENCE_NUMBER),
                                ((Date) deliveryAnnotationsMap.get(ClientConstants.LAST_ENQUEUED_TIME_UTC)).toInstant(),
                                (String) deliveryAnnotationsMap.get(ClientConstants.LAST_ENQUEUED_OFFSET));
                    }

                    PartitionReceiverImpl.this.currentEventPosition = lastMessageRef.get().getEventPosition();
                }

                return events;
            }
        }, this.executor);
    }

    public CompletableFuture<Void> setReceiveHandler(final PartitionReceiveHandler receiveHandler) {
        return this.setReceiveHandler(receiveHandler, false);
    }

    /**
     * Registers (or, with null, unregisters) a push-style handler backed by an internal
     * ReceivePump. Only one handler may be registered at a time.
     *
     * @throws IllegalArgumentException when a pump is already running and a non-null handler is passed.
     */
    public CompletableFuture<Void> setReceiveHandler(final PartitionReceiveHandler receiveHandler, final boolean invokeWhenNoEvents) {
        synchronized (this.receiveHandlerLock) {
            // user setting receiveHandler==null should stop the pump if its running
            if (receiveHandler == null) {
                if (this.receivePump != null && this.receivePump.isRunning()) {
                    return this.receivePump.stop();
                }
            } else {
                if (this.receivePump != null && this.receivePump.isRunning())
                    throw new IllegalArgumentException(
                            "Unexpected value for parameter 'receiveHandler'. PartitionReceiver was already registered with a PartitionReceiveHandler instance. Only 1 instance can be registered.");

                this.receivePump = new ReceivePump(
                        this.eventHubName,
                        this.consumerGroupName,
                        new ReceivePump.IPartitionReceiver() {
                            @Override
                            public CompletableFuture<Iterable<EventData>> receive(int maxBatchSize) {
                                return PartitionReceiverImpl.this.receive(maxBatchSize);
                            }

                            @Override
                            public String getPartitionId() {
                                return PartitionReceiverImpl.this.getPartitionId();
                            }
                        },
                        receiveHandler,
                        invokeWhenNoEvents,
                        this.executor);

                this.executor.execute(this.receivePump);
            }

            return CompletableFuture.completedFuture(null);
        }
    }

    @Override
    public CompletableFuture<Void> onClose() {
        synchronized (this.receiveHandlerLock) {
            if (this.receivePump != null && this.receivePump.isRunning()) {
                // set the state of receivePump to StopEventRaised
                // - but don't actually wait until the current user-code completes
                // if user intends to stop everything - setReceiveHandler(null) should be invoked before close
                this.receivePump.stop();
            }
        }

        // internalReceiver may still be null when close() races link creation.
        if (this.internalReceiver != null) {
            return this.internalReceiver.close();
        } else {
            return CompletableFuture.completedFuture(null);
        }
    }

    /**
     * ReceiverSettingsProvider: builds the AMQP source filter. After a reconnect,
     * resumes from the last received message's offset; otherwise uses the position
     * the receiver was created with.
     */
    @Override
    public Map<Symbol, UnknownDescribedType> getFilter(final Message lastReceivedMessage) {
        String expression;
        if (lastReceivedMessage != null) {
            String lastReceivedOffset = lastReceivedMessage.getMessageAnnotations().getValue().get(AmqpConstants.OFFSET).toString();
            expression = String.format(AmqpConstants.AMQP_ANNOTATION_FORMAT, AmqpConstants.OFFSET_ANNOTATION_NAME, StringUtil.EMPTY, lastReceivedOffset);
        } else {
            expression = this.eventPosition.getExpression();
        }

        if (TRACE_LOGGER.isInfoEnabled()) {
            String logReceivePath = "";
            if (this.internalReceiver == null) {
                // During startup, internalReceiver is still null. Need to handle this special case when logging during startup
                // or the reactor thread crashes with NPE when calling internalReceiver.getReceivePath() and no receiving occurs.
                logReceivePath = "receiverPath[RECEIVER IS NULL]";
            } else {
                logReceivePath = "receiverPath[" + this.internalReceiver.getReceivePath() + "]";
            }
            TRACE_LOGGER.info(String.format("%s, action[createReceiveLink], %s", logReceivePath, this.eventPosition));
        }

        return Collections.singletonMap(AmqpConstants.STRING_FILTER, new UnknownDescribedType(AmqpConstants.STRING_FILTER, expression));
    }

    /**
     * ReceiverSettingsProvider: link properties - epoch (for exclusive receivers)
     * and an optional client-supplied identifier. Returns null when neither applies.
     */
    @Override
    public Map<Symbol, Object> getProperties() {

        if (!this.isEpochReceiver &&
                (this.receiverOptions == null || this.receiverOptions.getIdentifier() == null)) {
            return null;
        }

        final Map<Symbol, Object> properties = new HashMap<>();

        if (this.isEpochReceiver) {
            properties.put(AmqpConstants.EPOCH, (Object) this.epoch);
        }

        if (this.receiverOptions != null && this.receiverOptions.getIdentifier() != null) {
            properties.put(AmqpConstants.RECEIVER_IDENTIFIER_NAME, (Object) this.receiverOptions.getIdentifier());
        }

        return properties;
    }

    // ReceiverSettingsProvider: asks the service to stamp runtime metrics on deliveries
    // when the option is enabled; null means no extra capabilities.
    @Override
    public Symbol[] getDesiredCapabilities() {

        return this.receiverOptions != null && this.receiverOptions.getReceiverRuntimeMetricEnabled()
                ? new Symbol[]{AmqpConstants.ENABLE_RECEIVER_RUNTIME_METRIC_NAME}
                : null;
    }
}
+ */ +package com.microsoft.azure.eventhubs.impl; + +import com.microsoft.azure.eventhubs.*; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ScheduledExecutorService; +import java.util.function.Consumer; +import java.util.function.Function; + +final class PartitionSenderImpl extends ClientEntity implements PartitionSender { + private final String partitionId; + private final String eventHubName; + private final MessagingFactory factory; + + private volatile MessageSender internalSender; + + private PartitionSenderImpl(final MessagingFactory factory, final String eventHubName, final String partitionId, final ScheduledExecutorService executor) { + super("PartitionSenderImpl".concat(StringUtil.getRandomString()), null, executor); + + this.partitionId = partitionId; + this.eventHubName = eventHubName; + this.factory = factory; + } + + static CompletableFuture Create(final MessagingFactory factory, + final String eventHubName, + final String partitionId, + final ScheduledExecutorService executor) throws EventHubException { + final PartitionSenderImpl sender = new PartitionSenderImpl(factory, eventHubName, partitionId, executor); + return sender.createInternalSender() + .thenApplyAsync(new Function() { + public PartitionSender apply(Void a) { + return sender; + } + }, executor); + } + + private CompletableFuture createInternalSender() throws EventHubException { + return MessageSender.create(this.factory, this.getClientId().concat("-InternalSender"), + String.format("%s/Partitions/%s", this.eventHubName, this.partitionId)) + .thenAcceptAsync(new Consumer() { + public void accept(MessageSender a) { + PartitionSenderImpl.this.internalSender = a; + } + }, this.executor); + } + + public String getPartitionId() { + return this.partitionId; + } + + public EventDataBatch createBatch(BatchOptions options) { + if (!StringUtil.isNullOrEmpty(options.partitionKey)) { + throw new IllegalArgumentException("A partition key cannot be set when using 
PartitionSenderImpl. If you'd like to " + + "continue using PartitionSenderImpl with EventDataBatches, then please do not set a partition key in your BatchOptions."); + } + + int maxSize = this.internalSender.getMaxMessageSize(); + + if (options.maxMessageSize == null) { + return new EventDataBatchImpl(maxSize, null); + } + + if (options.maxMessageSize > maxSize) { + throw new IllegalArgumentException("The maxMessageSize set in BatchOptions is too large. You set a maxMessageSize of " + + options.maxMessageSize + ". The maximum allowed size is " + maxSize + "."); + } + + return new EventDataBatchImpl(options.maxMessageSize, null); + } + + public final CompletableFuture send(EventData data) { + return this.internalSender.send(((EventDataImpl) data).toAmqpMessage()); + } + + public final CompletableFuture send(Iterable eventDatas) { + if (eventDatas == null || IteratorUtil.sizeEquals(eventDatas, 0)) { + throw new IllegalArgumentException("EventData batch cannot be empty."); + } + + return this.internalSender.send(EventDataUtil.toAmqpMessages(eventDatas)); + } + + public final CompletableFuture send(EventDataBatch eventDatas) { + if (eventDatas == null || Integer.compare(eventDatas.getSize(), 0) == 0) { + throw new IllegalArgumentException("EventDataBatch cannot be empty."); + } + + if (!StringUtil.isNullOrEmpty(((EventDataBatchImpl) eventDatas).getPartitionKey())) { + throw new IllegalArgumentException("A partition key cannot be set when using PartitionSenderImpl. 
If you'd like to " + + "continue using PartitionSenderImpl with EventDataBatches, then please do not set a partition key in your BatchOptions"); + } + + return this.internalSender.send(EventDataUtil.toAmqpMessages(((EventDataBatchImpl) eventDatas).getInternalIterable())); + } + + @Override + public CompletableFuture onClose() { + if (this.internalSender == null) { + return CompletableFuture.completedFuture(null); + } else { + return this.internalSender.close(); + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/PassByRef.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/PassByRef.java new file mode 100644 index 0000000000000..818aeeea21530 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/PassByRef.java @@ -0,0 +1,18 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.impl; + +public final class PassByRef { + + T t; + + public T get() { + return this.t; + } + + public void set(final T t) { + this.t = t; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ProtonUtil.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ProtonUtil.java new file mode 100644 index 0000000000000..7b719fcf4eee5 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ProtonUtil.java @@ -0,0 +1,29 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.impl; + +import org.apache.qpid.proton.Proton; +import org.apache.qpid.proton.reactor.Reactor; +import org.apache.qpid.proton.reactor.ReactorOptions; + +import java.io.IOException; + +public final class ProtonUtil { + + private ProtonUtil() { + } + + public static Reactor reactor(final ReactorHandler reactorHandler, final int maxFrameSize) throws IOException { + + final ReactorOptions reactorOptions = new ReactorOptions(); + reactorOptions.setMaxFrameSize(maxFrameSize); + reactorOptions.setEnableSaslByDefault(true); + + final Reactor reactor = Proton.reactor(reactorOptions, reactorHandler); + reactor.setGlobalHandler(new CustomIOHandler()); + + return reactor; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReactorDispatcher.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReactorDispatcher.java new file mode 100644 index 0000000000000..6aad0e296670d --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReactorDispatcher.java @@ -0,0 +1,155 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
/**
 * {@link Reactor} is not thread-safe - all calls to {@link Proton} API's should be - on the Reactor Thread.
 * {@link Reactor} works out-of-box for all event driven API - ex: onReceive - which could raise upon onSocketRead.
 * {@link Reactor} didn't support API's like Send() out-of-box - which could potentially run on different thread to that of Reactor.
 * So, the following utility class is used to generate an Event to hook into {@link Reactor}'s event delegation pattern.
 * It uses a {@link Pipe} as the IO on which Reactor Listens to.
 * Cardinality: multiple {@link ReactorDispatcher}'s could be attached to 1 {@link Reactor}.
 * Each {@link ReactorDispatcher} should be initialized Synchronously - as it calls API in {@link Reactor} which is not thread-safe.
 */
public final class ReactorDispatcher {
    private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(ReactorDispatcher.class);
    private final Reactor reactor;
    // In-memory pipe: writing one byte to the sink wakes the reactor's selector,
    // which then drains workQueue on the reactor thread.
    private final Pipe ioSignal;
    private final ConcurrentLinkedQueue<BaseHandler> workQueue;
    private final ScheduleHandler workScheduler;

    public ReactorDispatcher(final Reactor reactor) throws IOException {
        this.reactor = reactor;
        this.ioSignal = Pipe.open();
        this.workQueue = new ConcurrentLinkedQueue<>();
        this.workScheduler = new ScheduleHandler();

        initializeSelectable();
    }

    // Registers the pipe's source end with the reactor so that readable events
    // dispatch to workScheduler; must run before any invoke() call.
    private void initializeSelectable() {
        Selectable schedulerSelectable = this.reactor.selectable();

        schedulerSelectable.setChannel(this.ioSignal.source());
        schedulerSelectable.onReadable(this.workScheduler);
        schedulerSelectable.onFree(new CloseHandler());

        schedulerSelectable.setReading(true);
        this.reactor.update(schedulerSelectable);
    }

    /**
     * Enqueues work to run on the reactor thread as soon as possible.
     *
     * @throws RejectedExecutionException when the reactor/dispatcher is shut down.
     * @throws IOException when signalling the pipe fails.
     */
    public void invoke(final DispatchHandler timerCallback) throws IOException, RejectedExecutionException {
        this.throwIfSchedulerError();

        this.workQueue.offer(timerCallback);
        this.signalWorkQueue();
    }

    /**
     * Enqueues work to run on the reactor thread after {@code delay} (reactor timer units).
     *
     * @throws RejectedExecutionException when the reactor/dispatcher is shut down.
     * @throws IOException when signalling the pipe fails.
     */
    public void invoke(final int delay, final DispatchHandler timerCallback) throws IOException, RejectedExecutionException {
        this.throwIfSchedulerError();

        this.workQueue.offer(new DelayHandler(this.reactor, delay, timerCallback));
        this.signalWorkQueue();
    }

    private void throwIfSchedulerError() {
        // throw when the scheduler on which Reactor is running is already closed
        final RejectedExecutionException rejectedException = this.reactor.attachments()
                .get(RejectedExecutionException.class, RejectedExecutionException.class);
        if (rejectedException != null) {
            throw new RejectedExecutionException(rejectedException.getMessage(), rejectedException);
        }

        // throw when the pipe is in closed state - in which case,
        // signalling the new event-dispatch will fail
        if (!this.ioSignal.sink().isOpen()) {
            throw new RejectedExecutionException("ReactorDispatcher instance is closed.");
        }
    }

    // Writes one byte into the pipe to wake the reactor. Loops because a non-blocking
    // write may return 0 when the pipe buffer is momentarily full.
    private void signalWorkQueue() throws IOException {
        try {
            while (this.ioSignal.sink().write(ByteBuffer.allocate(1)) == 0) {
            }
        } catch (ClosedChannelException ignorePipeClosedDuringReactorShutdown) {
            TRACE_LOGGER.info("signalWorkQueue failed with an error", ignorePipeClosedDuringReactorShutdown);
        }
    }

    // Defers a DispatchHandler by re-scheduling it on the reactor's timer.
    private final class DelayHandler extends BaseHandler {
        final int delay;
        final BaseHandler timerCallback;
        final Reactor reactor;

        public DelayHandler(final Reactor reactor, final int delay, final DispatchHandler timerCallback) {
            this.delay = delay;
            this.timerCallback = timerCallback;
            this.reactor = reactor;
        }

        @Override
        public void onTimerTask(Event e) {
            this.reactor.schedule(this.delay, this.timerCallback);
        }
    }

    // Runs on the reactor thread when the pipe becomes readable: drains the wake-up
    // bytes, then executes every queued work item.
    private final class ScheduleHandler implements Callback {
        @Override
        public void run(Selectable selectable) {
            try {
                while (ioSignal.source().read(ByteBuffer.allocate(1024)) > 0) {
                    // read until the end of the stream
                }
            } catch (ClosedChannelException ignorePipeClosedDuringReactorShutdown) {
                TRACE_LOGGER.info("ScheduleHandler.run() failed with an error", ignorePipeClosedDuringReactorShutdown);
            } catch (IOException ioException) {
                TRACE_LOGGER.warn("ScheduleHandler.run() failed with an error", ioException);
                throw new RuntimeException(ioException);
            }

            BaseHandler topWork;
            while ((topWork = workQueue.poll()) != null) {
                topWork.onTimerTask(null);
            }
        }
    }

    // Invoked when the selectable is freed: closes both pipe ends and drains any
    // remaining queued work so callers are not left hanging.
    private final class CloseHandler implements Callback {
        @Override
        public void run(Selectable selectable) {
            try {
                if (ioSignal.sink().isOpen())
                    ioSignal.sink().close();
            } catch (IOException ioException) {
                TRACE_LOGGER.info("CloseHandler.run() sink().close() failed with an error", ioException);
            }

            // run remaining work inline before tearing down the source end
            workScheduler.run(null);

            try {
                if (ioSignal.source().isOpen())
                    ioSignal.source().close();
            } catch (IOException ioException) {
                TRACE_LOGGER.info("CloseHandler.run() source().close() failed with an error", ioException);
            }
        }
    }
}

/**
 * Base proton-j reactor handler: configures the reactor's IO poll timeout on init
 * and exposes the {@link ReactorDispatcher} used to marshal work onto the reactor thread.
 */
public class ReactorHandler extends BaseHandler {

    private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(ReactorHandler.class);

    private ReactorDispatcher reactorDispatcher;

    public ReactorDispatcher getReactorDispatcher() {
        return this.reactorDispatcher;
    }

    // set needs to happen before starting reactorThread
    public void unsafeSetReactorDispatcher(final ReactorDispatcher reactorDispatcher) {
        this.reactorDispatcher = reactorDispatcher;
    }

    @Override
    public void onReactorInit(Event e) {

        TRACE_LOGGER.info("reactor.onReactorInit");

        final Reactor reactor = e.getReactor();
        reactor.setTimeout(ClientConstants.REACTOR_IO_POLL_TIMEOUT);
    }

    @Override
    public void onReactorFinal(Event e) {

        TRACE_LOGGER.info("reactor.onReactorFinal");
    }
}
// ServiceBus <-> ProtonReactor interaction
// handles all recvLink - reactor events
public final class ReceiveLinkHandler extends BaseLinkHandler {
    private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(ReceiveLinkHandler.class);
    private final AmqpReceiver amqpReceiver;
    // Lock guarding isFirstResponse: onLinkRemoteOpen and onDelivery race to report
    // open-completion exactly once.
    private final Object firstResponse;
    private boolean isFirstResponse;

    public ReceiveLinkHandler(final AmqpReceiver receiver) {
        super(receiver);

        this.amqpReceiver = receiver;
        this.firstResponse = new Object();
        this.isFirstResponse = true;
    }

    // Local end of the link opened: trace only, nothing to notify yet.
    @Override
    public void onLinkLocalOpen(Event evt) {
        Link link = evt.getLink();
        if (link instanceof Receiver) {
            Receiver receiver = (Receiver) link;

            if (TRACE_LOGGER.isInfoEnabled()) {
                TRACE_LOGGER.info(
                        String.format("onLinkLocalOpen linkName[%s], localSource[%s]", receiver.getName(), receiver.getSource()));
            }
        }
    }

    // Remote end attached: a non-null remote source means the service accepted the
    // link - report open-complete (once). A null source means an error is coming.
    @Override
    public void onLinkRemoteOpen(Event event) {
        Link link = event.getLink();
        if (link instanceof Receiver) {
            Receiver receiver = (Receiver) link;
            if (link.getRemoteSource() != null) {
                if (TRACE_LOGGER.isInfoEnabled()) {
                    TRACE_LOGGER.info(String.format(Locale.US, "onLinkRemoteOpen linkName[%s], remoteSource[%s]",
                            receiver.getName(), link.getRemoteSource()));
                }

                synchronized (this.firstResponse) {
                    this.isFirstResponse = false;
                    this.amqpReceiver.onOpenComplete(null);
                }
            } else {
                if (TRACE_LOGGER.isInfoEnabled()) {
                    TRACE_LOGGER.info(
                            String.format(Locale.US, "onLinkRemoteOpen linkName[%s], remoteTarget[null], "
                                    + "remoteSource[null], action[waitingForError]", receiver.getName()));
                }
            }
        }
    }

    // A delivery arrived. If it is the very first traffic on the link, it doubles as
    // the open-complete signal (some brokers deliver before the open handshake event).
    @Override
    public void onDelivery(Event event) {
        synchronized (this.firstResponse) {
            if (this.isFirstResponse) {
                this.isFirstResponse = false;
                this.amqpReceiver.onOpenComplete(null);
            }
        }

        Delivery delivery = event.getDelivery();
        Receiver receiveLink = (Receiver) delivery.getLink();

        // If a message spans across deliveries (for ex: 200k message will be 4 frames (deliveries) 64k 64k 64k 8k),
        // all until "last-1" deliveries will be partial
        // reactor will raise onDelivery event for all of these - we only need the last one
        if (!delivery.isPartial()) {

            // One of our customers hit an issue - where duplicate 'Delivery' events are raised to Reactor in proton-j layer
            // While processing the duplicate event - reactor hits an IllegalStateException in proton-j layer
            // before we fix proton-j - this work around ensures that we ignore the duplicate Delivery event
            if (delivery.isSettled()) {
                if (TRACE_LOGGER.isWarnEnabled()) {
                    TRACE_LOGGER.warn(
                            receiveLink != null
                                    ? String.format(Locale.US, "onDelivery linkName[%s], updatedLinkCredit[%s], remoteCredit[%s], "
                                            + "remoteCondition[%s], delivery.isSettled[%s]",
                                    receiveLink.getName(), receiveLink.getCredit(), receiveLink.getRemoteCredit(), receiveLink.getRemoteCondition(), delivery.isSettled())
                                    : String.format(Locale.US, "delivery.isSettled[%s]", delivery.isSettled()));
                }
            } else {
                this.amqpReceiver.onReceiveComplete(delivery);
            }
        }

        if (TRACE_LOGGER.isTraceEnabled() && receiveLink != null) {
            TRACE_LOGGER.trace(
                    String.format(Locale.US, "onDelivery linkName[%s], updatedLinkCredit[%s], remoteCredit[%s], "
                            + "remoteCondition[%s], delivery.isPartial[%s]",
                            receiveLink.getName(), receiveLink.getCredit(), receiveLink.getRemoteCredit(), receiveLink.getRemoteCondition(), delivery.isPartial()));
        }
    }
}
+ */ +package com.microsoft.azure.eventhubs.impl; + +import com.microsoft.azure.eventhubs.EventData; +import com.microsoft.azure.eventhubs.PartitionReceiveHandler; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Executor; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BiFunction; + +public class ReceivePump implements Runnable { + private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(ReceivePump.class); + + private final IPartitionReceiver receiver; + private final PartitionReceiveHandler onReceiveHandler; + private final boolean invokeOnTimeout; + private final CompletableFuture stopPump; + private final Executor executor; + private final ProcessAndReschedule processAndReschedule; + private final String eventHubName; + private final String consumerGroupName; + + private AtomicBoolean stopPumpRaised; + private volatile boolean isPumpHealthy = true; + + public ReceivePump( + final String eventHubName, + final String consumerGroupName, + final IPartitionReceiver receiver, + final PartitionReceiveHandler receiveHandler, + final boolean invokeOnReceiveWithNoEvents, + final Executor executor) { + this.eventHubName = eventHubName; + this.consumerGroupName = consumerGroupName; + this.receiver = receiver; + this.onReceiveHandler = receiveHandler; + this.invokeOnTimeout = invokeOnReceiveWithNoEvents; + this.stopPump = new CompletableFuture<>(); + this.executor = executor; + this.processAndReschedule = new ProcessAndReschedule(); + + this.stopPumpRaised = new AtomicBoolean(false); + } + + // entry-point - for runnable + public void run() { + try { + ReceivePump.this.receiveAndProcess(); + } catch (final Exception exception) { + if (TRACE_LOGGER.isErrorEnabled()) { + TRACE_LOGGER.error( + String.format("Receive pump for eventHub (%s), consumerGroup (%s), partition (%s) " + + "encountered 
unrecoverable error and exited with exception %s.", + this.eventHubName, this.consumerGroupName, this.receiver.getPartitionId(), exception.toString())); + } + + throw exception; + } + } + + // receives and invokes user-callback if success or stops pump if fails + public void receiveAndProcess() { + if (this.shouldContinue()) { + this.receiver.receive(this.onReceiveHandler.getMaxEventCount()) + .handleAsync(this.processAndReschedule, this.executor); + } else { + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info(String.format("Stopping receive pump for eventHub (%s), consumerGroup (%s), partition (%s) as %s", + this.eventHubName, this.consumerGroupName, this.receiver.getPartitionId(), + this.stopPumpRaised.get() ? "per the request." : "pump ran into errors.")); + } + + this.stopPump.complete(null); + } + } + + public CompletableFuture stop() { + this.stopPumpRaised.set(true); + return this.stopPump; + } + + public boolean isRunning() { + return !this.stopPump.isDone(); + } + + private boolean shouldContinue() { + return this.isPumpHealthy && !this.stopPumpRaised.get(); + } + + private void handleClientExceptions(final Throwable clientException) { + if (clientException != null) { + this.isPumpHealthy = false; + + if (TRACE_LOGGER.isWarnEnabled()) { + TRACE_LOGGER.warn(String.format( + "Receive pump for eventHub (%s), consumerGroup (%s), partition (%s) exiting after receive exception %s", + this.eventHubName, this.consumerGroupName, this.receiver.getPartitionId(), clientException.toString())); + } + + this.onReceiveHandler.onError(clientException); + } + } + + private void handleUserCodeExceptions(final Throwable userCodeException) { + this.isPumpHealthy = false; + if (TRACE_LOGGER.isErrorEnabled()) { + TRACE_LOGGER.error( + String.format("Receive pump for eventHub (%s), consumerGroup (%s), partition (%s) " + + "exiting after user-code exception %s", + this.eventHubName, this.consumerGroupName, this.receiver.getPartitionId(), userCodeException.toString())); + } + 
+ this.onReceiveHandler.onError(userCodeException); + + if (userCodeException instanceof InterruptedException) { + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info(String.format("Interrupting receive pump for eventHub (%s), consumerGroup (%s), partition (%s)", + this.eventHubName, this.consumerGroupName, this.receiver.getPartitionId())); + } + + Thread.currentThread().interrupt(); + } + } + + private void schedulePump() { + try { + this.executor.execute(this); + } catch (final RejectedExecutionException rejectedException) { + this.isPumpHealthy = false; + + if (TRACE_LOGGER.isWarnEnabled()) { + TRACE_LOGGER.warn(String.format( + "Receive pump for eventHub (%s), consumerGroup (%s), partition (%s) exiting with error: %s", + this.eventHubName, this.consumerGroupName, ReceivePump.this.receiver.getPartitionId(), rejectedException.toString())); + } + + this.onReceiveHandler.onError(rejectedException); + } + } + + // partition receiver contract against which this pump works + public interface IPartitionReceiver { + String getPartitionId(); + + CompletableFuture> receive(final int maxBatchSize); + } + + private final class ProcessAndReschedule implements BiFunction, Throwable, Void> { + + @Override + public Void apply(final Iterable receivedEvents, final Throwable clientException) { + + ReceivePump.this.handleClientExceptions(clientException); + + try { + // don't invoke user call back - if stop is already raised / pump is unhealthy + if (ReceivePump.this.shouldContinue() && + (receivedEvents != null + || (receivedEvents == null && ReceivePump.this.invokeOnTimeout))) { + ReceivePump.this.onReceiveHandler.onReceive(receivedEvents); + } + } catch (final Throwable userCodeError) { + ReceivePump.this.handleUserCodeExceptions(userCodeError); + } + + ReceivePump.this.schedulePump(); + + return null; + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReceiverContext.java 
b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReceiverContext.java new file mode 100644 index 0000000000000..7c0597780eed5 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReceiverContext.java @@ -0,0 +1,77 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.impl; + +import com.microsoft.azure.eventhubs.ErrorContext; + +import java.util.Locale; + +public class ReceiverContext extends ErrorContext { + final static boolean EPOCH_RECEIVER_TYPE = true; + final static boolean NON_EPOCH_RECEIVER_TYPE = !ReceiverContext.EPOCH_RECEIVER_TYPE; + + final String receivePath; + final String referenceId; + final Integer prefetchCount; + final Integer currentLinkCredit; + final Integer prefetchQueueLength; + + ReceiverContext( + final String namespaceName, + final String receivePath, + final String referenceId, + final Integer prefetchCount, + final Integer currentLinkCredit, + final Integer prefetchQueueLength) { + super(namespaceName); + this.receivePath = receivePath; + this.referenceId = referenceId; + this.prefetchCount = prefetchCount; + this.currentLinkCredit = currentLinkCredit; + this.prefetchQueueLength = prefetchQueueLength; + } + + @Override + public String toString() { + final String superString = super.toString(); + StringBuilder toString = new StringBuilder(); + + if (!StringUtil.isNullOrEmpty(superString)) { + toString.append(superString); + toString.append(", "); + } + + if (this.receivePath != null) { + toString.append(String.format(Locale.US, "PATH: %s", this.receivePath)); + toString.append(", "); + } + + if (this.referenceId != null) { + toString.append(String.format(Locale.US, "REFERENCE_ID: %s", this.referenceId)); + toString.append(", "); + } + + if (this.prefetchCount != null) { + 
toString.append(String.format(Locale.US, "PREFETCH_COUNT: %s", this.prefetchCount)); + toString.append(", "); + } + + if (this.currentLinkCredit != null) { + toString.append(String.format(Locale.US, "LINK_CREDIT: %s", this.currentLinkCredit)); + toString.append(", "); + } + + if (this.prefetchQueueLength != null) { + toString.append(String.format(Locale.US, "PREFETCH_Q_LEN: %s", this.prefetchQueueLength)); + toString.append(", "); + } + + if (toString.length() > 2) { + toString.setLength(toString.length() - 2); + } + + return toString.toString(); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReceiverSettingsProvider.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReceiverSettingsProvider.java new file mode 100644 index 0000000000000..e5b559520126c --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReceiverSettingsProvider.java @@ -0,0 +1,19 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.impl; + +import org.apache.qpid.proton.amqp.Symbol; +import org.apache.qpid.proton.amqp.UnknownDescribedType; +import org.apache.qpid.proton.message.Message; + +import java.util.Map; + +public interface ReceiverSettingsProvider { + Map getFilter(final Message lastReceivedMessage); + + Map getProperties(); + + Symbol[] getDesiredCapabilities(); +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReplayableWorkItem.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReplayableWorkItem.java new file mode 100644 index 0000000000000..dae7f31e2fcef --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/ReplayableWorkItem.java @@ -0,0 +1,74 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.impl; + +import java.time.Duration; +import java.util.concurrent.CompletableFuture; + +public class ReplayableWorkItem extends WorkItem { + private byte[] amqpMessage; + private int messageFormat; + private int encodedMessageSize; + private boolean waitingForAck; + + private Exception lastKnownException; + private CompletableFuture timeoutTask; + + public ReplayableWorkItem(final byte[] amqpMessage, final int encodedMessageSize, final int messageFormat, final CompletableFuture completableFuture, final Duration timeout) { + super(completableFuture, timeout); + this.initialize(amqpMessage, encodedMessageSize, messageFormat); + } + + public ReplayableWorkItem(final byte[] amqpMessage, final int encodedMessageSize, final int messageFormat, final CompletableFuture completableFuture, final TimeoutTracker timeout) { + super(completableFuture, timeout); + this.initialize(amqpMessage, encodedMessageSize, messageFormat); + } + + private void initialize(final 
byte[] amqpMessage, final int encodedMessageSize, final int messageFormat) { + this.amqpMessage = amqpMessage; + this.messageFormat = messageFormat; + this.encodedMessageSize = encodedMessageSize; + } + + public byte[] getMessage() { + return this.amqpMessage; + } + + public void clearMessage() { + this.amqpMessage = null; + } + + public int getEncodedMessageSize() { + return this.encodedMessageSize; + } + + public int getMessageFormat() { + return this.messageFormat; + } + + public Exception getLastKnownException() { + return this.lastKnownException; + } + + public void setLastKnownException(Exception exception) { + this.lastKnownException = exception; + } + + public CompletableFuture getTimeoutTask() { + return this.timeoutTask; + } + + public void setTimeoutTask(final CompletableFuture timeoutTask) { + this.timeoutTask = timeoutTask; + } + + public void setWaitingForAck() { + this.waitingForAck = true; + } + + public boolean isWaitingForAck() { + return this.waitingForAck; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RequestResponseChannel.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RequestResponseChannel.java new file mode 100644 index 0000000000000..68a9719b4ab5e --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RequestResponseChannel.java @@ -0,0 +1,263 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.impl; + +import com.microsoft.azure.eventhubs.EventHubException; +import org.apache.qpid.proton.Proton; +import org.apache.qpid.proton.amqp.UnsignedLong; +import org.apache.qpid.proton.amqp.messaging.Source; +import org.apache.qpid.proton.amqp.messaging.Target; +import org.apache.qpid.proton.amqp.transport.ErrorCondition; +import org.apache.qpid.proton.amqp.transport.ReceiverSettleMode; +import org.apache.qpid.proton.amqp.transport.SenderSettleMode; +import org.apache.qpid.proton.engine.*; +import org.apache.qpid.proton.message.Message; + +import java.util.HashMap; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; + +public class RequestResponseChannel implements IOObject { + + private final Sender sendLink; + private final Receiver receiveLink; + private final String replyTo; + private final HashMap> inflightRequests; + private final AtomicLong requestId; + private final AtomicInteger openRefCount; + private final AtomicInteger closeRefCount; + + private OperationResult onOpen; + private OperationResult onClose; // handles closeLink due to failures + private OperationResult onGraceFullClose; // handles intentional close + + public RequestResponseChannel( + final String linkName, + final String path, + final Session session) { + + this.replyTo = path.replace("$", "") + "-client-reply-to"; + this.openRefCount = new AtomicInteger(2); + this.closeRefCount = new AtomicInteger(2); + this.inflightRequests = new HashMap<>(); + this.requestId = new AtomicLong(0); + + this.sendLink = session.sender(linkName + ":sender"); + final Target target = new Target(); + target.setAddress(path); + this.sendLink.setTarget(target); + sendLink.setSource(new Source()); + this.sendLink.setSenderSettleMode(SenderSettleMode.SETTLED); + BaseHandler.setHandler(this.sendLink, new SendLinkHandler(new RequestHandler())); + + this.receiveLink = session.receiver(linkName + 
":receiver"); + final Source source = new Source(); + source.setAddress(path); + this.receiveLink.setSource(source); + final Target receiverTarget = new Target(); + receiverTarget.setAddress(this.replyTo); + this.receiveLink.setTarget(receiverTarget); + this.receiveLink.setSenderSettleMode(SenderSettleMode.SETTLED); + this.receiveLink.setReceiverSettleMode(ReceiverSettleMode.SECOND); + BaseHandler.setHandler(this.receiveLink, new ReceiveLinkHandler(new ResponseHandler())); + } + + // open should be called only once - we use FaultTolerantObject for that + public void open(final OperationResult onOpen, final OperationResult onClose) { + + this.onOpen = onOpen; + this.onClose = onClose; + this.sendLink.open(); + this.receiveLink.open(); + } + + // close should be called exactly once - we use FaultTolerantObject for that + public void close(final OperationResult onGraceFullClose) { + + this.onGraceFullClose = onGraceFullClose; + this.sendLink.close(); + this.receiveLink.close(); + } + + public Sender getSendLink() { + return this.sendLink; + } + + public Receiver getReceiveLink() { + return this.receiveLink; + } + + // not thread-safe + // this must be invoked from reactor/dispatcher thread + // & assumes that this is run on Opened Object + public void request( + final Message message, + final OperationResult onResponse) { + + if (message == null) + throw new IllegalArgumentException("message cannot be null"); + + if (message.getMessageId() != null) + throw new IllegalArgumentException("message.getMessageId() should be null"); + + if (message.getReplyTo() != null) + throw new IllegalArgumentException("message.getReplyTo() should be null"); + + message.setMessageId("request" + UnsignedLong.valueOf(this.requestId.incrementAndGet()).toString()); + message.setReplyTo(this.replyTo); + + this.inflightRequests.put(message.getMessageId(), onResponse); + + sendLink.delivery(UUID.randomUUID().toString().replace("-", StringUtil.EMPTY).getBytes()); + final int payloadSize = 
AmqpUtil.getDataSerializedSize(message) + 512; // need buffer for headers + + final byte[] bytes = new byte[payloadSize]; + final int encodedSize = message.encode(bytes, 0, payloadSize); + + receiveLink.flow(1); + sendLink.send(bytes, 0, encodedSize); + sendLink.advance(); + } + + private void onLinkOpenComplete(final Exception exception) { + + if (openRefCount.decrementAndGet() <= 0 && onOpen != null) + if (exception == null && this.sendLink.getRemoteState() == EndpointState.ACTIVE && this.receiveLink.getRemoteState() == EndpointState.ACTIVE) + onOpen.onComplete(null); + else { + if (exception != null) + onOpen.onError(exception); + else { + final ErrorCondition error = (this.sendLink.getRemoteCondition() != null && this.sendLink.getRemoteCondition().getCondition() != null) + ? this.sendLink.getRemoteCondition() + : this.receiveLink.getRemoteCondition(); + onOpen.onError(ExceptionUtil.toException(error)); + } + } + } + + private void onLinkCloseComplete(final Exception exception) { + + if (closeRefCount.decrementAndGet() <= 0) + if (exception == null) { + onClose.onComplete(null); + if (onGraceFullClose != null) + onGraceFullClose.onComplete(null); + } else { + onClose.onError(exception); + if (onGraceFullClose != null) + onGraceFullClose.onError(exception); + } + } + + @Override + public IOObjectState getState() { + + if (sendLink.getLocalState() == EndpointState.UNINITIALIZED || receiveLink.getLocalState() == EndpointState.UNINITIALIZED + || sendLink.getRemoteState() == EndpointState.UNINITIALIZED || receiveLink.getRemoteState() == EndpointState.UNINITIALIZED) + return IOObjectState.OPENING; + + if (sendLink.getRemoteState() == EndpointState.ACTIVE && receiveLink.getRemoteState() == EndpointState.ACTIVE + && sendLink.getLocalState() == EndpointState.ACTIVE && receiveLink.getRemoteState() == EndpointState.ACTIVE) + return IOObjectState.OPENED; + + if (sendLink.getRemoteState() == EndpointState.CLOSED && receiveLink.getRemoteState() == EndpointState.CLOSED) + 
return IOObjectState.CLOSED; + + return IOObjectState.CLOSING; // only left cases are if some are active and some are closed + } + + private class RequestHandler implements AmqpSender { + + @Override + public void onFlow(int creditIssued) { + } + + @Override + public void onSendComplete(Delivery delivery) { + } + + @Override + public void onOpenComplete(Exception completionException) { + + onLinkOpenComplete(completionException); + } + + @Override + public void onError(Exception exception) { + + onLinkCloseComplete(exception); + } + + @Override + public void onClose(ErrorCondition condition) { + + if (condition == null || condition.getCondition() == null) + onLinkCloseComplete(null); + else + onError(ExceptionUtil.toException(condition)); + } + + } + + private class ResponseHandler implements AmqpReceiver { + + @Override + public void onReceiveComplete(Delivery delivery) { + + final Message response = Proton.message(); + final int msgSize = delivery.pending(); + final byte[] buffer = new byte[msgSize]; + + final int read = receiveLink.recv(buffer, 0, msgSize); + + response.decode(buffer, 0, read); + delivery.settle(); + + final OperationResult responseCallback = inflightRequests.remove(response.getCorrelationId()); + if (responseCallback != null) + responseCallback.onComplete(response); + } + + @Override + public void onOpenComplete(Exception completionException) { + + onLinkOpenComplete(completionException); + } + + @Override + public void onError(Exception exception) { + + this.cancelPendingRequests(exception); + + if (onClose != null) + onLinkCloseComplete(exception); + } + + @Override + public void onClose(ErrorCondition condition) { + + if (condition == null || condition.getCondition() == null) { + this.cancelPendingRequests( + new EventHubException( + ClientConstants.DEFAULT_IS_TRANSIENT, + "The underlying request-response channel closed, recreate the channel and retry the request.")); + + if (onClose != null) + onLinkCloseComplete(null); + } else { + 
this.onError(ExceptionUtil.toException(condition)); + } + } + + private void cancelPendingRequests(final Exception exception) { + for (OperationResult responseCallback : inflightRequests.values()) + responseCallback.onError(exception); + + inflightRequests.clear(); + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RequestResponseCloser.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RequestResponseCloser.java new file mode 100644 index 0000000000000..cfd57ac043e48 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RequestResponseCloser.java @@ -0,0 +1,33 @@ +package com.microsoft.azure.eventhubs.impl; + +public class RequestResponseCloser implements Operation { + private FaultTolerantObject innerChannel = null; + + public RequestResponseCloser() { + } + + // innerChannel is not available when this object is constructed, have to set later + public void setInnerChannel(final FaultTolerantObject innerChannel) { + this.innerChannel = innerChannel; + } + + @Override + public void run(OperationResult closeOperationCallback) { + final RequestResponseChannel channelToBeClosed = this.innerChannel.unsafeGetIfOpened(); + if (channelToBeClosed == null) { + closeOperationCallback.onComplete(null); + } else { + channelToBeClosed.close(new OperationResult() { + @Override + public void onComplete(Void result) { + closeOperationCallback.onComplete(result); + } + + @Override + public void onError(Exception error) { + closeOperationCallback.onError(error); + } + }); + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RequestResponseOpener.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RequestResponseOpener.java new file mode 100644 index 0000000000000..bd4d556170845 --- /dev/null +++ 
b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RequestResponseOpener.java @@ -0,0 +1,77 @@ +package com.microsoft.azure.eventhubs.impl; + +import org.apache.qpid.proton.amqp.transport.ErrorCondition; +import org.apache.qpid.proton.engine.Session; + +import java.util.function.BiConsumer; + +public class RequestResponseOpener implements Operation { + private final SessionProvider sessionProvider; + private final String sessionName; + private final String linkName; + private final String endpointAddress; + private final AmqpConnection eventDispatcher; + + public RequestResponseOpener(final SessionProvider sessionProvider, final String sessionName, final String linkName, + final String endpointAddress, final AmqpConnection eventDispatcher) { + this.sessionProvider = sessionProvider; + this.sessionName = sessionName; + this.linkName = linkName; + this.endpointAddress = endpointAddress; + this.eventDispatcher = eventDispatcher; + } + + @Override + public void run(OperationResult operationCallback) { + + final Session session = this.sessionProvider.getSession( + this.sessionName, + null, + new BiConsumer() { + @Override + public void accept(ErrorCondition error, Exception exception) { + if (error != null) + operationCallback.onError(ExceptionUtil.toException(error)); + else if (exception != null) + operationCallback.onError(exception); + } + }); + + if (session == null) + return; + + final RequestResponseChannel requestResponseChannel = new RequestResponseChannel( + this.linkName, + this.endpointAddress, + session); + + requestResponseChannel.open( + new OperationResult() { + @Override + public void onComplete(Void result) { + eventDispatcher.registerForConnectionError(requestResponseChannel.getSendLink()); + eventDispatcher.registerForConnectionError(requestResponseChannel.getReceiveLink()); + + operationCallback.onComplete(requestResponseChannel); + } + + @Override + public void onError(Exception error) { + 
operationCallback.onError(error); + } + }, + new OperationResult() { + @Override + public void onComplete(Void result) { + eventDispatcher.deregisterForConnectionError(requestResponseChannel.getSendLink()); + eventDispatcher.deregisterForConnectionError(requestResponseChannel.getReceiveLink()); + } + + @Override + public void onError(Exception error) { + eventDispatcher.deregisterForConnectionError(requestResponseChannel.getSendLink()); + eventDispatcher.deregisterForConnectionError(requestResponseChannel.getReceiveLink()); + } + }); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RetryExponential.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RetryExponential.java new file mode 100644 index 0000000000000..c356c7d929127 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/RetryExponential.java @@ -0,0 +1,65 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.impl; + +import com.microsoft.azure.eventhubs.ConnectionStringBuilder; +import com.microsoft.azure.eventhubs.RetryPolicy; + +import java.time.Duration; + +/** + * RetryPolicy implementation where the delay between retries will grow in an exponential manner. + * RetryPolicy can be set on the client operations using {@link ConnectionStringBuilder}. 
+ * RetryIntervals will be computed using a retryFactor which is a function of deltaBackOff (MaximumBackoff - MinimumBackoff) and MaximumRetryCount + */ +public final class RetryExponential extends RetryPolicy { + private final Duration minimumBackoff; + private final Duration maximumBackoff; + private final int maximumRetryCount; + private final double retryFactor; + + public RetryExponential(final Duration minimumBackoff, final Duration maximumBackoff, final int maximumRetryCount, final String name) { + super(name); + + this.minimumBackoff = minimumBackoff; + this.maximumBackoff = maximumBackoff; + this.maximumRetryCount = maximumRetryCount; + this.retryFactor = this.computeRetryFactor(); + } + + @Override + protected Duration onGetNextRetryInterval(final String clientId, final Exception lastException, final Duration remainingTime, final int baseWaitTimeSecs) { + int currentRetryCount = this.getRetryCount(clientId); + + if (currentRetryCount >= this.maximumRetryCount) { + return null; + } + + if (!RetryPolicy.isRetryableException(lastException)) { + return null; + } + + double nextRetryInterval = Math.pow(this.retryFactor, (double) currentRetryCount); + long nextRetryIntervalSeconds = (long) nextRetryInterval; + long nextRetryIntervalNano = (long) ((nextRetryInterval - (double) nextRetryIntervalSeconds) * 1000000000); + if (remainingTime.getSeconds() < Math.max(nextRetryInterval, ClientConstants.TIMER_TOLERANCE.getSeconds())) { + return null; + } + + Duration retryAfter = this.minimumBackoff.plus(Duration.ofSeconds(nextRetryIntervalSeconds, nextRetryIntervalNano)); + retryAfter = retryAfter.plus(Duration.ofSeconds(baseWaitTimeSecs)); + + return retryAfter; + } + + private double computeRetryFactor() { + long deltaBackoff = this.maximumBackoff.minus(this.minimumBackoff).getSeconds(); + if (deltaBackoff <= 0 || this.maximumRetryCount <= 0) { + return 0; + } + + return (Math.log(deltaBackoff) / Math.log(this.maximumRetryCount)); + } +} diff --git 
a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SchedulerProvider.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SchedulerProvider.java new file mode 100644 index 0000000000000..03e6d91c9d8ce --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SchedulerProvider.java @@ -0,0 +1,6 @@ +package com.microsoft.azure.eventhubs.impl; + +interface SchedulerProvider { + + ReactorDispatcher getReactorDispatcher(); +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SendLinkHandler.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SendLinkHandler.java new file mode 100644 index 0000000000000..8a9df26a626c3 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SendLinkHandler.java @@ -0,0 +1,103 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.impl; + +import org.apache.qpid.proton.engine.Delivery; +import org.apache.qpid.proton.engine.Event; +import org.apache.qpid.proton.engine.Link; +import org.apache.qpid.proton.engine.Sender; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Locale; + +public class SendLinkHandler extends BaseLinkHandler { + private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(SendLinkHandler.class); + private final AmqpSender msgSender; + private final Object firstFlow; + private boolean isFirstFlow; + + public SendLinkHandler(final AmqpSender sender) { + super(sender); + + this.msgSender = sender; + this.firstFlow = new Object(); + this.isFirstFlow = true; + } + + @Override + public void onLinkLocalOpen(Event event) { + Link link = event.getLink(); + if (link instanceof Sender) { + Sender sender = (Sender) link; + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info(String.format("onLinkLocalOpen linkName[%s], localTarget[%s]", sender.getName(), sender.getTarget())); + } + } + } + + @Override + public void onLinkRemoteOpen(Event event) { + Link link = event.getLink(); + if (link instanceof Sender) { + Sender sender = (Sender) link; + if (link.getRemoteTarget() != null) { + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info(String.format(Locale.US, "onLinkRemoteOpen linkName[%s], remoteTarget[%s]", sender.getName(), link.getRemoteTarget())); + } + + synchronized (this.firstFlow) { + this.isFirstFlow = false; + this.msgSender.onOpenComplete(null); + } + } else { + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info( + String.format(Locale.US, "onLinkRemoteOpen linkName[%s], remoteTarget[null], remoteSource[null], action[waitingForError]", sender.getName())); + } + } + } + } + + @Override + public void onDelivery(Event event) { + Delivery delivery = event.getDelivery(); + + while (delivery != null) { + Sender sender = (Sender) delivery.getLink(); + + if (TRACE_LOGGER.isTraceEnabled()) { 
+ TRACE_LOGGER.trace( + "onDelivery linkName[" + sender.getName() + + "], unsettled[" + sender.getUnsettled() + "], credit[" + sender.getRemoteCredit() + "], deliveryState[" + delivery.getRemoteState() + + "], delivery.isBuffered[" + delivery.isBuffered() + "], delivery.id[" + new String(delivery.getTag()) + "]"); + } + + msgSender.onSendComplete(delivery); + delivery.settle(); + + delivery = sender.current(); + } + } + + @Override + public void onLinkFlow(Event event) { + if (this.isFirstFlow) { + synchronized (this.firstFlow) { + if (this.isFirstFlow) { + this.msgSender.onOpenComplete(null); + this.isFirstFlow = false; + } + } + } + + Sender sender = event.getSender(); + this.msgSender.onFlow(sender.getRemoteCredit()); + + if (TRACE_LOGGER.isDebugEnabled()) { + TRACE_LOGGER.debug("onLinkFlow linkName[" + sender.getName() + "], unsettled[" + sender.getUnsettled() + "], credit[" + sender.getCredit() + "]"); + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SenderContext.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SenderContext.java new file mode 100644 index 0000000000000..6fd4952ce6cd5 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SenderContext.java @@ -0,0 +1,59 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.impl; + +import com.microsoft.azure.eventhubs.ErrorContext; + +import java.util.Locale; + +public class SenderContext extends ErrorContext { + final String sendPath; + final String referenceId; + final Integer currentLinkCredit; + + SenderContext( + final String namespaceName, + final String sendPath, + final String referenceId, + final Integer currentLinkCredit) { + super(namespaceName); + + this.sendPath = sendPath; + this.referenceId = referenceId; + this.currentLinkCredit = currentLinkCredit; + } + + @Override + public String toString() { + final String superString = super.toString(); + StringBuilder toString = new StringBuilder(); + + if (!StringUtil.isNullOrEmpty(superString)) { + toString.append(superString); + toString.append(", "); + } + + if (this.sendPath != null) { + toString.append(String.format(Locale.US, "PATH: %s", this.sendPath)); + toString.append(", "); + } + + if (this.referenceId != null) { + toString.append(String.format(Locale.US, "REFERENCE_ID: %s", this.referenceId)); + toString.append(", "); + } + + if (this.currentLinkCredit != null) { + toString.append(String.format(Locale.US, "LINK_CREDIT: %s", this.currentLinkCredit)); + toString.append(", "); + } + + if (toString.length() > 2) { + toString.setLength(toString.length() - 2); + } + + return toString.toString(); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SessionHandler.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SessionHandler.java new file mode 100644 index 0000000000000..65714d2e57c5f --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SessionHandler.java @@ -0,0 +1,175 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.impl; + +import com.microsoft.azure.eventhubs.EventHubException; +import org.apache.qpid.proton.amqp.transport.ErrorCondition; +import org.apache.qpid.proton.engine.*; +import org.apache.qpid.proton.reactor.Reactor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.time.Duration; +import java.util.Iterator; +import java.util.Locale; +import java.util.function.BiConsumer; +import java.util.function.Consumer; + +public class SessionHandler extends BaseHandler { + protected static final Logger TRACE_LOGGER = LoggerFactory.getLogger(SessionHandler.class); + + private final String entityName; + private final Consumer onRemoteSessionOpen; + private final BiConsumer onRemoteSessionOpenError; + private final Duration openTimeout; + + private boolean sessionCreated = false; + private boolean sessionOpenErrorDispatched = false; + + public SessionHandler(final String entityName, + final Consumer onRemoteSessionOpen, + final BiConsumer onRemoteSessionOpenError, + final Duration openTimeout) { + this.entityName = entityName; + this.onRemoteSessionOpenError = onRemoteSessionOpenError; + this.onRemoteSessionOpen = onRemoteSessionOpen; + this.openTimeout = openTimeout; + } + + @Override + public void onSessionLocalOpen(Event e) { + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info(String.format(Locale.US, "onSessionLocalOpen entityName[%s], condition[%s]", this.entityName, + e.getSession().getCondition() == null ? 
"none" : e.getSession().getCondition().toString())); + } + + if (this.onRemoteSessionOpenError != null) { + ReactorHandler reactorHandler = null; + final Reactor reactor = e.getReactor(); + final Iterator reactorEventHandlers = reactor.getHandler().children(); + while (reactorEventHandlers.hasNext()) { + final Handler currentHandler = reactorEventHandlers.next(); + if (currentHandler instanceof ReactorHandler) { + reactorHandler = (ReactorHandler) currentHandler; + break; + } + } + + final ReactorDispatcher reactorDispatcher = reactorHandler.getReactorDispatcher(); + final Session session = e.getSession(); + + try { + reactorDispatcher.invoke((int) this.openTimeout.toMillis(), new SessionTimeoutHandler(session, entityName)); + } catch (IOException ioException) { + if (TRACE_LOGGER.isWarnEnabled()) { + TRACE_LOGGER.warn(String.format(Locale.US, "onSessionLocalOpen entityName[%s], reactorDispatcherError[%s]", + this.entityName, ioException.getMessage())); + } + + session.close(); + this.onRemoteSessionOpenError.accept( + null, + new EventHubException( + false, + String.format("onSessionLocalOpen entityName[%s], underlying IO of reactorDispatcher faulted with error: %s", + this.entityName, ioException.getMessage()), ioException)); + } + } + } + + @Override + public void onSessionRemoteOpen(Event e) { + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info(String.format(Locale.US, "onSessionRemoteOpen entityName[%s], sessionIncCapacity[%s], sessionOutgoingWindow[%s]", + this.entityName, e.getSession().getIncomingCapacity(), e.getSession().getOutgoingWindow())); + } + + final Session session = e.getSession(); + if (session != null && session.getLocalState() == EndpointState.UNINITIALIZED) { + session.open(); + } + + sessionCreated = true; + if (this.onRemoteSessionOpen != null) + this.onRemoteSessionOpen.accept(session); + } + + @Override + public void onSessionLocalClose(Event e) { + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info(String.format(Locale.US, 
"onSessionLocalClose entityName[%s], condition[%s]", this.entityName, + e.getSession().getCondition() == null ? "none" : e.getSession().getCondition().toString())); + } + } + + @Override + public void onSessionRemoteClose(Event e) { + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info(String.format(Locale.US, "onSessionRemoteClose entityName[%s], condition[%s]", this.entityName, + e.getSession().getRemoteCondition() == null ? "none" : e.getSession().getRemoteCondition().toString())); + } + + final Session session = e.getSession(); + ErrorCondition condition = session != null ? session.getRemoteCondition() : null; + + if (session != null && session.getLocalState() != EndpointState.CLOSED) { + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info(String.format(Locale.US, "onSessionRemoteClose closing a local session for entityName[%s], condition[%s], description[%s]", + this.entityName, + condition != null ? condition.getCondition() : "n/a", + condition != null ? condition.getDescription() : "n/a")); + } + + session.setCondition(session.getRemoteCondition()); + session.close(); + } + + this.sessionOpenErrorDispatched = true; + if (!sessionCreated && this.onRemoteSessionOpenError != null) + this.onRemoteSessionOpenError.accept(condition, null); + } + + @Override + public void onSessionFinal(Event e) { + if (TRACE_LOGGER.isInfoEnabled()) { + final Session session = e.getSession(); + ErrorCondition condition = session != null ? session.getCondition() : null; + + TRACE_LOGGER.info(String.format(Locale.US, "onSessionFinal entityName[%s], condition[%s], description[%s]", + this.entityName, + condition != null ? condition.getCondition() : "n/a", + condition != null ? 
condition.getDescription() : "n/a")); + } + } + + private class SessionTimeoutHandler extends DispatchHandler { + + private final Session session; + private final String entityName; + + SessionTimeoutHandler(final Session session, final String entityName) { + this.session = session; + this.entityName = entityName; + } + + @Override + public void onEvent() { + // It is supposed to close a local session to handle timeout exception. + // However, closing the session can result in NPE because of proton-j bug (https://issues.apache.org/jira/browse/PROTON-1939). + // And the bug will cause the reactor thread to stop processing pending tasks scheduled on the reactor and + // as a result task won't be completed at all. + + // TODO: handle timeout error once the proton-j bug is fixed. + + if (!sessionCreated && !sessionOpenErrorDispatched) { + if (TRACE_LOGGER.isWarnEnabled()) { + TRACE_LOGGER.warn(String.format(Locale.US, "SessionTimeoutHandler.onEvent - entityName[%s], session open timed out.", + this.entityName)); + } + } + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SessionProvider.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SessionProvider.java new file mode 100644 index 0000000000000..a1a97eb8eec56 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SessionProvider.java @@ -0,0 +1,18 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.impl; + +import org.apache.qpid.proton.amqp.transport.ErrorCondition; +import org.apache.qpid.proton.engine.Session; + +import java.util.function.BiConsumer; +import java.util.function.Consumer; + +public interface SessionProvider { + Session getSession( + final String path, + final Consumer onSessionOpen, + final BiConsumer onSessionOpenError); +} \ No newline at end of file diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SharedAccessSignatureTokenProvider.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SharedAccessSignatureTokenProvider.java new file mode 100644 index 0000000000000..431c0c513f530 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/SharedAccessSignatureTokenProvider.java @@ -0,0 +1,85 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
/**
 * Produces Shared Access Signature (SAS) tokens for Event Hubs, either by signing
 * with a policy key (HMAC-SHA256) or by handing back a pre-computed signature.
 */
public class SharedAccessSignatureTokenProvider {
    final String keyName;
    final String sharedAccessKey;
    final String sharedAccessSignature;

    /** Key-based provider: tokens are generated per request from the policy key. */
    SharedAccessSignatureTokenProvider(
            final String keyName,
            final String sharedAccessKey) {
        this.keyName = keyName;
        this.sharedAccessKey = sharedAccessKey;
        this.sharedAccessSignature = null;
    }

    /** Pre-signed provider: {@link #getToken} always returns the supplied signature. */
    public SharedAccessSignatureTokenProvider(final String sharedAccessSignature) {
        this.keyName = null;
        this.sharedAccessKey = null;
        this.sharedAccessSignature = sharedAccessSignature;
    }

    /**
     * Builds a SAS token of the form
     * {@code SharedAccessSignature sr=<resource>&sig=<signature>&se=<expiry>&skn=<keyName>}.
     *
     * @param keyName          SAS policy name; must be non-blank
     * @param sharedAccessKey  SAS policy key; must be non-blank
     * @param resource         audience URI the token is scoped to; must be non-blank
     * @param tokenTimeToLive  positive token lifetime, second granularity
     * @return the signed SAS token
     * @throws IllegalArgumentException if any argument is blank or the TTL is not positive
     * @throws IOException              if UTF-8 encoding fails (effectively never)
     * @throws NoSuchAlgorithmException if HMAC-SHA256 is unavailable
     * @throws InvalidKeyException      if the key material is rejected by the MAC
     */
    public static String generateSharedAccessSignature(
            final String keyName,
            final String sharedAccessKey,
            final String resource,
            final Duration tokenTimeToLive)
            throws IOException, NoSuchAlgorithmException, InvalidKeyException {
        if (isBlank(keyName)) {
            throw new IllegalArgumentException("keyName cannot be empty");
        }
        if (isBlank(sharedAccessKey)) {
            throw new IllegalArgumentException("sharedAccessKey cannot be empty");
        }
        if (isBlank(resource)) {
            throw new IllegalArgumentException("resource cannot be empty");
        }
        if (tokenTimeToLive.isZero() || tokenTimeToLive.isNegative()) {
            throw new IllegalArgumentException("tokenTimeToLive has to positive and in the order-of seconds");
        }

        final String encoding = StandardCharsets.UTF_8.name();
        final String expiry = Long.toString(Instant.now().getEpochSecond() + tokenTimeToLive.getSeconds());
        final String encodedResource = URLEncoder.encode(resource, encoding);
        // string-to-sign is the encoded audience plus the expiry, newline-separated
        final String stringToSign = encodedResource + "\n" + expiry;

        final String algorithm = "HMACSHA256";
        final Mac hmac = Mac.getInstance(algorithm);
        hmac.init(new SecretKeySpec(sharedAccessKey.getBytes(encoding), algorithm));
        final String signature = Base64.getEncoder().encodeToString(hmac.doFinal(stringToSign.getBytes(encoding)));

        return String.format(Locale.US, "SharedAccessSignature sr=%s&sig=%s&se=%s&skn=%s",
                encodedResource,
                URLEncoder.encode(signature, encoding),
                URLEncoder.encode(expiry, encoding),
                URLEncoder.encode(keyName, encoding));
    }

    /**
     * Returns a token for the resource: the stored pre-signed signature if this
     * provider was built with one, otherwise a freshly generated SAS token.
     */
    public String getToken(final String resource, final Duration tokenTimeToLive) throws IOException, InvalidKeyException, NoSuchAlgorithmException {
        if (this.sharedAccessSignature != null) {
            return this.sharedAccessSignature;
        }
        return generateSharedAccessSignature(this.keyName, this.sharedAccessKey, resource, tokenTimeToLive);
    }

    // null, empty, or whitespace-only (same semantics as StringUtil.isNullOrWhiteSpace)
    private static boolean isBlank(final String value) {
        if (value == null) {
            return true;
        }
        for (int i = 0; i < value.length(); i++) {
            if (!Character.isWhitespace(value.charAt(i))) {
                return false;
            }
        }
        return true;
    }
}
+ */ +package com.microsoft.azure.eventhubs.impl; + +import java.util.UUID; + +public final class StringUtil { + public final static String EMPTY = ""; + + public static boolean isNullOrEmpty(String string) { + return (string == null || string.isEmpty()); + } + + public static boolean isNullOrWhiteSpace(String string) { + if (string == null) + return true; + + for (int index = 0; index < string.length(); index++) { + if (!Character.isWhitespace(string.charAt(index))) { + return false; + } + } + + return true; + } + + public static String getRandomString() { + return UUID.randomUUID().toString().substring(0, 6); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/TimeoutTracker.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/TimeoutTracker.java new file mode 100644 index 0000000000000..255fef69c9153 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/TimeoutTracker.java @@ -0,0 +1,48 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.impl; + +import java.time.*; + +public class TimeoutTracker { + private final Duration originalTimeout; + private boolean isTimerStarted; + private Instant startTime; + + /** + * @param timeout original operationTimeout + * @param startTrackingTimeout whether/not to start the timeout tracking - right now. 
if not started now, timer tracking will start upon the first call to {@link TimeoutTracker#elapsed()}/{@link TimeoutTracker#remaining()} + */ + public TimeoutTracker(Duration timeout, boolean startTrackingTimeout) { + if (timeout.compareTo(Duration.ZERO) < 0) { + throw new IllegalArgumentException("timeout should be non-negative"); + } + + this.originalTimeout = timeout; + + if (startTrackingTimeout) { + this.startTime = Instant.now(); + } + + this.isTimerStarted = startTrackingTimeout; + } + + public static TimeoutTracker create(Duration timeout) { + return new TimeoutTracker(timeout, true); + } + + public Duration remaining() { + return this.originalTimeout.minus(this.elapsed()); + } + + public Duration elapsed() { + if (!this.isTimerStarted) { + this.startTime = Instant.now(); + this.isTimerStarted = true; + } + + return Duration.between(this.startTime, Instant.now()); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/Timer.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/Timer.java new file mode 100644 index 0000000000000..a1cf865537322 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/Timer.java @@ -0,0 +1,61 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.impl; + +import java.io.IOException; +import java.time.Duration; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.RejectedExecutionException; + +final class Timer { + + final SchedulerProvider schedulerProvider; + + public Timer(final SchedulerProvider schedulerProvider) { + this.schedulerProvider = schedulerProvider; + } + + public CompletableFuture schedule( + final Runnable runnable, + final Duration runAfter) { + + final ScheduledTask scheduledTask = new ScheduledTask(runnable); + final CompletableFuture taskHandle = scheduledTask.getScheduledFuture(); + try { + this.schedulerProvider.getReactorDispatcher().invoke((int) runAfter.toMillis(), scheduledTask); + } catch (IOException | RejectedExecutionException e) { + taskHandle.completeExceptionally(e); + } + + return taskHandle; + } + + final static class ScheduledTask extends DispatchHandler { + + final CompletableFuture scheduledFuture; + final Runnable runnable; + + public ScheduledTask(final Runnable runnable) { + this.runnable = runnable; + this.scheduledFuture = new CompletableFuture<>(); + } + + @Override + public void onEvent() { + if (!scheduledFuture.isCancelled()) { + try { + runnable.run(); + scheduledFuture.complete(null); + } catch (Exception exception) { + scheduledFuture.completeExceptionally(exception); + } + } + } + + public CompletableFuture getScheduledFuture() { + return this.scheduledFuture; + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/TrackingUtil.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/TrackingUtil.java new file mode 100644 index 0000000000000..cc20776589d35 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/TrackingUtil.java @@ -0,0 +1,39 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. 
See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.impl; + +import org.apache.qpid.proton.engine.Session; + +import java.time.Instant; + +public final class TrackingUtil { + public static final String TRACKING_ID_TOKEN_SEPARATOR = "_"; + + private TrackingUtil() { + } + + /** + * parses ServiceBus role identifiers from trackingId + * + * @return null if no roleIdentifier found + */ + static String parseRoleIdentifier(final String trackingId) { + if (StringUtil.isNullOrWhiteSpace(trackingId) || !trackingId.contains(TRACKING_ID_TOKEN_SEPARATOR)) { + return null; + } + + return trackingId.substring(trackingId.indexOf(TRACKING_ID_TOKEN_SEPARATOR)); + } + + public static String getLinkName(final Session session) { + // returned linkName lookslike: ea9cac_8b_G27_1479943074829 + final String linkNamePrefix = StringUtil.getRandomString(); + final String linkNameWithServiceRoleTracker = session.getConnection() != null && !StringUtil.isNullOrEmpty(session.getConnection().getRemoteContainer()) ? + linkNamePrefix.concat(TrackingUtil.TRACKING_ID_TOKEN_SEPARATOR).concat(session.getConnection().getRemoteContainer() + .substring(Math.max(session.getConnection().getRemoteContainer().length() - 7, 0), session.getConnection().getRemoteContainer().length())) : + linkNamePrefix; + return linkNameWithServiceRoleTracker.concat(TrackingUtil.TRACKING_ID_TOKEN_SEPARATOR).concat(String.valueOf(Instant.now().toEpochMilli())); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/WebSocketConnectionHandler.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/WebSocketConnectionHandler.java new file mode 100644 index 0000000000000..b8ab9484b0122 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/WebSocketConnectionHandler.java @@ -0,0 +1,54 @@ +/* + * Copyright (c) Microsoft. 
All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.impl; + +import com.microsoft.azure.proton.transport.ws.impl.WebSocketImpl; +import org.apache.qpid.proton.engine.Event; +import org.apache.qpid.proton.engine.impl.TransportInternal; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class WebSocketConnectionHandler extends ConnectionHandler { + private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(WebSocketConnectionHandler.class); + + public WebSocketConnectionHandler(AmqpConnection amqpConnection) { + super(amqpConnection); + } + + @Override + protected void addTransportLayers(final Event event, final TransportInternal transport) { + final String hostName = event.getConnection().getHostname(); + + final WebSocketImpl webSocket = new WebSocketImpl(); + webSocket.configure( + hostName, + "/$servicebus/websocket", + "", + 0, + "AMQPWSB10", + null, + null); + + transport.addTransportLayer(webSocket); + + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info("addWebsocketHandshake: hostname[" + hostName +"]"); + } + + super.addTransportLayers(event, transport); + } + + @Override + protected int getProtocolPort() { + return ClientConstants.HTTPS_PORT; + } + + @Override + protected int getMaxFrameSize() { + // This is the current limitation of https://github.com/Azure/qpid-proton-j-extensions + // once, this library enables larger frames - this property can be removed. 
+ return 4 * 1024; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/WebSocketProxyConnectionHandler.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/WebSocketProxyConnectionHandler.java new file mode 100644 index 0000000000000..41dc0baa3d04a --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/WebSocketProxyConnectionHandler.java @@ -0,0 +1,194 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.impl; + +import com.microsoft.azure.proton.transport.proxy.ProxyHandler; +import com.microsoft.azure.proton.transport.proxy.impl.ProxyHandlerImpl; +import com.microsoft.azure.proton.transport.proxy.impl.ProxyImpl; + +import org.apache.qpid.proton.amqp.transport.ConnectionError; +import org.apache.qpid.proton.amqp.transport.ErrorCondition; +import org.apache.qpid.proton.engine.Connection; +import org.apache.qpid.proton.engine.Event; +import org.apache.qpid.proton.engine.Transport; +import org.apache.qpid.proton.engine.impl.TransportInternal; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.Authenticator; +import java.net.InetSocketAddress; +import java.net.PasswordAuthentication; +import java.net.Proxy; +import java.net.ProxySelector; +import java.net.URI; +import java.util.Base64; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class WebSocketProxyConnectionHandler extends WebSocketConnectionHandler { + private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(WebSocketProxyConnectionHandler.class); + private final String proxySelectorModifiedError = "ProxySelector has been modified."; + + public static Boolean shouldUseProxy(final String hostName) { + final URI uri = 
createURIFromHostNamePort(hostName, ClientConstants.HTTPS_PORT); + final ProxySelector proxySelector = ProxySelector.getDefault(); + if (proxySelector == null) { + return false; + } + + final List proxies = proxySelector.select(uri); + return isProxyAddressLegal(proxies); + } + + public WebSocketProxyConnectionHandler(AmqpConnection amqpConnection) { + super(amqpConnection); + } + + @Override + protected void addTransportLayers(final Event event, final TransportInternal transport) { + super.addTransportLayers(event, transport); + + final ProxyImpl proxy = new ProxyImpl(); + + // host name used to create proxy connect request + // after creating the socket to proxy + final String hostName = event.getConnection().getHostname(); + final ProxyHandler proxyHandler = new ProxyHandlerImpl(); + final Map proxyHeader = getAuthorizationHeader(); + proxy.configure(hostName, proxyHeader, proxyHandler, transport); + + transport.addTransportLayer(proxy); + + if (TRACE_LOGGER.isInfoEnabled()) { + TRACE_LOGGER.info("addProxyHandshake: hostname[" + hostName +"]"); + } + } + + @Override + protected void notifyTransportErrors(final Event event) { + final Transport transport = event.getTransport(); + final Connection connection = event.getConnection(); + if (connection == null || transport == null) { + return; + } + + final ErrorCondition errorCondition = transport.getCondition(); + final String hostName = event.getReactor().getConnectionAddress(connection); + final ProxySelector proxySelector = ProxySelector.getDefault(); + + if (errorCondition == null + || !(errorCondition.getCondition().equals(ConnectionError.FRAMING_ERROR) + || errorCondition.getCondition().equals(AmqpErrorCode.PROTON_IO_ERROR)) + || proxySelector == null + || StringUtil.isNullOrEmpty(hostName)) { + return; + } + + final String[] hostNameParts = hostName.split(":"); + if (hostNameParts.length != 2) { + return; + } + + int port; + try { + port = Integer.parseInt(hostNameParts[1]); + } catch (NumberFormatException 
ignore){ + return; + } + + final IOException ioException = reconstructIOException(errorCondition); + proxySelector.connectFailed( + createURIFromHostNamePort(this.getAmqpConnection().getHostName(), this.getProtocolPort()), + new InetSocketAddress(hostNameParts[0], port), + ioException); + } + + @Override + public String getRemoteHostName() { + final InetSocketAddress socketAddress = getProxyAddress(); + return socketAddress.getHostString(); + } + + @Override + public int getRemotePort() { + final InetSocketAddress socketAddress = getProxyAddress(); + return socketAddress.getPort(); + } + + private Map getAuthorizationHeader() { + final PasswordAuthentication authentication = Authenticator.requestPasswordAuthentication( + getRemoteHostName(), + null, + getRemotePort(), + null, + null, + "http", + null, + Authenticator.RequestorType.PROXY); + if (authentication == null) { + return null; + } + + final String proxyUserName = authentication.getUserName(); + final String proxyPassword = authentication.getPassword() != null + ? 
new String(authentication.getPassword()) + : null; + if (StringUtil.isNullOrEmpty(proxyUserName) + || StringUtil.isNullOrEmpty(proxyPassword)) { + return null; + } + + final HashMap proxyAuthorizationHeader = new HashMap<>(); + // https://tools.ietf.org/html/rfc7617 + final String usernamePasswordPair = proxyUserName + ":" + proxyPassword; + proxyAuthorizationHeader.put( + "Proxy-Authorization", + "Basic " + Base64.getEncoder().encodeToString(usernamePasswordPair.getBytes())); + return proxyAuthorizationHeader; + } + + private InetSocketAddress getProxyAddress() { + final URI serviceUri = createURIFromHostNamePort( + this.getAmqpConnection().getHostName(), + this.getProtocolPort()); + final ProxySelector proxySelector = ProxySelector.getDefault(); + if (proxySelector == null) { + throw new IllegalStateException(proxySelectorModifiedError); + } + + final List proxies = proxySelector.select(serviceUri); + if (!isProxyAddressLegal(proxies)) { + throw new IllegalStateException(proxySelectorModifiedError); + } + + final Proxy proxy = proxies.get(0); + return (InetSocketAddress) proxy.address(); + } + + private static URI createURIFromHostNamePort(final String hostName, final int port) { + return URI.create(String.format(ClientConstants.HTTPS_URI_FORMAT, hostName, port)); + } + + private static boolean isProxyAddressLegal(final List proxies) { + // we look only at the first proxy in the list + // if the proxy can be translated to InetSocketAddress + // only then - can we parse it to hostName and Port + // which is required by qpid-proton-j library reactor.connectToHost() API + return proxies != null + && !proxies.isEmpty() + && proxies.get(0).type() == Proxy.Type.HTTP + && proxies.get(0).address() != null + && proxies.get(0).address() instanceof InetSocketAddress; + } + + private static IOException reconstructIOException(ErrorCondition errorCondition) { + // since proton library communicates all errors based on amqp-error-condition + // it swallows the IOException and 
translates it to proton-io errorCode + // we reconstruct the IOException in this case - but, callstack is lost + return new IOException(errorCondition.getDescription()); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/WorkItem.java b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/WorkItem.java new file mode 100644 index 0000000000000..dc6cd3950f8af --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/main/java/com/microsoft/azure/eventhubs/impl/WorkItem.java @@ -0,0 +1,30 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.impl; + +import java.time.*; +import java.util.concurrent.*; + +public class WorkItem { + private final TimeoutTracker tracker; + private final CompletableFuture work; + + public WorkItem(final CompletableFuture completableFuture, final Duration timeout) { + this(completableFuture, TimeoutTracker.create(timeout)); + } + + public WorkItem(final CompletableFuture completableFuture, final TimeoutTracker tracker) { + this.work = completableFuture; + this.tracker = tracker; + } + + public TimeoutTracker getTimeoutTracker() { + return this.tracker; + } + + public CompletableFuture getWork() { + return this.work; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/concurrency/ConcurrentReceiversTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/concurrency/ConcurrentReceiversTest.java new file mode 100644 index 0000000000000..4e54c0b4cd6d3 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/concurrency/ConcurrentReceiversTest.java @@ -0,0 +1,127 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. 
See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.concurrency; + +import com.microsoft.azure.eventhubs.*; +import com.microsoft.azure.eventhubs.lib.ApiTestBase; +import com.microsoft.azure.eventhubs.lib.TestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.IOException; +import java.time.Duration; +import java.time.Instant; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; + +public class ConcurrentReceiversTest extends ApiTestBase { + static EventHubClient sender; + static PartitionReceiver[] receivers; + static EventHubClient ehClient; + static String consumerGroupName; + static ConnectionStringBuilder connStr; + static int partitionCount; + + int eventSentPerPartition = 1; + + @BeforeClass + public static void initialize() throws InterruptedException, ExecutionException, EventHubException, IOException { + connStr = TestContext.getConnectionString(); + + sender = EventHubClient.create(connStr.toString(), TestContext.EXECUTOR_SERVICE).get(); + partitionCount = sender.getRuntimeInformation().get().getPartitionCount(); + receivers = new PartitionReceiver[partitionCount]; + consumerGroupName = TestContext.getConsumerGroupName(); + } + + @AfterClass() + public static void cleanup() { + if (sender != null) { + sender.close(); + } + } + + @Test() + public void testParallelCreationOfReceivers() throws EventHubException, IOException, InterruptedException, ExecutionException, TimeoutException { + ehClient = EventHubClient.createSync(connStr.toString(), TestContext.EXECUTOR_SERVICE); + ReceiveAtleastOneEventValidator[] counter = new ReceiveAtleastOneEventValidator[partitionCount]; + + 
@SuppressWarnings("unchecked") CompletableFuture[] validationSignals = new CompletableFuture[partitionCount]; + @SuppressWarnings("unchecked") CompletableFuture[] receiverFutures = new CompletableFuture[partitionCount]; + for (int i = 0; i < partitionCount; i++) { + final int index = i; + receiverFutures[i] = ehClient.createReceiver(consumerGroupName, Integer.toString(i), EventPosition.fromEnqueuedTime(Instant.now())).thenAcceptAsync( + new Consumer() { + @Override + public void accept(final PartitionReceiver t) { + receivers[index] = t; + receivers[index].setReceiveTimeout(Duration.ofMillis(400)); + validationSignals[index] = new CompletableFuture(); + counter[index] = new ReceiveAtleastOneEventValidator(validationSignals[index], receivers[index]); + receivers[index].setReceiveHandler(counter[index]); + } + }); + } + + CompletableFuture.allOf(receiverFutures).get(partitionCount * 10, TimeUnit.SECONDS); + + @SuppressWarnings("unchecked") + CompletableFuture[] sendFutures = new CompletableFuture[partitionCount]; + for (int i = 0; i < partitionCount; i++) { + sendFutures[i] = TestBase.pushEventsToPartition(sender, Integer.toString(i), eventSentPerPartition); + } + + CompletableFuture.allOf(sendFutures).get(); + + CompletableFuture.allOf(validationSignals).get(partitionCount * 10, TimeUnit.SECONDS); + } + + @After + public void cleanupTest() { + for (int i = 0; i < partitionCount; i++) { + if (receivers[i] != null) { + receivers[i].close(); + } + } + + if (ehClient != null) { + ehClient.close(); + } + } + + public static final class ReceiveAtleastOneEventValidator implements PartitionReceiveHandler { + final CompletableFuture signalReceived; + final PartitionReceiver currentReceiver; + + public ReceiveAtleastOneEventValidator(final CompletableFuture signalReceived, final PartitionReceiver currentReceiver) { + this.signalReceived = signalReceived; + this.currentReceiver = currentReceiver; + } + + @Override + public int getMaxEventCount() { + return 50; + } + + 
@Override + public void onReceive(Iterable events) { + if (events != null && events.iterator().hasNext()) { + this.signalReceived.complete(null); + this.currentReceiver.setReceiveHandler(null); + } + } + + @Override + public void onError(Throwable error) { + this.signalReceived.completeExceptionally(error); + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/concurrency/EventHubClientTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/concurrency/EventHubClientTest.java new file mode 100644 index 0000000000000..c292d6a51ee98 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/concurrency/EventHubClientTest.java @@ -0,0 +1,69 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.concurrency; + +import com.microsoft.azure.eventhubs.ConnectionStringBuilder; +import com.microsoft.azure.eventhubs.EventHubClient; +import com.microsoft.azure.eventhubs.EventPosition; +import com.microsoft.azure.eventhubs.PartitionReceiver; +import com.microsoft.azure.eventhubs.lib.ApiTestBase; +import com.microsoft.azure.eventhubs.lib.TestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import org.junit.Assert; +import org.junit.Test; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; + +public class EventHubClientTest extends ApiTestBase { + + @Test() + public void testParallelEventHubClients() throws Exception { + + final String consumerGroupName = TestContext.getConsumerGroupName(); + final String partitionId = "0"; + final int noOfClients = 4; + final ScheduledExecutorService executorService = Executors.newScheduledThreadPool(1); + + @SuppressWarnings("unchecked") + CompletableFuture[] 
createFutures = new CompletableFuture[noOfClients]; + try { + ConnectionStringBuilder connectionString = TestContext.getConnectionString(); + for (int i = 0; i < noOfClients; i++) { + createFutures[i] = EventHubClient.create(connectionString.toString(), executorService); + } + + CompletableFuture.allOf(createFutures).get(); + boolean firstOne = true; + for (CompletableFuture createFuture : createFutures) { + final EventHubClient ehClient = createFuture.join(); + if (firstOne) { + TestBase.pushEventsToPartition(ehClient, partitionId, 10).get(); + firstOne = false; + } + + PartitionReceiver receiver = ehClient.createReceiverSync(consumerGroupName, partitionId, EventPosition.fromStartOfStream()); + try { + Assert.assertTrue(receiver.receiveSync(100).iterator().hasNext()); + } finally { + receiver.closeSync(); + } + } + } finally { + if (createFutures != null) { + for (CompletableFuture createFuture : createFutures) { + if (!createFuture.isCancelled() || !createFuture.isCompletedExceptionally()) { + EventHubClient ehClient = createFuture.join(); + ehClient.closeSync(); + } + } + } + + executorService.shutdown(); + } + } + +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/connstrbuilder/ConnStrBuilderTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/connstrbuilder/ConnStrBuilderTest.java new file mode 100644 index 0000000000000..4649b88628c2b --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/connstrbuilder/ConnStrBuilderTest.java @@ -0,0 +1,82 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.connstrbuilder; + +import com.microsoft.azure.eventhubs.ConnectionStringBuilder; +import com.microsoft.azure.eventhubs.IllegalConnectionStringFormatException; +import com.microsoft.azure.eventhubs.TransportType; +import com.microsoft.azure.eventhubs.lib.ApiTestBase; +import org.junit.Assert; +import org.junit.Test; + +import java.time.Duration; +import java.util.function.Consumer; + +public class ConnStrBuilderTest extends ApiTestBase { + static final String correctConnectionString = "Endpoint=sb://endpoint1;EntityPath=eventhub1;SharedAccessKeyName=somevalue;SharedAccessKey=something;OperationTimeout=PT5S;TransportType=AMQP"; + static final Consumer validateConnStrBuilder = new Consumer() { + @Override + public void accept(ConnectionStringBuilder connStrBuilder) { + Assert.assertTrue(connStrBuilder.getEventHubName().equals("eventhub1")); + Assert.assertTrue(connStrBuilder.getEndpoint().getHost().equals("endpoint1")); + Assert.assertTrue(connStrBuilder.getSasKey().equals("something")); + Assert.assertTrue(connStrBuilder.getSasKeyName().equals("somevalue")); + Assert.assertTrue(connStrBuilder.getTransportType() == TransportType.AMQP); + Assert.assertTrue(connStrBuilder.getOperationTimeout().equals(Duration.ofSeconds(5))); + } + }; + + @Test(expected = IllegalConnectionStringFormatException.class) + public void parseInvalidConnectionString() { + new ConnectionStringBuilder("something"); + } + + @Test(expected = IllegalConnectionStringFormatException.class) + public void throwOnUnrecognizedParts() { + new ConnectionStringBuilder(correctConnectionString + ";" + "something"); + } + + @Test(expected = IllegalConnectionStringFormatException.class) + public void throwOnInvalidTransportType() { + ConnectionStringBuilder connectionStringBuilder = new ConnectionStringBuilder(correctConnectionString); + String connectionStringWithTransportType = connectionStringBuilder.setTransportType(TransportType.AMQP_WEB_SOCKETS).toString(); + 
String connectionStringWithInvalidTransportType = connectionStringWithTransportType.replace(TransportType.AMQP_WEB_SOCKETS.toString(), "invalid"); + new ConnectionStringBuilder(connectionStringWithInvalidTransportType); + } + + @Test + public void parseValidConnectionString() { + final ConnectionStringBuilder connStrBuilder = new ConnectionStringBuilder(correctConnectionString); + validateConnStrBuilder.accept(connStrBuilder); + } + + @Test + public void exchangeConnectionStringAcrossConstructors() { + final ConnectionStringBuilder connStrBuilder = new ConnectionStringBuilder(correctConnectionString); + final ConnectionStringBuilder secondConnStr = new ConnectionStringBuilder() + .setEndpoint(connStrBuilder.getEndpoint()) + .setEventHubName(connStrBuilder.getEventHubName()) + .setSasKeyName(connStrBuilder.getSasKeyName()) + .setSasKey(connStrBuilder.getSasKey()) + .setTransportType(connStrBuilder.getTransportType()) + .setOperationTimeout(connStrBuilder.getOperationTimeout()); + + validateConnStrBuilder.accept(new ConnectionStringBuilder(secondConnStr.toString())); + } + + @Test + public void testPropertySetters() { + final ConnectionStringBuilder connStrBuilder = new ConnectionStringBuilder(correctConnectionString); + final ConnectionStringBuilder testConnStrBuilder = new ConnectionStringBuilder(connStrBuilder.toString()); + validateConnStrBuilder.accept(testConnStrBuilder); + + connStrBuilder.setOperationTimeout(Duration.ofSeconds(8)); + connStrBuilder.setTransportType(TransportType.AMQP_WEB_SOCKETS); + + ConnectionStringBuilder testConnStrBuilder1 = new ConnectionStringBuilder(connStrBuilder.toString()); + Assert.assertTrue(testConnStrBuilder1.getOperationTimeout().getSeconds() == 8); + Assert.assertTrue(testConnStrBuilder1.getTransportType() == TransportType.AMQP_WEB_SOCKETS); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/connstrbuilder/TransportTypeTest.java 
b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/connstrbuilder/TransportTypeTest.java new file mode 100644 index 0000000000000..68eb7fb8d9c8b --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/connstrbuilder/TransportTypeTest.java @@ -0,0 +1,145 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.connstrbuilder; + +import com.microsoft.azure.eventhubs.ConnectionStringBuilder; +import com.microsoft.azure.eventhubs.EventHubClient; +import com.microsoft.azure.eventhubs.TransportType; +import com.microsoft.azure.eventhubs.impl.ConnectionHandler; +import com.microsoft.azure.eventhubs.impl.EventHubClientImpl; +import com.microsoft.azure.eventhubs.impl.MessagingFactory; +import com.microsoft.azure.eventhubs.lib.ApiTestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; + +import org.jutils.jproxy.ProxyServer; +import org.junit.Assert; +import org.junit.Test; + +import java.io.IOException; +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.net.*; +import java.util.LinkedList; +import java.util.List; + +public class TransportTypeTest extends ApiTestBase { + + public volatile boolean isProxySelectorInvoked = false; + + @Test + public void transportTypeAmqpCreatesConnectionWithPort5671() throws Exception { + ConnectionStringBuilder builder = new ConnectionStringBuilder(TestContext.getConnectionString().toString()); + builder.setTransportType(TransportType.AMQP); + + EventHubClient ehClient = EventHubClient.createSync(builder.toString(), TestContext.EXECUTOR_SERVICE); + try { + EventHubClientImpl eventHubClientImpl = (EventHubClientImpl) ehClient; + final Field factoryField = EventHubClientImpl.class.getDeclaredField("underlyingFactory"); + factoryField.setAccessible(true); + final MessagingFactory 
underlyingFactory = (MessagingFactory) factoryField.get(eventHubClientImpl); + + final Field connectionHandlerField = MessagingFactory.class.getDeclaredField("connectionHandler"); + connectionHandlerField.setAccessible(true); + final ConnectionHandler connectionHandler = (ConnectionHandler) connectionHandlerField.get(underlyingFactory); + + final Method outboundSocketPort = ConnectionHandler.class.getDeclaredMethod("getRemotePort"); + outboundSocketPort.setAccessible(true); + + final Method protocolPort = ConnectionHandler.class.getDeclaredMethod("getProtocolPort"); + protocolPort.setAccessible(true); + + Assert.assertEquals(5671, outboundSocketPort.invoke(connectionHandler)); + Assert.assertEquals(5671, protocolPort.invoke(connectionHandler)); + } finally { + ehClient.closeSync(); + } + } + + @Test + public void transportTypeAmqpWebSocketsCreatesConnectionWithPort443() throws Exception { + ConnectionStringBuilder builder = new ConnectionStringBuilder(TestContext.getConnectionString().toString()); + builder.setTransportType(TransportType.AMQP_WEB_SOCKETS); + + EventHubClient ehClient = EventHubClient.createSync(builder.toString(), TestContext.EXECUTOR_SERVICE); + try { + EventHubClientImpl eventHubClientImpl = (EventHubClientImpl) ehClient; + final Field factoryField = EventHubClientImpl.class.getDeclaredField("underlyingFactory"); + factoryField.setAccessible(true); + final MessagingFactory underlyingFactory = (MessagingFactory) factoryField.get(eventHubClientImpl); + + final Field connectionHandlerField = MessagingFactory.class.getDeclaredField("connectionHandler"); + connectionHandlerField.setAccessible(true); + final ConnectionHandler connectionHandler = (ConnectionHandler) connectionHandlerField.get(underlyingFactory); + + final Method outboundSocketPort = ConnectionHandler.class.getDeclaredMethod("getRemotePort"); + outboundSocketPort.setAccessible(true); + + final Method protocolPort = ConnectionHandler.class.getDeclaredMethod("getProtocolPort"); + 
protocolPort.setAccessible(true); + + Assert.assertEquals(443, outboundSocketPort.invoke(connectionHandler)); + Assert.assertEquals(443, protocolPort.invoke(connectionHandler)); + } finally { + ehClient.closeSync(); + } + } + + @Test + public void transportTypeAmqpWebSocketsWithProxyCreatesConnectionWithCorrectPorts() throws Exception { + int proxyPort = 8899; + ProxyServer proxyServer = ProxyServer.create("localhost", proxyPort); + proxyServer.start(throwable -> {}); + + ProxySelector defaultProxySelector = ProxySelector.getDefault(); + this.isProxySelectorInvoked = false; + try { + ProxySelector.setDefault(new ProxySelector() { + @Override + public List select(URI uri) { + LinkedList proxies = new LinkedList<>(); + proxies.add(new Proxy(Proxy.Type.HTTP, new InetSocketAddress("localhost", proxyPort))); + isProxySelectorInvoked = true; + return proxies; + } + + @Override + public void connectFailed(URI uri, SocketAddress sa, IOException ioe) { + // no-op + } + }); + + ConnectionStringBuilder builder = new ConnectionStringBuilder(TestContext.getConnectionString().toString()); + builder.setTransportType(TransportType.AMQP_WEB_SOCKETS); + + EventHubClient ehClient = EventHubClient.createSync(builder.toString(), TestContext.EXECUTOR_SERVICE); + try { + EventHubClientImpl eventHubClientImpl = (EventHubClientImpl) ehClient; + final Field factoryField = EventHubClientImpl.class.getDeclaredField("underlyingFactory"); + factoryField.setAccessible(true); + final MessagingFactory underlyingFactory = (MessagingFactory) factoryField.get(eventHubClientImpl); + + final Field connectionHandlerField = MessagingFactory.class.getDeclaredField("connectionHandler"); + connectionHandlerField.setAccessible(true); + final ConnectionHandler connectionHandler = (ConnectionHandler) connectionHandlerField.get(underlyingFactory); + + final Method outboundSocketPort = ConnectionHandler.class.getDeclaredMethod("getRemotePort"); + outboundSocketPort.setAccessible(true); + + final Method 
protocolPort = ConnectionHandler.class.getDeclaredMethod("getProtocolPort"); + protocolPort.setAccessible(true); + + Assert.assertEquals(proxyPort, outboundSocketPort.invoke(connectionHandler)); + Assert.assertEquals(443, protocolPort.invoke(connectionHandler)); + + Assert.assertTrue(isProxySelectorInvoked); + } finally { + ehClient.closeSync(); + ProxySelector.setDefault(defaultProxySelector); + } + } finally { + proxyServer.stop(); + } + } +} \ No newline at end of file diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/BackCompatTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/BackCompatTest.java new file mode 100644 index 0000000000000..c4f8fb496d085 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/BackCompatTest.java @@ -0,0 +1,102 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.eventdata; + +import com.microsoft.azure.eventhubs.*; +import com.microsoft.azure.eventhubs.impl.MessageSender; +import com.microsoft.azure.eventhubs.impl.MessagingFactory; +import com.microsoft.azure.eventhubs.lib.ApiTestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import org.apache.qpid.proton.Proton; +import org.apache.qpid.proton.amqp.Binary; +import org.apache.qpid.proton.amqp.messaging.ApplicationProperties; +import org.apache.qpid.proton.amqp.messaging.Data; +import org.apache.qpid.proton.message.Message; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.time.Instant; +import java.util.HashMap; +import java.util.concurrent.ExecutionException; +import java.util.function.Consumer; + +public class BackCompatTest extends ApiTestBase { + static final String partitionId = "0"; + static final Message originalMessage = Proton.message(); + static final String applicationProperty = "firstProp"; + static final String intApplicationProperty = "intProp"; + static final String msgAnnotation = "message-annotation-1"; + static final String payload = "testmsg"; + static EventHubClient ehClient; + static MessagingFactory msgFactory; + static PartitionReceiver receiver; + static MessageSender partitionMsgSender; + static EventData receivedEvent; + + final Consumer validateAmqpPropertiesInEventData = new Consumer() { + @Override + public void accept(EventData eData) { + Assert.assertTrue(eData.getProperties().containsKey(applicationProperty) + && eData.getProperties().get(applicationProperty).equals(originalMessage.getApplicationProperties().getValue().get(applicationProperty))); + + Assert.assertTrue(eData.getProperties().containsKey(intApplicationProperty) + && 
eData.getProperties().get(intApplicationProperty).equals(originalMessage.getApplicationProperties().getValue().get(intApplicationProperty))); + + Assert.assertTrue(eData.getProperties().size() == 2); + + Assert.assertTrue(new String(eData.getBytes()).equals(payload)); + } + }; + + @BeforeClass + public static void initialize() throws EventHubException, IOException, InterruptedException, ExecutionException { + final ConnectionStringBuilder connStrBuilder = TestContext.getConnectionString(); + final String connectionString = connStrBuilder.toString(); + + ehClient = EventHubClient.createSync(connectionString, TestContext.EXECUTOR_SERVICE); + msgFactory = MessagingFactory.createFromConnectionString(connectionString, TestContext.EXECUTOR_SERVICE).get(); + receiver = ehClient.createReceiverSync(TestContext.getConsumerGroupName(), partitionId, EventPosition.fromEnqueuedTime(Instant.now())); + partitionMsgSender = MessageSender.create(msgFactory, "link1", connStrBuilder.getEventHubName() + "/partitions/" + partitionId).get(); + + // until version 0.10.0 - we used to have Properties as HashMap + // This specific combination is intended to test the back compat - with the new Properties type as HashMap + final HashMap appProperties = new HashMap<>(); + appProperties.put(applicationProperty, "value1"); + appProperties.put(intApplicationProperty, "3"); + // back compat end + + final ApplicationProperties applicationProperties = new ApplicationProperties(appProperties); + originalMessage.setApplicationProperties(applicationProperties); + + originalMessage.setBody(new Data(Binary.create(ByteBuffer.wrap(payload.getBytes())))); + + partitionMsgSender.send(originalMessage).get(); + receivedEvent = receiver.receiveSync(10).iterator().next(); + } + + @AfterClass + public static void cleanup() throws EventHubException { + if (partitionMsgSender != null) + partitionMsgSender.closeSync(); + + if (receiver != null) + receiver.closeSync(); + + if (ehClient != null) + ehClient.closeSync(); 
+ + if (msgFactory != null) + msgFactory.closeSync(); + } + + @Test + public void backCompatWithJavaSDKOlderThan_0_11_0() { + validateAmqpPropertiesInEventData.accept(receivedEvent); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/EventDataBatchTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/EventDataBatchTest.java new file mode 100644 index 0000000000000..744ed427359eb --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/EventDataBatchTest.java @@ -0,0 +1,33 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.eventdata; + +import com.microsoft.azure.eventhubs.*; +import com.microsoft.azure.eventhubs.lib.ApiTestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import org.junit.Assert; +import org.junit.Test; + +import java.io.IOException; +import java.util.concurrent.Executors; + +public class EventDataBatchTest extends ApiTestBase { + + private static EventHubClient ehClient; + + @Test(expected = PayloadSizeExceededException.class) + public void payloadExceededException() throws EventHubException, IOException { + final ConnectionStringBuilder connStrBuilder = TestContext.getConnectionString(); + ehClient = EventHubClient.createSync(connStrBuilder.toString(), Executors.newScheduledThreadPool(1)); + + final EventDataBatch batch = ehClient.createBatch(); + + final EventData within = EventData.create(new byte[1024]); + final EventData tooBig = EventData.create(new byte[1024 * 1024 * 2]); + + Assert.assertTrue(batch.tryAdd(within)); + batch.tryAdd(tooBig); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/EventDataTest.java 
b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/EventDataTest.java new file mode 100644 index 0000000000000..90d6998df41ab --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/EventDataTest.java @@ -0,0 +1,248 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.eventdata; + +import com.microsoft.azure.eventhubs.EventData; +import org.junit.Assert; +import org.junit.Test; + +import java.io.*; +import java.nio.ByteBuffer; + +public class EventDataTest { + final String payload = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + 
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + 
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + 
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + 
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + 
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + 
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + 
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + 
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + 
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"; // even number of chars + + @Test(expected = IllegalArgumentException.class) + public void eventDataByteArrayNotNull() { + byte[] byteArray = null; + EventData.create(byteArray); + } + + @Test(expected = IllegalArgumentException.class) + public void eventDataByteArrayNotNullBuffer() { + final ByteBuffer buffer = null; + EventData.create(buffer); + } + + @Test(expected = IllegalArgumentException.class) + public void eventDataByteArrayNotNullConstructor2() { + EventData.create(null, 0, 0); + } + + @Test + public void eventDataEmptyByteArray() throws IOException, ClassNotFoundException { + byte[] byteArray = new byte[0]; + EventData deSerializedEvent = serializeAndDeserialize(EventData.create(byteArray)); + Assert.assertEquals(deSerializedEvent.getBytes().length, 0); + Assert.assertTrue(deSerializedEvent.getBytes() != null); + } + + @Test + public void eventDataSerializationTest() throws IOException, ClassNotFoundException { + final EventData withSimpleByteArray = EventData.create(payload.getBytes()); + EventData deSerializedEvent = serializeAndDeserialize(withSimpleByteArray); + Assert.assertTrue(payload.equals(new String(deSerializedEvent.getBytes()))); + } + + @Test + public void eventDataSerializationTestConstWithOffsetAndLength() throws IOException, ClassNotFoundException { + final ByteArrayOutputStream payloadStream = new ByteArrayOutputStream(); + payloadStream.write(payload.getBytes()); + payloadStream.write(payload.getBytes()); + payloadStream.close(); + + final EventData withByteArrayAndOffset = EventData.create(payloadStream.toByteArray(), payloadStream.size() / 2, payloadStream.size() / 2); + final EventData deSerializedEvent = 
serializeAndDeserialize(withByteArrayAndOffset); + Assert.assertTrue(payload.equals(new String(deSerializedEvent.getBytes()))); + } + + @Test + public void eventDataSerializationTestConstWithByteBuffer() throws IOException, ClassNotFoundException { + final ByteArrayOutputStream payloadStream = new ByteArrayOutputStream(); + payloadStream.write(payload.getBytes()); + payloadStream.write(payload.getBytes()); + payloadStream.close(); + + final EventData withByteBuffer = EventData.create(ByteBuffer.wrap(payloadStream.toByteArray(), payloadStream.size() / 2, payloadStream.size() / 2)); + final EventData deSerializedEvent = serializeAndDeserialize(withByteBuffer); + Assert.assertTrue(payload.equals(new String(deSerializedEvent.getBytes()))); + } + + @Test + public void sendingEventsSysPropsShouldBeNull() { + Assert.assertTrue(EventData.create("Test".getBytes()).getSystemProperties() == null); + } + + private EventData serializeAndDeserialize(final EventData input) throws IOException, ClassNotFoundException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + ObjectOutputStream oos = new ObjectOutputStream(baos); + oos.writeObject(input); + oos.close(); + + ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray()); + ObjectInputStream ois = new ObjectInputStream(bais); + final EventData deSerializedEvent = (EventData) ois.readObject(); + ois.close(); + + return deSerializedEvent; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/InteropAmqpPropertiesTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/InteropAmqpPropertiesTest.java new file mode 100755 index 0000000000000..7a2aa90bfb128 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/InteropAmqpPropertiesTest.java @@ -0,0 +1,195 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. 
See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.eventdata; + +import com.microsoft.azure.eventhubs.*; +import com.microsoft.azure.eventhubs.impl.*; +import com.microsoft.azure.eventhubs.lib.ApiTestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import org.apache.qpid.proton.Proton; +import org.apache.qpid.proton.amqp.Binary; +import org.apache.qpid.proton.amqp.Symbol; +import org.apache.qpid.proton.amqp.messaging.ApplicationProperties; +import org.apache.qpid.proton.amqp.messaging.Data; +import org.apache.qpid.proton.amqp.messaging.MessageAnnotations; +import org.apache.qpid.proton.message.Message; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.time.Instant; +import java.util.HashMap; +import java.util.concurrent.ExecutionException; +import java.util.function.Consumer; + +public class InteropAmqpPropertiesTest extends ApiTestBase { + static final String partitionId = "0"; + static final Message originalMessage = Proton.message(); + static final String applicationProperty = "firstProp"; + static final String msgAnnotation = "message-annotation-1"; + static final String payload = "testmsg"; + static EventHubClient ehClient; + static MessagingFactory msgFactory; + static PartitionReceiver receiver; + static MessageReceiver msgReceiver; + static MessageSender partitionMsgSender; + static PartitionSender partitionEventSender; + static EventData receivedEvent; + static EventData reSentAndReceivedEvent; + static Message reSendAndReceivedMessage; + + final Consumer validateAmqpPropertiesInEventData = new Consumer() { + @Override + public void accept(EventData eData) { + Assert.assertTrue(eData.getSystemProperties().containsKey(AmqpConstants.AMQP_PROPERTY_MESSAGE_ID) + && 
eData.getSystemProperties().get(AmqpConstants.AMQP_PROPERTY_MESSAGE_ID).equals(originalMessage.getMessageId())); + Assert.assertTrue(eData.getSystemProperties().containsKey(AmqpConstants.AMQP_PROPERTY_USER_ID) + && new String((byte[]) eData.getSystemProperties().get(AmqpConstants.AMQP_PROPERTY_USER_ID)).equals(new String(originalMessage.getUserId()))); + Assert.assertTrue(eData.getSystemProperties().containsKey(AmqpConstants.AMQP_PROPERTY_TO) + && eData.getSystemProperties().get(AmqpConstants.AMQP_PROPERTY_TO).equals(originalMessage.getAddress())); + Assert.assertTrue(eData.getSystemProperties().containsKey(AmqpConstants.AMQP_PROPERTY_CONTENT_TYPE) + && eData.getSystemProperties().get(AmqpConstants.AMQP_PROPERTY_CONTENT_TYPE).equals(originalMessage.getContentType())); + Assert.assertTrue(eData.getSystemProperties().containsKey(AmqpConstants.AMQP_PROPERTY_CONTENT_ENCODING) + && eData.getSystemProperties().get(AmqpConstants.AMQP_PROPERTY_CONTENT_ENCODING).equals(originalMessage.getContentEncoding())); + Assert.assertTrue(eData.getSystemProperties().containsKey(AmqpConstants.AMQP_PROPERTY_CORRELATION_ID) + && eData.getSystemProperties().get(AmqpConstants.AMQP_PROPERTY_CORRELATION_ID).equals(originalMessage.getCorrelationId())); + Assert.assertTrue(eData.getSystemProperties().containsKey(AmqpConstants.AMQP_PROPERTY_CREATION_TIME) + && eData.getSystemProperties().get(AmqpConstants.AMQP_PROPERTY_CREATION_TIME).equals(originalMessage.getCreationTime())); + Assert.assertTrue(eData.getSystemProperties().containsKey(AmqpConstants.AMQP_PROPERTY_SUBJECT) + && eData.getSystemProperties().get(AmqpConstants.AMQP_PROPERTY_SUBJECT).equals(originalMessage.getSubject())); + Assert.assertTrue(eData.getSystemProperties().containsKey(AmqpConstants.AMQP_PROPERTY_GROUP_ID) + && eData.getSystemProperties().get(AmqpConstants.AMQP_PROPERTY_GROUP_ID).equals(originalMessage.getGroupId())); + 
Assert.assertTrue(eData.getSystemProperties().containsKey(AmqpConstants.AMQP_PROPERTY_REPLY_TO_GROUP_ID) + && eData.getSystemProperties().get(AmqpConstants.AMQP_PROPERTY_REPLY_TO_GROUP_ID).equals(originalMessage.getReplyToGroupId())); + Assert.assertTrue(eData.getSystemProperties().containsKey(AmqpConstants.AMQP_PROPERTY_REPLY_TO) + && eData.getSystemProperties().get(AmqpConstants.AMQP_PROPERTY_REPLY_TO).equals(originalMessage.getReplyTo())); + Assert.assertTrue(eData.getSystemProperties().containsKey(AmqpConstants.AMQP_PROPERTY_ABSOLUTE_EXPRITY_TIME) + && eData.getSystemProperties().get(AmqpConstants.AMQP_PROPERTY_ABSOLUTE_EXPRITY_TIME).equals(originalMessage.getExpiryTime())); + + Assert.assertTrue(eData.getSystemProperties().containsKey(msgAnnotation) + && eData.getSystemProperties().get(msgAnnotation).equals(originalMessage.getMessageAnnotations().getValue().get(Symbol.getSymbol(msgAnnotation)))); + + Assert.assertTrue(eData.getProperties().containsKey(applicationProperty) + && eData.getProperties().get(applicationProperty).equals(originalMessage.getApplicationProperties().getValue().get(applicationProperty))); + + Assert.assertTrue(eData.getProperties().size() == 1); + + Assert.assertTrue(new String(eData.getBytes()).equals(payload)); + } + }; + + @BeforeClass + public static void initialize() throws EventHubException, IOException, InterruptedException, ExecutionException { + final ConnectionStringBuilder connStrBuilder = TestContext.getConnectionString(); + final String connectionString = connStrBuilder.toString(); + + ehClient = EventHubClient.createSync(connectionString, TestContext.EXECUTOR_SERVICE); + msgFactory = MessagingFactory.createFromConnectionString(connectionString, TestContext.EXECUTOR_SERVICE).get(); + receiver = ehClient.createReceiverSync(TestContext.getConsumerGroupName(), partitionId, EventPosition.fromEnqueuedTime(Instant.now())); + partitionMsgSender = MessageSender.create(msgFactory, "link1", connStrBuilder.getEventHubName() + 
"/partitions/" + partitionId).get(); + partitionEventSender = ehClient.createPartitionSenderSync(partitionId); + + final HashMap appProperties = new HashMap<>(); + appProperties.put(applicationProperty, "value1"); + final ApplicationProperties applicationProperties = new ApplicationProperties(appProperties); + originalMessage.setApplicationProperties(applicationProperties); + + originalMessage.setMessageId("id1"); + originalMessage.setUserId("user1".getBytes()); + originalMessage.setAddress("eventhub1"); + originalMessage.setSubject("sub"); + originalMessage.setReplyTo("replyingTo"); + originalMessage.setExpiryTime(456L); + originalMessage.setGroupSequence(5555L); + originalMessage.setContentType("events"); + originalMessage.setContentEncoding("UTF-8"); + originalMessage.setCorrelationId("corid1"); + originalMessage.setCreationTime(345L); + originalMessage.setGroupId("gid"); + originalMessage.setReplyToGroupId("replyToGroupId"); + + originalMessage.setMessageAnnotations(new MessageAnnotations(new HashMap<>())); + originalMessage.getMessageAnnotations().getValue().put(Symbol.getSymbol(msgAnnotation), "messageAnnotationValue"); + + originalMessage.setBody(new Data(Binary.create(ByteBuffer.wrap(payload.getBytes())))); + + partitionMsgSender.send(originalMessage).get(); + receivedEvent = receiver.receiveSync(10).iterator().next(); + + partitionEventSender.sendSync(receivedEvent); + reSentAndReceivedEvent = receiver.receiveSync(10).iterator().next(); + + partitionEventSender.sendSync(reSentAndReceivedEvent); + msgReceiver = MessageReceiver.create( + msgFactory, + "receiver1", + connStrBuilder.getEventHubName() + "/ConsumerGroups/" + TestContext.getConsumerGroupName() + "/Partitions/" + partitionId, + 100, + (ReceiverSettingsProvider) ehClient.createReceiver(TestContext.getConsumerGroupName(), partitionId, EventPosition.fromOffset(reSentAndReceivedEvent.getSystemProperties().getOffset(), false)).get()).get(); + + reSendAndReceivedMessage = 
msgReceiver.receive(10).get().iterator().next(); + } + + @AfterClass + public static void cleanup() throws EventHubException { + if (msgReceiver != null) + msgReceiver.closeSync(); + + if (partitionEventSender != null) + partitionEventSender.closeSync(); + + if (partitionMsgSender != null) + partitionMsgSender.closeSync(); + + if (receiver != null) + receiver.closeSync(); + + if (ehClient != null) + ehClient.closeSync(); + + if (msgFactory != null) + msgFactory.closeSync(); + } + + @Test + public void interopWithDirectProtonAmqpMessage() { + validateAmqpPropertiesInEventData.accept(receivedEvent); + } + + @Test + public void interopWithDirectProtonEventDataReSend() { + validateAmqpPropertiesInEventData.accept(reSentAndReceivedEvent); + } + + @Test + public void resentAmqpMessageShouldRetainAllOriginalProps() { + Assert.assertTrue(reSendAndReceivedMessage.getMessageId().equals(originalMessage.getMessageId())); + Assert.assertTrue(reSendAndReceivedMessage.getAddress().equals(originalMessage.getAddress())); + Assert.assertTrue(reSendAndReceivedMessage.getContentEncoding().equals(originalMessage.getContentEncoding())); + Assert.assertTrue(reSendAndReceivedMessage.getContentType().equals(originalMessage.getContentType())); + Assert.assertTrue(new String(reSendAndReceivedMessage.getUserId()).equals(new String(originalMessage.getUserId()))); + Assert.assertTrue(reSendAndReceivedMessage.getCorrelationId().equals(originalMessage.getCorrelationId())); + Assert.assertTrue(reSendAndReceivedMessage.getGroupId().equals(originalMessage.getGroupId())); + Assert.assertTrue(reSendAndReceivedMessage.getReplyTo().equals(originalMessage.getReplyTo())); + Assert.assertTrue(reSendAndReceivedMessage.getReplyToGroupId().equals(originalMessage.getReplyToGroupId())); + Assert.assertTrue(reSendAndReceivedMessage.getSubject().equals(originalMessage.getSubject())); + Assert.assertTrue(reSendAndReceivedMessage.getExpiryTime() == originalMessage.getExpiryTime()); + 
Assert.assertTrue(reSendAndReceivedMessage.getCreationTime() == originalMessage.getCreationTime()); + Assert.assertTrue(reSendAndReceivedMessage.getExpiryTime() == originalMessage.getExpiryTime()); + Assert.assertTrue(reSendAndReceivedMessage.getGroupSequence() == originalMessage.getGroupSequence()); + + Assert.assertTrue(reSendAndReceivedMessage.getApplicationProperties().getValue().get(applicationProperty) + .equals(originalMessage.getApplicationProperties().getValue().get(applicationProperty))); + + Assert.assertTrue(reSendAndReceivedMessage.getMessageAnnotations().getValue().get(Symbol.getSymbol(msgAnnotation)) + .equals(originalMessage.getMessageAnnotations().getValue().get(Symbol.getSymbol(msgAnnotation)))); + + Binary payloadBytes = ((Data) reSendAndReceivedMessage.getBody()).getValue(); + Assert.assertTrue(new String(payloadBytes.getArray(), payloadBytes.getArrayOffset(), payloadBytes.getLength()).equals(payload)); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/InteropEventBodyTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/InteropEventBodyTest.java new file mode 100644 index 0000000000000..42c012cbf6af8 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/eventdata/InteropEventBodyTest.java @@ -0,0 +1,111 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.eventdata; + +import com.microsoft.azure.eventhubs.*; +import com.microsoft.azure.eventhubs.impl.MessageSender; +import com.microsoft.azure.eventhubs.impl.MessagingFactory; +import com.microsoft.azure.eventhubs.lib.ApiTestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import org.apache.qpid.proton.Proton; +import org.apache.qpid.proton.amqp.Binary; +import org.apache.qpid.proton.amqp.messaging.AmqpSequence; +import org.apache.qpid.proton.amqp.messaging.AmqpValue; +import org.apache.qpid.proton.amqp.messaging.Data; +import org.apache.qpid.proton.message.Message; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.IOException; +import java.time.Duration; +import java.time.Instant; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.ExecutionException; + +public class InteropEventBodyTest extends ApiTestBase { + + static final String partitionId = "0"; + static EventHubClient ehClient; + static MessagingFactory msgFactory; + static PartitionReceiver receiver; + static MessageSender partitionMsgSender; + static PartitionSender partitionSender; + static EventData receivedEvent; + static EventData reSentAndReceivedEvent; + + @BeforeClass + public static void initialize() throws EventHubException, IOException, InterruptedException, ExecutionException { + final ConnectionStringBuilder connStrBuilder = TestContext.getConnectionString(); + final String connectionString = connStrBuilder.toString(); + + ehClient = EventHubClient.createSync(connectionString, TestContext.EXECUTOR_SERVICE); + msgFactory = MessagingFactory.createFromConnectionString(connectionString, TestContext.EXECUTOR_SERVICE).get(); + receiver = ehClient.createReceiverSync(TestContext.getConsumerGroupName(), partitionId, EventPosition.fromEnqueuedTime(Instant.now())); + partitionSender = ehClient.createPartitionSenderSync(partitionId); + 
partitionMsgSender = MessageSender.create(msgFactory, "link1", connStrBuilder.getEventHubName() + "/partitions/" + partitionId).get(); + + // run out of messages in that specific partition - to account for clock-skew with Instant.now() on test machine vs eventhubs service + receiver.setReceiveTimeout(Duration.ofSeconds(5)); + Iterable clockSkewEvents; + do { + clockSkewEvents = receiver.receiveSync(100); + } while (clockSkewEvents != null && clockSkewEvents.iterator().hasNext()); + } + + @AfterClass + public static void cleanup() throws EventHubException { + if (partitionMsgSender != null) + partitionMsgSender.closeSync(); + + if (receiver != null) + receiver.closeSync(); + + if (ehClient != null) + ehClient.closeSync(); + + if (msgFactory != null) + msgFactory.closeSync(); + } + + @Test + public void interopWithProtonAmqpMessageBodyAsAmqpValue() throws EventHubException, InterruptedException, ExecutionException { + Message originalMessage = Proton.message(); + String payload = "testmsg"; + originalMessage.setBody(new AmqpValue(payload)); + partitionMsgSender.send(originalMessage).get(); + receivedEvent = receiver.receiveSync(10).iterator().next(); + + Assert.assertEquals(payload, receivedEvent.getObject()); + Assert.assertEquals(receivedEvent.getBytes(), null); + + partitionSender.sendSync(receivedEvent); + reSentAndReceivedEvent = receiver.receiveSync(10).iterator().next(); + Assert.assertEquals(payload, reSentAndReceivedEvent.getObject()); + Assert.assertEquals(reSentAndReceivedEvent.getBytes(), null); + } + + @Test + public void interopWithProtonAmqpMessageBodyAsAmqpSequence() throws EventHubException, InterruptedException, ExecutionException { + Message originalMessage = Proton.message(); + String payload = "testmsg"; + LinkedList datas = new LinkedList<>(); + datas.add(new Data(new Binary(payload.getBytes()))); + originalMessage.setBody(new AmqpSequence(datas)); + + partitionMsgSender.send(originalMessage).get(); + receivedEvent = 
receiver.receiveSync(10).iterator().next(); + + Assert.assertEquals(payload, new String(((List) receivedEvent.getObject()).get(0).getValue().getArray())); + Assert.assertEquals(receivedEvent.getBytes(), null); + + partitionSender.sendSync(receivedEvent); + reSentAndReceivedEvent = receiver.receiveSync(10).iterator().next(); + Assert.assertEquals(payload, new String(((List) reSentAndReceivedEvent.getObject()).get(0).getValue().getArray())); + Assert.assertArrayEquals(reSentAndReceivedEvent.getBytes(), null); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/ClientEntityCreateTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/ClientEntityCreateTest.java new file mode 100644 index 0000000000000..396d24d9b8161 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/ClientEntityCreateTest.java @@ -0,0 +1,216 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.exceptioncontracts; + +import com.microsoft.azure.eventhubs.*; +import com.microsoft.azure.eventhubs.impl.MessageReceiver; +import com.microsoft.azure.eventhubs.impl.MessageSender; +import com.microsoft.azure.eventhubs.lib.ApiTestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.lang.reflect.Field; +import java.time.Duration; +import java.util.UUID; +import java.util.function.Consumer; + +public class ClientEntityCreateTest extends ApiTestBase { + static final String PARTITION_ID = "0"; + static ConnectionStringBuilder connStr; + static final int SHORT_TIMEOUT = 8; + + @BeforeClass + public static void initialize() { + connStr = TestContext.getConnectionString(); + } + + @Test() + public void createReceiverShouldRetryAndThrowTimeoutExceptionUponRepeatedTransientErrors() throws Exception { + setIsTransientOnIllegalEntityException(true); + + try { + final ConnectionStringBuilder localConnStr = new ConnectionStringBuilder(connStr.toString()); + localConnStr.setOperationTimeout(Duration.ofSeconds(SHORT_TIMEOUT)); // to retry atleast once + + final EventHubClient eventHubClient = EventHubClient.createSync(localConnStr.toString(), TestContext.EXECUTOR_SERVICE); + + try { + eventHubClient.createReceiverSync("nonexistantcg", PARTITION_ID, EventPosition.fromStartOfStream()); + Assert.assertTrue(false); // this should be unreachable + } catch (TimeoutException exception) { + Assert.assertTrue(exception.getCause() instanceof IllegalEntityException); + } + + eventHubClient.closeSync(); + } finally { + setIsTransientOnIllegalEntityException(false); + } + } + + @Test() + public void createSenderShouldRetryAndThrowTimeoutExceptionUponRepeatedTransientErrors() throws Exception { + setIsTransientOnIllegalEntityException(true); + + try { + final ConnectionStringBuilder localConnStr = new ConnectionStringBuilder(connStr.toString()); + 
localConnStr.setOperationTimeout(Duration.ofSeconds(SHORT_TIMEOUT)); // to retry atleast once + localConnStr.setEventHubName("nonexistanteventhub"); + final EventHubClient eventHubClient = EventHubClient.createSync(localConnStr.toString(), TestContext.EXECUTOR_SERVICE); + + try { + eventHubClient.createPartitionSenderSync(PARTITION_ID); + Assert.assertTrue(false); // this should be unreachable + } catch (TimeoutException exception) { + Assert.assertTrue(exception.getCause() instanceof IllegalEntityException); + } + + eventHubClient.closeSync(); + } finally { + setIsTransientOnIllegalEntityException(false); + } + } + + @Test() + public void createInternalSenderShouldRetryAndThrowTimeoutExceptionUponRepeatedTransientErrors() throws Exception { + setIsTransientOnIllegalEntityException(true); + + try { + final ConnectionStringBuilder localConnStr = new ConnectionStringBuilder(connStr.toString()); + localConnStr.setOperationTimeout(Duration.ofSeconds(SHORT_TIMEOUT)); // to retry atleast once + localConnStr.setEventHubName("nonexistanteventhub"); + final EventHubClient eventHubClient = EventHubClient.createSync(localConnStr.toString(), TestContext.EXECUTOR_SERVICE); + + try { + eventHubClient.sendSync(EventData.create("Testmessage".getBytes())); + Assert.assertTrue(false); // this should be unreachable + } catch (TimeoutException exception) { + Assert.assertTrue(exception.getCause() instanceof IllegalEntityException); + } + + eventHubClient.closeSync(); + } finally { + setIsTransientOnIllegalEntityException(false); + } + } + + @Test() + public void createReceiverFailsOnTransientErrorAndThenSucceedsOnRetry() throws Exception { + final TestObject testObject = new TestObject(); + testObject.isRetried = false; + final String nonExistentEventHubName = "nonexistanteh" + UUID.randomUUID(); + + Consumer onOpenRetry = new Consumer() { + @Override + public void accept(MessageReceiver messageReceiver) { + try { + final Field receivePathField = 
MessageReceiver.class.getDeclaredField("receivePath"); + receivePathField.setAccessible(true); + String receivePath = (String) receivePathField.get(messageReceiver); + receivePathField.set(messageReceiver, receivePath.replace(nonExistentEventHubName, connStr.getEventHubName())); + + final Field tokenAudienceField = MessageReceiver.class.getDeclaredField("tokenAudience"); + tokenAudienceField.setAccessible(true); + String tokenAudience = (String) tokenAudienceField.get(messageReceiver); + tokenAudienceField.set(messageReceiver, tokenAudience.replace(nonExistentEventHubName, connStr.getEventHubName())); + + testObject.isRetried = true; + } catch(Exception ignore){ + System.out.println("this testcase depends on receivepath & tokenAudience in MessageReceiver class for faultinjection..."); + } + } + }; + + final Field openRetryField = MessageReceiver.class.getDeclaredField("onOpenRetry"); + openRetryField.setAccessible(true); + openRetryField.set(null, onOpenRetry); + + setIsTransientOnIllegalEntityException(true); + + try { + ConnectionStringBuilder localConnectionStringBuilder = new ConnectionStringBuilder(connStr.toString()); + localConnectionStringBuilder.setEventHubName(nonExistentEventHubName); + final EventHubClient eventHubClient = EventHubClient.createSync(localConnectionStringBuilder.toString(), TestContext.EXECUTOR_SERVICE); + eventHubClient.createReceiverSync(EventHubClient.DEFAULT_CONSUMER_GROUP_NAME, PARTITION_ID, EventPosition.fromStartOfStream()); + eventHubClient.closeSync(); + } finally { + setIsTransientOnIllegalEntityException(false); + } + + Assert.assertTrue(testObject.isRetried); + } + + @Test() + public void createSenderFailsOnTransientErrorAndThenSucceedsOnRetry() throws Exception { + final TestObject testObject = new TestObject(); + testObject.isRetried = false; + final String nonExistentEventHubName = "nonexistanteh" + UUID.randomUUID(); + + Consumer onOpenRetry = new Consumer() { + @Override + public void accept(MessageSender messageSender) { 
+ try { + final Field receivePathField = MessageSender.class.getDeclaredField("sendPath"); + receivePathField.setAccessible(true); + String receivePath = (String) receivePathField.get(messageSender); + receivePathField.set(messageSender, receivePath.replace(nonExistentEventHubName, connStr.getEventHubName())); + + final Field tokenAudienceField = MessageSender.class.getDeclaredField("tokenAudience"); + tokenAudienceField.setAccessible(true); + String tokenAudience = (String) tokenAudienceField.get(messageSender); + tokenAudienceField.set(messageSender, tokenAudience.replace(nonExistentEventHubName, connStr.getEventHubName())); + + testObject.isRetried = true; + } catch(Exception ignore){ + System.out.println("this testcase depends on sendPath & tokenAudience in MessageReceiver class for faultinjection..."); + } + } + }; + + final Field openRetryField = MessageSender.class.getDeclaredField("onOpenRetry"); + openRetryField.setAccessible(true); + openRetryField.set(null, onOpenRetry); + + setIsTransientOnIllegalEntityException(true); + + try { + ConnectionStringBuilder localConnectionStringBuilder = new ConnectionStringBuilder(connStr.toString()); + localConnectionStringBuilder.setEventHubName(nonExistentEventHubName); + final EventHubClient eventHubClient = EventHubClient.createSync(localConnectionStringBuilder.toString(), TestContext.EXECUTOR_SERVICE); + eventHubClient.createPartitionSenderSync(PARTITION_ID); + eventHubClient.closeSync(); + } finally { + setIsTransientOnIllegalEntityException(false); + } + + Assert.assertTrue(testObject.isRetried); + } + + @Test(expected = IllegalEntityException.class) + public void createReceiverShouldThrowRespectiveExceptionUponNonTransientErrors() throws Exception { + setIsTransientOnIllegalEntityException(false); + final ConnectionStringBuilder localConnStr = new ConnectionStringBuilder(connStr.toString()); + localConnStr.setOperationTimeout(Duration.ofSeconds(SHORT_TIMEOUT)); // to retry atleast once + + final EventHubClient 
eventHubClient = EventHubClient.createSync(localConnStr.toString(), TestContext.EXECUTOR_SERVICE); + + try { + eventHubClient.createReceiverSync("nonexistantcg", PARTITION_ID, EventPosition.fromStartOfStream()); + } finally { + eventHubClient.closeSync(); + } + } + + static void setIsTransientOnIllegalEntityException(final boolean value) throws Exception { + final Field isTransientField = IllegalEntityException.class.getDeclaredField("isTransient"); + isTransientField.setAccessible(true); + isTransientField.setBoolean(null, value); + } + + private class TestObject { + public boolean isRetried; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/MsgFactoryOpenCloseTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/MsgFactoryOpenCloseTest.java new file mode 100644 index 0000000000000..4ae37ffe71a26 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/MsgFactoryOpenCloseTest.java @@ -0,0 +1,292 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.exceptioncontracts; + +import com.microsoft.azure.eventhubs.*; +import com.microsoft.azure.eventhubs.impl.MessagingFactory; +import com.microsoft.azure.eventhubs.lib.ApiTestBase; +import com.microsoft.azure.eventhubs.lib.FaultInjectingReactorFactory; +import com.microsoft.azure.eventhubs.lib.TestContext; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.time.Instant; +import java.util.concurrent.*; + +public class MsgFactoryOpenCloseTest extends ApiTestBase { + + static final String PARTITION_ID = "0"; + static ConnectionStringBuilder connStr; + + @BeforeClass + public static void initialize() { + connStr = TestContext.getConnectionString(); + } + + @Test() + public void VerifyTaskQueueEmptyOnMsgFactoryGracefulClose() throws Exception { + + final ScheduledExecutorService executor = Executors.newScheduledThreadPool(1); + try { + final EventHubClient ehClient = EventHubClient.createSync( + TestContext.getConnectionString().toString(), + executor); + + final PartitionReceiver receiver = ehClient.createReceiverSync( + TestContext.getConsumerGroupName(), PARTITION_ID, EventPosition.fromEnqueuedTime(Instant.now())); + final PartitionSender sender = ehClient.createPartitionSenderSync(PARTITION_ID); + sender.sendSync(EventData.create("test data - string".getBytes())); + Iterable events = receiver.receiveSync(10); + + Assert.assertTrue(events.iterator().hasNext()); + sender.closeSync(); + receiver.closeSync(); + + ehClient.closeSync(); + + Assert.assertEquals(((ScheduledThreadPoolExecutor) executor).getQueue().size(), 0); + } finally { + executor.shutdown(); + } + } + + @Test() + public void VerifyTaskQueueEmptyOnMsgFactoryWithPumpGracefulClose() throws Exception { + + final ScheduledExecutorService executor = new ScheduledThreadPoolExecutor(1); + + try { + final EventHubClient ehClient = EventHubClient.createSync( + TestContext.getConnectionString().toString(), + executor); + + final 
PartitionReceiver receiver = ehClient.createReceiverSync( + TestContext.getConsumerGroupName(), PARTITION_ID, EventPosition.fromEnqueuedTime(Instant.now())); + + final CompletableFuture> signalReceive = new CompletableFuture<>(); + receiver.setReceiveHandler(new PartitionReceiveHandler() { + @Override + public int getMaxEventCount() { + return 10; + } + + @Override + public void onReceive(Iterable events) { + signalReceive.complete(events); + } + + @Override + public void onError(Throwable error) { + } + }, false); + + final PartitionSender sender = ehClient.createPartitionSenderSync(PARTITION_ID); + sender.sendSync(EventData.create("test data - string".getBytes())); + + final Iterable events = signalReceive.get(); + Assert.assertTrue(events.iterator().hasNext()); + + receiver.setReceiveHandler(null).get(); + + sender.closeSync(); + receiver.closeSync(); + + ehClient.closeSync(); + + Assert.assertEquals(((ScheduledThreadPoolExecutor) executor).getQueue().size(), 0); + } finally { + executor.shutdown(); + } + } + + @Test() + public void VerifyThreadReleaseOnMsgFactoryOpenError() throws Exception { + + final FaultInjectingReactorFactory networkOutageSimulator = new FaultInjectingReactorFactory(); + networkOutageSimulator.setFaultType(FaultInjectingReactorFactory.FaultType.NetworkOutage); + + final ScheduledExecutorService executor = Executors.newScheduledThreadPool(1); + + try { + final CompletableFuture openFuture = MessagingFactory.createFromConnectionString( + connStr.toString(), null, + executor, + networkOutageSimulator); + try { + openFuture.get(); + Assert.assertFalse(true); + } catch (ExecutionException error) { + Assert.assertEquals(EventHubException.class, error.getCause().getClass()); + } + + Thread.sleep(1000); // for reactor to transition from cleanup to complete-stop + + Assert.assertEquals(((ScheduledThreadPoolExecutor) executor).getQueue().size(), 0); + } finally { + executor.shutdown(); + } + } + + @Test(expected = RejectedExecutionException.class) + 
public void SupplyClosedExecutorServiceToEventHubClient() throws Exception { + final ScheduledExecutorService testClosed = new ScheduledThreadPoolExecutor(1); + testClosed.shutdown(); + + EventHubClient.createSync( + TestContext.getConnectionString().toString(), + testClosed); + } + + @Test(expected = RejectedExecutionException.class) + public void SupplyClosedExecutorServiceToSendOperation() throws Exception { + final ScheduledExecutorService testClosed = Executors.newScheduledThreadPool(1); + + final EventHubClient temp = EventHubClient.createSync( + TestContext.getConnectionString().toString(), + testClosed); + temp.sendSync(EventData.create("test data - string".getBytes())); + + testClosed.shutdown(); + + temp.sendSync(EventData.create("test data - string".getBytes())); + testClosed.awaitTermination(60, TimeUnit.SECONDS); + } + + @Test(expected = RejectedExecutionException.class) + public void SupplyClosedExecutorServiceToReceiveOperation() throws Exception { + final ScheduledExecutorService testClosed = new ScheduledThreadPoolExecutor(1); + + final PartitionReceiver temp = EventHubClient.createSync( + TestContext.getConnectionString().toString(), + testClosed) + .createReceiverSync(TestContext.getConsumerGroupName(), PARTITION_ID, EventPosition.fromEndOfStream()); + + testClosed.shutdown(); + testClosed.awaitTermination(60, TimeUnit.SECONDS); + + temp.receiveSync(20); + } + + @Test(expected = RejectedExecutionException.class) + public void SupplyClosedExecutorServiceToCreateLinkOperation() throws Exception { + final ScheduledExecutorService testClosed = Executors.newScheduledThreadPool(1); + + final EventHubClient temp = EventHubClient.createSync( + TestContext.getConnectionString().toString(), + testClosed); + + testClosed.shutdown(); + testClosed.awaitTermination(60, TimeUnit.SECONDS); + + // first send creates send link + temp.sendSync(EventData.create("test data - string".getBytes())); + } + + @Test(expected = RejectedExecutionException.class) + public 
void SupplyClosedExecutorServiceToCreateSenderOperation() throws Exception { + final ScheduledExecutorService testClosed = new ScheduledThreadPoolExecutor(1); + + final EventHubClient temp = EventHubClient.createSync( + TestContext.getConnectionString().toString(), + testClosed); + + testClosed.shutdown(); + testClosed.awaitTermination(60, TimeUnit.SECONDS); + + temp.createPartitionSenderSync(PARTITION_ID); + } + + @Test(expected = RejectedExecutionException.class) + public void SupplyClosedExecutorServiceToCreateReceiverOperation() throws Exception { + final ScheduledExecutorService testClosed = Executors.newScheduledThreadPool(1); + + final EventHubClient temp = EventHubClient.createSync( + TestContext.getConnectionString().toString(), + testClosed); + + testClosed.shutdown(); + testClosed.awaitTermination(60, TimeUnit.SECONDS); + + temp.createReceiverSync(TestContext.getConsumerGroupName(), PARTITION_ID, EventPosition.fromEndOfStream()); + } + + @Test(expected = RejectedExecutionException.class) + public void SupplyClosedExecutorServiceThenMgmtOperation() throws Throwable { + final ScheduledThreadPoolExecutor testClosed = new ScheduledThreadPoolExecutor(1); + + final EventHubClient temp = EventHubClient.createSync( + TestContext.getConnectionString().toString(), + testClosed); + + testClosed.shutdown(); + testClosed.awaitTermination(60, TimeUnit.SECONDS); + + try { + temp.getPartitionRuntimeInformation(PARTITION_ID).get(); + } catch (ExecutionException ex) { + throw ex.getCause(); + } + } + + @Test(expected = RejectedExecutionException.class) + public void SupplyClosedExecutorServiceThenFactoryCloseOperation() throws Exception { + final ScheduledExecutorService testClosed = Executors.newScheduledThreadPool(1); + + final EventHubClient temp = EventHubClient.createSync( + TestContext.getConnectionString().toString(), + testClosed); + + testClosed.shutdown(); + testClosed.awaitTermination(60, TimeUnit.SECONDS); + + temp.closeSync(); + } + + @Test(expected = 
RejectedExecutionException.class) + public void SupplyClosedExecutorServiceThenSenderCloseOperation() throws Exception { + final ScheduledThreadPoolExecutor testClosed = new ScheduledThreadPoolExecutor(1); + + final PartitionSender temp = EventHubClient.createSync( + TestContext.getConnectionString().toString(), + testClosed).createPartitionSenderSync(PARTITION_ID); + + testClosed.shutdown(); + testClosed.awaitTermination(60, TimeUnit.SECONDS); + + temp.closeSync(); + } + + @Test(expected = RejectedExecutionException.class) + public void SupplyClosedExecutorServiceThenReceiverCloseOperation() throws Exception { + final ScheduledExecutorService testClosed = Executors.newScheduledThreadPool(1); + + final PartitionReceiver temp = EventHubClient.createSync( + TestContext.getConnectionString().toString(), + testClosed).createReceiverSync(TestContext.getConsumerGroupName(), PARTITION_ID, EventPosition.fromEndOfStream()); + + testClosed.shutdown(); + testClosed.awaitTermination(60, TimeUnit.SECONDS); + + temp.closeSync(); + } + + @Test(expected = RejectedExecutionException.class) + public void testEventHubClientSendAfterClose() throws Exception { + final ConnectionStringBuilder connectionString = TestContext.getConnectionString(); + final EventHubClient eventHubClient = EventHubClient.createSync(connectionString.toString(), TestContext.EXECUTOR_SERVICE); + eventHubClient.closeSync(); + eventHubClient.sendSync(EventData.create("test message".getBytes())); + } + + @Test(expected = IllegalStateException.class) + public void testEventHubClientSendCloseAfterSomeSends() throws Exception { + final ConnectionStringBuilder connectionString = TestContext.getConnectionString(); + final EventHubClient eventHubClient = EventHubClient.createSync(connectionString.toString(), TestContext.EXECUTOR_SERVICE); + eventHubClient.sendSync(EventData.create("test message".getBytes())); + eventHubClient.closeSync(); + eventHubClient.sendSync(EventData.create("test message".getBytes())); + } +} 
diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/ReactorFaultTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/ReactorFaultTest.java new file mode 100644 index 0000000000000..56786c4019a9d --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/ReactorFaultTest.java @@ -0,0 +1,132 @@ +package com.microsoft.azure.eventhubs.exceptioncontracts; + +import com.microsoft.azure.eventhubs.*; +import com.microsoft.azure.eventhubs.impl.EventHubClientImpl; +import com.microsoft.azure.eventhubs.impl.MessagingFactory; +import com.microsoft.azure.eventhubs.lib.ApiTestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import org.apache.qpid.proton.engine.BaseHandler; +import org.apache.qpid.proton.engine.Event; +import org.apache.qpid.proton.engine.impl.CollectorImpl; +import org.apache.qpid.proton.engine.impl.ConnectionImpl; +import org.apache.qpid.proton.reactor.Reactor; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.lang.reflect.Field; +import java.util.Iterator; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +public class ReactorFaultTest extends ApiTestBase { + static final String PARTITION_ID = "0"; + static ConnectionStringBuilder connStr; + + @BeforeClass + public static void initialize() throws Exception { + connStr = TestContext.getConnectionString(); + } + + @Test() + public void VerifyReactorRestartsOnProtonBugs() throws Exception { + final EventHubClient eventHubClient = EventHubClient.createSync(connStr.toString(), TestContext.EXECUTOR_SERVICE); + try { + final PartitionReceiver partitionReceiver = eventHubClient.createEpochReceiverSync( + "$default", "0", EventPosition.fromStartOfStream(), System.currentTimeMillis()); + partitionReceiver.receiveSync(100); + + 
Executors.newScheduledThreadPool(1).schedule(new Runnable() { + @Override + public void run() { + try { + final Field factoryField = EventHubClientImpl.class.getDeclaredField("underlyingFactory"); + factoryField.setAccessible(true); + final MessagingFactory underlyingFactory = (MessagingFactory) factoryField.get(eventHubClient); + + final Field reactorField = MessagingFactory.class.getDeclaredField("reactor"); + reactorField.setAccessible(true); + final Reactor reactor = (Reactor) reactorField.get(underlyingFactory); + + org.apache.qpid.proton.engine.Handler handler = reactor.getHandler(); + handler.add(new BaseHandler() { + @Override + public void handle(org.apache.qpid.proton.engine.Event e) { + throw new NullPointerException(); + } + }); + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } + }, 2, TimeUnit.SECONDS); + + try { + Thread.sleep(4000); + + final Iterable events = partitionReceiver.receiveSync(100); + Assert.assertTrue(events != null && events.iterator().hasNext()); + } finally { + partitionReceiver.closeSync(); + } + } finally { + eventHubClient.closeSync(); + } + } + + @Test() + public void VerifyTransportAbort() throws Exception { + final EventHubClient eventHubClient = EventHubClient.createSync(connStr.toString(), TestContext.EXECUTOR_SERVICE); + try { + final PartitionReceiver partitionReceiver = eventHubClient.createEpochReceiverSync( + "$default", "0", EventPosition.fromStartOfStream(), System.currentTimeMillis()); + final Iterable firstBatch = partitionReceiver.receiveSync(100); + Assert.assertTrue(firstBatch != null); + + long sequenceNumber = -1; + final Iterator iterator = firstBatch.iterator(); + while (iterator.hasNext()) { + sequenceNumber = iterator.next().getSystemProperties().getSequenceNumber(); + } + + Assert.assertTrue(sequenceNumber > -1); + + Executors.newScheduledThreadPool(1).schedule(new Runnable() { + @Override + public void run() { + try { + final Field factoryField = 
EventHubClientImpl.class.getDeclaredField("underlyingFactory"); + factoryField.setAccessible(true); + final MessagingFactory underlyingFactory = (MessagingFactory) factoryField.get(eventHubClient); + + final Field reactorField = MessagingFactory.class.getDeclaredField("reactor"); + reactorField.setAccessible(true); + final Reactor reactor = (Reactor) reactorField.get(underlyingFactory); + + final Field connectionField = MessagingFactory.class.getDeclaredField("connection"); + connectionField.setAccessible(true); + final ConnectionImpl connection = (ConnectionImpl) connectionField.get(underlyingFactory); + + ((CollectorImpl) reactor.collector()).put(Event.Type.TRANSPORT_ERROR, connection.getTransport()); + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } + }, 5, TimeUnit.SECONDS); + + try { + Thread.sleep(10000); + + final Iterable events = partitionReceiver.receiveSync(100); + Assert.assertTrue(events != null && events.iterator().hasNext()); + Assert.assertEquals(sequenceNumber + 1, events.iterator().next().getSystemProperties().getSequenceNumber()); + } catch (Exception e) { + Assert.fail(e.getMessage()); + } finally { + partitionReceiver.closeSync(); + } + } finally { + eventHubClient.closeSync(); + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/ReceiverEpochTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/ReceiverEpochTest.java new file mode 100644 index 0000000000000..140023b0cc818 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/ReceiverEpochTest.java @@ -0,0 +1,92 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.exceptioncontracts; + +import com.microsoft.azure.eventhubs.*; +import com.microsoft.azure.eventhubs.lib.ApiTestBase; +import com.microsoft.azure.eventhubs.lib.TestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import org.junit.*; + +import java.io.IOException; +import java.time.Duration; +import java.time.Instant; +import java.util.Random; +import java.util.concurrent.ExecutionException; + +public class ReceiverEpochTest extends ApiTestBase { + static final String cgName = TestContext.getConsumerGroupName(); + static final String partitionId = "0"; + + static EventHubClient ehClient; + + PartitionReceiver receiver; + + @BeforeClass + public static void initializeEventHub() throws EventHubException, IOException { + final ConnectionStringBuilder connectionString = TestContext.getConnectionString(); + ehClient = EventHubClient.createSync(connectionString.toString(), TestContext.EXECUTOR_SERVICE); + } + + @AfterClass + public static void cleanup() throws EventHubException { + if (ehClient != null) + ehClient.closeSync(); + } + + @Test(expected = ReceiverDisconnectedException.class) + public void testEpochReceiverWins() throws EventHubException, InterruptedException, ExecutionException { + int sendEventCount = 5; + + PartitionReceiver receiverLowEpoch = ehClient.createReceiverSync(cgName, partitionId, EventPosition.fromEnqueuedTime(Instant.now())); + receiverLowEpoch.setReceiveTimeout(Duration.ofSeconds(2)); + TestBase.pushEventsToPartition(ehClient, partitionId, sendEventCount).get(); + receiverLowEpoch.receiveSync(20); + + receiver = ehClient.createEpochReceiverSync(cgName, partitionId, EventPosition.fromEnqueuedTime(Instant.now()), Long.MAX_VALUE); + + for (int retryCount = 0; retryCount < sendEventCount; retryCount++) // retry to flush all msgs in cache + receiverLowEpoch.receiveSync(10); + } + + @Test(expected = ReceiverDisconnectedException.class) + public void testOldHighestEpochWins() throws 
EventHubException, InterruptedException, ExecutionException { + Instant testStartTime = Instant.now(); + long epoch = Math.abs(new Random().nextLong()); + + if (epoch < 11L) + epoch += 11L; + + receiver = ehClient.createEpochReceiverSync(cgName, partitionId, EventPosition.fromEnqueuedTime(testStartTime), epoch); + receiver.setReceiveTimeout(Duration.ofSeconds(10)); + ehClient.createEpochReceiverSync(cgName, partitionId, EventPosition.fromStartOfStream(), epoch - 10); + + TestBase.pushEventsToPartition(ehClient, partitionId, 5).get(); + Assert.assertTrue(receiver.receiveSync(10).iterator().hasNext()); + } + + @Test(expected = ReceiverDisconnectedException.class) + public void testNewHighestEpochWins() throws EventHubException, InterruptedException, ExecutionException { + int sendEventCount = 5; + long epoch = new Random().nextInt(Integer.MAX_VALUE); + + PartitionReceiver receiverLowEpoch = ehClient.createEpochReceiverSync(cgName, partitionId, EventPosition.fromEnqueuedTime(Instant.now()), epoch); + receiverLowEpoch.setReceiveTimeout(Duration.ofSeconds(2)); + TestBase.pushEventsToPartition(ehClient, partitionId, sendEventCount).get(); + receiverLowEpoch.receiveSync(20); + + receiver = ehClient.createEpochReceiverSync(cgName, partitionId, EventPosition.fromEnqueuedTime(Instant.now()), Long.MAX_VALUE); + + for (int retryCount = 0; retryCount < sendEventCount; retryCount++) // retry to flush all msgs in cache + receiverLowEpoch.receiveSync(10); + } + + @After + public void testCleanup() throws EventHubException { + if (receiver != null) { + receiver.closeSync(); + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/RetryPolicyTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/RetryPolicyTest.java new file mode 100644 index 0000000000000..0cae3390a989c --- /dev/null +++ 
b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/RetryPolicyTest.java @@ -0,0 +1,85 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.exceptioncontracts; + +import com.microsoft.azure.eventhubs.AuthorizationFailedException; +import com.microsoft.azure.eventhubs.RetryPolicy; +import com.microsoft.azure.eventhubs.ServerBusyException; +import com.microsoft.azure.eventhubs.lib.TestBase; +import org.junit.Assert; +import org.junit.Test; + +import java.time.Duration; +import java.util.logging.Level; + +public class RetryPolicyTest extends TestBase { + @Test + public void testRetryPolicy() throws Exception { + String clientId = "someClientEntity"; + RetryPolicy retry = RetryPolicy.getDefault(); + + retry.incrementRetryCount(clientId); + Duration firstRetryInterval = retry.getNextRetryInterval(clientId, new ServerBusyException(), Duration.ofSeconds(60)); + TestBase.TEST_LOGGER.log(Level.FINE, "firstRetryInterval: " + firstRetryInterval.toString()); + Assert.assertTrue(firstRetryInterval != null); + + retry.incrementRetryCount(clientId); + Duration secondRetryInterval = retry.getNextRetryInterval(clientId, new ServerBusyException(), Duration.ofSeconds(60)); + TestBase.TEST_LOGGER.log(Level.FINE, "secondRetryInterval: " + secondRetryInterval.toString()); + + Assert.assertTrue(secondRetryInterval != null); + Assert.assertTrue(secondRetryInterval.getSeconds() > firstRetryInterval.getSeconds() || + (secondRetryInterval.getSeconds() == firstRetryInterval.getSeconds() && secondRetryInterval.getNano() > firstRetryInterval.getNano())); + + retry.incrementRetryCount(clientId); + Duration thirdRetryInterval = retry.getNextRetryInterval(clientId, new ServerBusyException(), Duration.ofSeconds(60)); + TestBase.TEST_LOGGER.log(Level.FINE, "thirdRetryInterval: " + 
thirdRetryInterval.toString()); + + Assert.assertTrue(thirdRetryInterval != null); + Assert.assertTrue(thirdRetryInterval.getSeconds() > secondRetryInterval.getSeconds() || + (thirdRetryInterval.getSeconds() == secondRetryInterval.getSeconds() && thirdRetryInterval.getNano() > secondRetryInterval.getNano())); + + retry.incrementRetryCount(clientId); + Duration fourthRetryInterval = retry.getNextRetryInterval(clientId, new ServerBusyException(), Duration.ofSeconds(60)); + TestBase.TEST_LOGGER.log(Level.FINE, "fourthRetryInterval: " + fourthRetryInterval.toString()); + + Assert.assertTrue(fourthRetryInterval != null); + Assert.assertTrue(fourthRetryInterval.getSeconds() > thirdRetryInterval.getSeconds() || + (fourthRetryInterval.getSeconds() == thirdRetryInterval.getSeconds() && fourthRetryInterval.getNano() > thirdRetryInterval.getNano())); + + retry.incrementRetryCount(clientId); + Duration fifthRetryInterval = retry.getNextRetryInterval(clientId, new ServerBusyException(), Duration.ofSeconds(60)); + TestBase.TEST_LOGGER.log(Level.FINE, "fifthRetryInterval: " + fifthRetryInterval.toString()); + + Assert.assertTrue(fifthRetryInterval != null); + Assert.assertTrue(fifthRetryInterval.getSeconds() > fourthRetryInterval.getSeconds() || + (fifthRetryInterval.getSeconds() == fourthRetryInterval.getSeconds() && fifthRetryInterval.getNano() > fourthRetryInterval.getNano())); + + retry.incrementRetryCount(clientId); + Duration sixthRetryInterval = retry.getNextRetryInterval(clientId, new ServerBusyException(), Duration.ofSeconds(60)); + TestBase.TEST_LOGGER.log(Level.FINE, "sixthRetryInterval: " + sixthRetryInterval.toString()); + + Assert.assertTrue(sixthRetryInterval != null); + Assert.assertTrue(sixthRetryInterval.getSeconds() > fifthRetryInterval.getSeconds() || + (sixthRetryInterval.getSeconds() == fifthRetryInterval.getSeconds() && sixthRetryInterval.getNano() > fifthRetryInterval.getNano())); + + retry.incrementRetryCount(clientId); + Duration seventhRetryInterval = 
retry.getNextRetryInterval(clientId, new ServerBusyException(), Duration.ofSeconds(60)); + TestBase.TEST_LOGGER.log(Level.FINE, "seventhRetryInterval: " + seventhRetryInterval.toString()); + + Assert.assertTrue(seventhRetryInterval != null); + Assert.assertTrue(seventhRetryInterval.getSeconds() > sixthRetryInterval.getSeconds() || + (seventhRetryInterval.getSeconds() == sixthRetryInterval.getSeconds() && seventhRetryInterval.getNano() > sixthRetryInterval.getNano())); + + retry.incrementRetryCount(clientId); + Duration nextRetryInterval = retry.getNextRetryInterval(clientId, new AuthorizationFailedException("authorizationerror"), Duration.ofSeconds(60)); + Assert.assertTrue(nextRetryInterval == null); + + retry.resetRetryCount(clientId); + retry.incrementRetryCount(clientId); + Duration firstRetryIntervalAfterReset = retry.getNextRetryInterval(clientId, new ServerBusyException(), Duration.ofSeconds(60)); + Assert.assertTrue(firstRetryInterval.equals(firstRetryIntervalAfterReset)); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/SecurityExceptionsTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/SecurityExceptionsTest.java new file mode 100644 index 0000000000000..1ce01910cd5df --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/SecurityExceptionsTest.java @@ -0,0 +1,161 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.exceptioncontracts; + +import com.microsoft.azure.eventhubs.*; +import com.microsoft.azure.eventhubs.impl.SharedAccessSignatureTokenProvider; +import com.microsoft.azure.eventhubs.lib.ApiTestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import org.junit.After; +import org.junit.Test; + +import java.time.Duration; +import java.util.UUID; + +public class SecurityExceptionsTest extends ApiTestBase { + final static String PARTITION_ID = "0"; + EventHubClient ehClient; + + @Test(expected = AuthorizationFailedException.class) + public void testEventHubClientUnAuthorizedAccessKeyName() throws Throwable { + final ConnectionStringBuilder correctConnectionString = TestContext.getConnectionString(); + final ConnectionStringBuilder connectionString = new ConnectionStringBuilder() + .setEndpoint(correctConnectionString.getEndpoint()) + .setEventHubName(correctConnectionString.getEventHubName()) + .setSasKeyName("---------------wrongkey------------") + .setSasKey(correctConnectionString.getSasKey()); + + ehClient = EventHubClient.createSync(connectionString.toString(), TestContext.EXECUTOR_SERVICE); + ehClient.sendSync(EventData.create("Test Message".getBytes())); + } + + @Test(expected = AuthorizationFailedException.class) + public void testEventHubClientUnAuthorizedAccessKey() throws Throwable { + final ConnectionStringBuilder correctConnectionString = TestContext.getConnectionString(); + final ConnectionStringBuilder connectionString = new ConnectionStringBuilder() + .setEndpoint(correctConnectionString.getEndpoint()) + .setEventHubName(correctConnectionString.getEventHubName()) + .setSasKeyName(correctConnectionString.getSasKeyName()) + .setSasKey("--------------wrongvalue-----------"); + + ehClient = EventHubClient.createSync(connectionString.toString(), TestContext.EXECUTOR_SERVICE); + ehClient.sendSync(EventData.create("Test Message".getBytes())); + } + + @Test(expected = EventHubException.class) + public void 
testEventHubClientInvalidAccessToken() throws Throwable { + final ConnectionStringBuilder correctConnectionString = TestContext.getConnectionString(); + final ConnectionStringBuilder connectionString = new ConnectionStringBuilder() + .setEndpoint(correctConnectionString.getEndpoint()) + .setEventHubName(correctConnectionString.getEventHubName()) + .setSharedAccessSignature("--------------invalidtoken-------------"); + + ehClient = EventHubClient.createSync(connectionString.toString(), TestContext.EXECUTOR_SERVICE); + ehClient.sendSync(EventData.create(("Test Message".getBytes()))); + } + + @Test(expected = IllegalArgumentException.class) + public void testEventHubClientNullAccessToken() throws Throwable { + final ConnectionStringBuilder correctConnectionString = TestContext.getConnectionString(); + final ConnectionStringBuilder connectionString = new ConnectionStringBuilder() + .setEndpoint(correctConnectionString.getEndpoint()) + .setEventHubName(correctConnectionString.getEventHubName()) + .setSharedAccessSignature(null); + + ehClient = EventHubClient.createSync(connectionString.toString(), TestContext.EXECUTOR_SERVICE); + ehClient.sendSync(EventData.create(("Test Message".getBytes()))); + } + + @Test(expected = AuthorizationFailedException.class) + public void testEventHubClientUnAuthorizedAccessToken() throws Throwable { + final ConnectionStringBuilder correctConnectionString = TestContext.getConnectionString(); + final String wrongToken = SharedAccessSignatureTokenProvider.generateSharedAccessSignature( + "wrongkey", + correctConnectionString.getSasKey(), + String.format("amqps://%s/%s", correctConnectionString.getEndpoint().getHost(), correctConnectionString.getEventHubName()), + Duration.ofSeconds(10)); + final ConnectionStringBuilder connectionString = new ConnectionStringBuilder() + .setEndpoint(correctConnectionString.getEndpoint()) + .setEventHubName(correctConnectionString.getEventHubName()) + .setSharedAccessSignature(wrongToken); + + ehClient = 
EventHubClient.createSync(connectionString.toString(), TestContext.EXECUTOR_SERVICE); + ehClient.sendSync(EventData.create("Test Message".getBytes())); + } + + @Test(expected = AuthorizationFailedException.class) + public void testEventHubClientWrongResourceInAccessToken() throws Throwable { + final ConnectionStringBuilder correctConnectionString = TestContext.getConnectionString(); + final String wrongToken = SharedAccessSignatureTokenProvider.generateSharedAccessSignature( + correctConnectionString.getSasKeyName(), + correctConnectionString.getSasKey(), + "----------wrongresource-----------", + Duration.ofSeconds(10)); + final ConnectionStringBuilder connectionString = new ConnectionStringBuilder() + .setEndpoint(correctConnectionString.getEndpoint()) + .setEventHubName(correctConnectionString.getEventHubName()) + .setSharedAccessSignature(wrongToken); + + ehClient = EventHubClient.createSync(connectionString.toString(), TestContext.EXECUTOR_SERVICE); + ehClient.sendSync(EventData.create("Test Message".getBytes())); + } + + @Test(expected = AuthorizationFailedException.class) + public void testUnAuthorizedAccessSenderCreation() throws Throwable { + final ConnectionStringBuilder correctConnectionString = TestContext.getConnectionString(); + final ConnectionStringBuilder connectionString = new ConnectionStringBuilder() + .setEndpoint(correctConnectionString.getEndpoint()) + .setEventHubName(correctConnectionString.getEventHubName()) + .setSasKeyName("------------wrongkeyname----------") + .setSasKey(correctConnectionString.getSasKey()); + + ehClient = EventHubClient.createSync(connectionString.toString(), TestContext.EXECUTOR_SERVICE); + ehClient.createPartitionSenderSync(PARTITION_ID); + } + + @Test(expected = AuthorizationFailedException.class) + public void testUnAuthorizedAccessReceiverCreation() throws Throwable { + final ConnectionStringBuilder correctConnectionString = TestContext.getConnectionString(); + final ConnectionStringBuilder connectionString = new 
ConnectionStringBuilder() + .setEndpoint(correctConnectionString.getEndpoint()) + .setEventHubName(correctConnectionString.getEventHubName()) + .setSasKeyName("---------------wrongkey------------") + .setSasKey(correctConnectionString.getSasKey()); + + ehClient = EventHubClient.createSync(connectionString.toString(), TestContext.EXECUTOR_SERVICE); + ehClient.createReceiverSync(TestContext.getConsumerGroupName(), PARTITION_ID, EventPosition.fromStartOfStream()); + } + + @Test(expected = IllegalEntityException.class) + public void testSendToNonExistantEventHub() throws Throwable { + final ConnectionStringBuilder correctConnectionString = TestContext.getConnectionString(); + final ConnectionStringBuilder connectionString = new ConnectionStringBuilder() + .setEndpoint(correctConnectionString.getEndpoint()) + .setEventHubName("non-existant-entity" + UUID.randomUUID().toString()) + .setSasKeyName(correctConnectionString.getSasKeyName()) + .setSasKey(correctConnectionString.getSasKey()); + + ehClient = EventHubClient.createSync(connectionString.toString(), TestContext.EXECUTOR_SERVICE); + ehClient.sendSync(EventData.create("test string".getBytes())); + } + + @Test(expected = IllegalEntityException.class) + public void testReceiveFromNonExistantEventHub() throws Throwable { + final ConnectionStringBuilder correctConnectionString = TestContext.getConnectionString(); + final ConnectionStringBuilder connectionString = new ConnectionStringBuilder() + .setEndpoint(correctConnectionString.getEndpoint()) + .setEventHubName("non-existant-entity" + UUID.randomUUID().toString()) + .setSasKeyName(correctConnectionString.getSasKeyName()) + .setSasKey(correctConnectionString.getSasKey()); + + ehClient = EventHubClient.createSync(connectionString.toString(), TestContext.EXECUTOR_SERVICE); + ehClient.createReceiverSync(TestContext.getConsumerGroupName(), PARTITION_ID, EventPosition.fromStartOfStream()); + } + + @After + public void cleanup() throws EventHubException { + 
ehClient.closeSync(); + } +} \ No newline at end of file diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/SendLargeMessageTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/SendLargeMessageTest.java new file mode 100644 index 0000000000000..b5cccf85c9b4a --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/SendLargeMessageTest.java @@ -0,0 +1,93 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.exceptioncontracts; + +import com.microsoft.azure.eventhubs.*; +import com.microsoft.azure.eventhubs.lib.ApiTestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.IOException; +import java.time.Instant; +import java.util.concurrent.ExecutionException; + +public class SendLargeMessageTest extends ApiTestBase { + static String partitionId = "0"; + + static EventHubClient ehClient; + static PartitionSender sender; + + static EventHubClient receiverHub; + static PartitionReceiver receiver; + + @BeforeClass + public static void initialize() throws Exception { + initializeEventHubClients(TestContext.getConnectionString()); + } + + public static void initializeEventHubClients(ConnectionStringBuilder connStr) throws Exception { + + ehClient = EventHubClient.createSync(connStr.toString(), TestContext.EXECUTOR_SERVICE); + sender = ehClient.createPartitionSender(partitionId).get(); + + receiverHub = EventHubClient.createSync(connStr.toString(), TestContext.EXECUTOR_SERVICE); + receiver = receiverHub.createReceiver(TestContext.getConsumerGroupName(), partitionId, 
EventPosition.fromEnqueuedTime(Instant.now())).get(); + } + + @AfterClass() + public static void cleanup() throws EventHubException { + if (receiverHub != null) { + receiverHub.close(); + } + + if (ehClient != null) { + ehClient.close(); + } + } + + @Test() + public void sendMsgLargerThan64k() throws EventHubException, InterruptedException, ExecutionException, IOException { + this.sendLargeMessageTest(100 * 1024); + } + + @Test(expected = PayloadSizeExceededException.class) + public void sendMsgLargerThan1024K() throws EventHubException, InterruptedException, ExecutionException, IOException { + int msgSize = 1024 * 1024 * 2; + byte[] body = new byte[msgSize]; + for (int i = 0; i < msgSize; i++) { + body[i] = 1; + } + + EventData largeMsg = EventData.create(body); + sender.sendSync(largeMsg); + } + + @Test() + public void sendMsgLargerThan128k() throws EventHubException, InterruptedException, ExecutionException, IOException { + this.sendLargeMessageTest(129 * 1024); + } + + public void sendLargeMessageTest(int msgSize) throws InterruptedException, ExecutionException, EventHubException { + byte[] body = new byte[msgSize]; + for (int i = 0; i < msgSize; i++) { + body[i] = 1; + } + + EventData largeMsg = EventData.create(body); + sender.sendSync(largeMsg); + + Iterable messages = receiver.receiveSync(100); + Assert.assertTrue(messages != null && messages.iterator().hasNext()); + + EventData recdMessage = messages.iterator().next(); + + Assert.assertTrue( + String.format("sent msg size: %s, recvd msg size: %s", msgSize, recdMessage.getBytes().length), + recdMessage.getBytes().length == msgSize); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/WebSocketsSendLargeMessageTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/WebSocketsSendLargeMessageTest.java new file mode 100644 index 0000000000000..71b6b12f59699 --- /dev/null +++ 
b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/exceptioncontracts/WebSocketsSendLargeMessageTest.java @@ -0,0 +1,50 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.exceptioncontracts; + +import com.microsoft.azure.eventhubs.ConnectionStringBuilder; +import com.microsoft.azure.eventhubs.EventHubException; +import com.microsoft.azure.eventhubs.PayloadSizeExceededException; +import com.microsoft.azure.eventhubs.TransportType; +import com.microsoft.azure.eventhubs.lib.ApiTestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.IOException; +import java.util.concurrent.ExecutionException; + +public class WebSocketsSendLargeMessageTest extends ApiTestBase { + private static SendLargeMessageTest sendLargeMessageTest; + + @BeforeClass + public static void initialize() throws Exception { + final ConnectionStringBuilder connectionStringBuilder = TestContext.getConnectionString(); + connectionStringBuilder.setTransportType(TransportType.AMQP_WEB_SOCKETS); + sendLargeMessageTest = new SendLargeMessageTest(); + SendLargeMessageTest.initializeEventHubClients(connectionStringBuilder); + } + + @AfterClass() + public static void cleanup() throws EventHubException { + SendLargeMessageTest.cleanup(); + } + + @Test() + public void sendMsgLargerThan64k() throws EventHubException, InterruptedException, ExecutionException, IOException { + sendLargeMessageTest.sendMsgLargerThan64k(); + } + + @Test(expected = PayloadSizeExceededException.class) + public void sendMsgLargerThan1024K() throws EventHubException, InterruptedException, ExecutionException, IOException { + sendLargeMessageTest.sendMsgLargerThan1024K(); + } + + @Test() + public void sendMsgLargerThan128k() throws EventHubException, 
InterruptedException, ExecutionException, IOException { + sendLargeMessageTest.sendMsgLargerThan128k(); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/impl/EventDataOrderTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/impl/EventDataOrderTest.java new file mode 100644 index 0000000000000..f36066a1fee1c --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/impl/EventDataOrderTest.java @@ -0,0 +1,52 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.impl; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; + +import com.microsoft.azure.eventhubs.EventData; +import org.apache.qpid.proton.amqp.Symbol; +import org.apache.qpid.proton.amqp.messaging.MessageAnnotations; +import org.apache.qpid.proton.message.Message; +import org.junit.Assert; +import org.junit.Test; + +public class EventDataOrderTest { + + private EventData constructMessage(long seqNumber) { + HashMap properties = new HashMap<>(); + properties.put(AmqpConstants.SEQUENCE_NUMBER, seqNumber); + + Message message = Message.Factory.create(); + + message.setMessageAnnotations(new MessageAnnotations(properties)); + + return new EventDataImpl(message); + } + + @Test + public void eventDataEmptyByteArray() { + ArrayList messages = new ArrayList<>(); + + EventData first = constructMessage(19); + EventData second = constructMessage(22); + EventData third = constructMessage(25); + EventData last = constructMessage(88); + + messages.add(second); + messages.add(first); + messages.add(last); + messages.add(third); + + Collections.sort(messages); + + Assert.assertEquals(messages.get(0), first); + Assert.assertEquals(messages.get(1), second); + Assert.assertEquals(messages.get(2), third); + 
Assert.assertEquals(messages.get(3), last); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/ApiTestBase.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/ApiTestBase.java new file mode 100644 index 0000000000000..cca2e7c637385 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/ApiTestBase.java @@ -0,0 +1,17 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.lib; + +import org.junit.Assume; +import org.junit.BeforeClass; + +public class ApiTestBase extends TestBase { + + @BeforeClass + public static void skipIfNotConfigured() { + + Assume.assumeTrue(TestContext.isTestConfigurationSet()); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/FaultInjectingReactorFactory.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/FaultInjectingReactorFactory.java new file mode 100644 index 0000000000000..d86d14b7a2e85 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/FaultInjectingReactorFactory.java @@ -0,0 +1,68 @@ +package com.microsoft.azure.eventhubs.lib; + +import com.microsoft.azure.eventhubs.impl.CustomIOHandler; +import com.microsoft.azure.eventhubs.impl.MessagingFactory; +import com.microsoft.azure.eventhubs.impl.ReactorHandler; +import org.apache.qpid.proton.Proton; +import org.apache.qpid.proton.amqp.Symbol; +import org.apache.qpid.proton.amqp.transport.ErrorCondition; +import org.apache.qpid.proton.engine.Event; +import org.apache.qpid.proton.engine.Transport; +import org.apache.qpid.proton.reactor.Reactor; + +import java.io.IOException; + +public class FaultInjectingReactorFactory extends 
MessagingFactory.ReactorFactory { + + private volatile FaultType faultType; + + public void setFaultType(final FaultType faultType) { + this.faultType = faultType; + } + + @Override + public Reactor create(final ReactorHandler reactorHandler, final int maxFrameSize) throws IOException { + final Reactor reactor = Proton.reactor(reactorHandler); + + switch (this.faultType) { + case NetworkOutage: + reactor.setGlobalHandler(new NetworkOutageSimulator()); + break; + default: + throw new UnsupportedOperationException(); + } + + return reactor; + } + + public enum FaultType { + NetworkOutage + } + + public final static class NetworkOutageSimulator extends CustomIOHandler { + + @Override + public void onUnhandled(final Event event) { + switch (event.getType()) { + case CONNECTION_BOUND: + this.handleBound(event); + break; + default: + super.onUnhandled(event); + } + } + + private void handleBound(final Event event) { + final Transport transport = event.getConnection().getTransport(); + final ErrorCondition condition = new ErrorCondition(); + condition.setCondition(Symbol.getSymbol("proton:io")); + condition.setDescription("induced fault"); + transport.setCondition(condition); + transport.close_tail(); + transport.close_head(); + transport.pop(Math.max(0, transport.pending())); + + this.selectableTransport(event.getReactor(), null, transport); + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/Mock/MockServer.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/Mock/MockServer.java new file mode 100644 index 0000000000000..68b12fe76f1cb --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/Mock/MockServer.java @@ -0,0 +1,56 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.lib.Mock; + +import com.microsoft.azure.eventhubs.lib.TestBase; +import org.apache.qpid.proton.Proton; +import org.apache.qpid.proton.engine.BaseHandler; +import org.apache.qpid.proton.reactor.Acceptor; +import org.apache.qpid.proton.reactor.Reactor; + +import java.io.Closeable; +import java.io.IOException; +import java.util.logging.Level; + +/** + * Mock Server (Singleton) designed to test AMQP related features in the javaClient + */ +public class MockServer implements Closeable { + public final static String HostName = "127.0.0.1"; + public final static int Port = 5671; + + private Reactor reactor; + private Acceptor acceptor; + + private MockServer(BaseHandler handler) throws IOException, InterruptedException { + this.reactor = Proton.reactor(); + + new Thread(new Runnable() { + @Override + public void run() { + if (TestBase.TEST_LOGGER.isLoggable(Level.FINE)) { + TestBase.TEST_LOGGER.log(Level.FINE, "starting reactor instance."); + } + + reactor.run(); + } + }).start(); + + this.acceptor = this.reactor.acceptor(MockServer.HostName, MockServer.Port, + handler == null ? new ServerTraceHandler() : handler); + } + + public static MockServer Create(BaseHandler handler) throws IOException, InterruptedException { + MockServer server = new MockServer(handler); + return server; + } + + @Override + public void close() throws IOException { + if (this.acceptor != null) { + this.acceptor.close(); + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/Mock/Sender1MsgOnLinkFlowHandler.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/Mock/Sender1MsgOnLinkFlowHandler.java new file mode 100644 index 0000000000000..b72aeee619334 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/Mock/Sender1MsgOnLinkFlowHandler.java @@ -0,0 +1,62 @@ +/* + * Copyright (c) Microsoft. All rights reserved. 
+ * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.lib.Mock; + +import com.microsoft.azure.eventhubs.impl.AmqpConstants; +import org.apache.qpid.proton.Proton; +import org.apache.qpid.proton.amqp.Symbol; +import org.apache.qpid.proton.amqp.messaging.ApplicationProperties; +import org.apache.qpid.proton.amqp.messaging.MessageAnnotations; +import org.apache.qpid.proton.engine.Event; +import org.apache.qpid.proton.engine.Sender; +import org.apache.qpid.proton.message.Message; +import org.apache.qpid.proton.reactor.Handshaker; + +import java.util.*; + +/** + * Sends 1 Msg on the first onLinkFlow event + */ +public class Sender1MsgOnLinkFlowHandler extends ServerTraceHandler { + private final Object firstFlow; + private boolean isFirstFlow; + + public Sender1MsgOnLinkFlowHandler() { + add(new Handshaker()); + + this.firstFlow = new Object(); + this.isFirstFlow = true; + } + + @Override + public void onLinkFlow(Event event) { + if (this.isFirstFlow) { + synchronized (this.firstFlow) { + if (this.isFirstFlow) { + Sender sender = (Sender) event.getLink(); + if (sender != null) { + byte[] bytes = new byte[5 * 1024]; + Message msg = Proton.message(); + Map properties = new HashMap(); + properties.put("testkey", "testvalue"); + msg.setApplicationProperties(new ApplicationProperties(properties)); + Map annotations = new HashMap(); + annotations.put(AmqpConstants.OFFSET, "11111111"); + MessageAnnotations msgAnnotation = new MessageAnnotations(annotations); + msg.setMessageAnnotations(msgAnnotation); + int length = msg.encode(bytes, 0, 4 * 1024); + + byte[] tag = String.valueOf(1).getBytes(); + sender.delivery(tag); + sender.send(bytes, 0, length); + + sender.advance(); + this.isFirstFlow = false; + } + } + } + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/Mock/ServerTraceHandler.java 
b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/Mock/ServerTraceHandler.java new file mode 100644 index 0000000000000..db707685e0782 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/Mock/ServerTraceHandler.java @@ -0,0 +1,60 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.lib.Mock; + +import org.apache.qpid.proton.Proton; +import org.apache.qpid.proton.engine.*; +import org.apache.qpid.proton.engine.SslDomain.Mode; +import org.apache.qpid.proton.reactor.Handshaker; + +import java.util.*; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + * Traces all server events if enabled. used for debugging + */ +public class ServerTraceHandler extends BaseHandler { + + private static final Logger TRACE_LOGGER = Logger.getLogger("servicebus.test.trace"); + + public ServerTraceHandler(BaseHandler... 
handlers) { + add(new Handshaker()); + for (BaseHandler handler : handlers) { + add(handler); + } + } + + + @Override + public void onUnhandled(Event event) { + if (TRACE_LOGGER.isLoggable(Level.FINE)) { + TRACE_LOGGER.log(Level.FINE, + "Connection.onUnhandled: name[" + event.getConnection().getHostname() + "] : event[" + event + "]"); + } + super.onUnhandled(event); + } + + @Override + public void onConnectionBound(Event event) { + Transport transport = event.getTransport(); + SslDomain domain = Proton.sslDomain(); + domain.init(Mode.SERVER); + + domain.setPeerAuthentication(SslDomain.VerifyMode.ANONYMOUS_PEER); + transport.ssl(domain); + + Sasl sasl = transport.sasl(); + sasl.allowSkip(true); + sasl.setMechanisms("PLAIN"); + // sasl.done(SaslOutcome.PN_SASL_OK);*/ + } + + @Override + public void onConnectionRemoteOpen(Event event) { + super.onConnectionRemoteOpen(event); + event.getConnection().open(); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/SasTokenTestBase.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/SasTokenTestBase.java new file mode 100644 index 0000000000000..fa522e663f9dd --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/SasTokenTestBase.java @@ -0,0 +1,43 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.lib; + +import com.microsoft.azure.eventhubs.ConnectionStringBuilder; +import com.microsoft.azure.eventhubs.EventHubException; +import com.microsoft.azure.eventhubs.impl.SharedAccessSignatureTokenProvider; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.time.Duration; + +public class SasTokenTestBase extends ApiTestBase { + + private static ConnectionStringBuilder originalConnectionString; + + @BeforeClass + public static void replaceConnectionString() throws Exception { + + originalConnectionString = TestContext.getConnectionString(); + final String connectionStringWithSasToken = new ConnectionStringBuilder() + .setEndpoint(originalConnectionString.getEndpoint()) + .setEventHubName(originalConnectionString.getEventHubName()) + .setSharedAccessSignature( + SharedAccessSignatureTokenProvider.generateSharedAccessSignature(originalConnectionString.getSasKeyName(), + originalConnectionString.getSasKey(), + String.format("amqp://%s/%s", originalConnectionString.getEndpoint().getHost(), originalConnectionString.getEventHubName()), + Duration.ofDays(1)) + ) + .toString(); + + TestContext.setConnectionString(connectionStringWithSasToken); + } + + @AfterClass + public static void undoReplace() throws EventHubException { + + if (originalConnectionString != null) + TestContext.setConnectionString(originalConnectionString.toString()); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/TestBase.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/TestBase.java new file mode 100644 index 0000000000000..afc861e80ef3a --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/TestBase.java @@ -0,0 +1,41 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.lib; + +import com.microsoft.azure.eventhubs.EventData; +import com.microsoft.azure.eventhubs.EventHubClient; +import com.microsoft.azure.eventhubs.EventHubException; +import com.microsoft.azure.eventhubs.PartitionSender; + +import java.util.concurrent.CompletableFuture; +import java.util.function.Function; +import java.util.logging.Logger; + +/** + * all tests derive from this base - provides common functionality + * - provides a way to checkout EventHub for each test to exclusively run with + * - ******* Before running all Tests - fill data here ********* + */ +public abstract class TestBase { + public static final Logger TEST_LOGGER = Logger.getLogger("servicebus.test.trace"); + + public static CompletableFuture pushEventsToPartition(final EventHubClient ehClient, final String partitionId, final int noOfEvents) + throws EventHubException { + return ehClient.createPartitionSender(partitionId) + .thenComposeAsync(new Function>() { + @Override + public CompletableFuture apply(PartitionSender pSender) { + @SuppressWarnings("unchecked") + final CompletableFuture[] sends = new CompletableFuture[noOfEvents]; + for (int count = 0; count < noOfEvents; count++) { + final EventData sendEvent = EventData.create("test string".getBytes()); + sends[count] = pSender.send(sendEvent); + } + + return CompletableFuture.allOf(sends); + } + }); + } +} \ No newline at end of file diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/TestContext.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/TestContext.java new file mode 100644 index 0000000000000..fa5215a342127 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/lib/TestContext.java @@ -0,0 +1,40 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.lib; + +import com.microsoft.azure.eventhubs.ConnectionStringBuilder; + +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledThreadPoolExecutor; + +public final class TestContext { + + public final static ScheduledExecutorService EXECUTOR_SERVICE = Executors.newScheduledThreadPool(1); + + final static String EVENT_HUB_CONNECTION_STRING_ENV_NAME = "EVENT_HUB_CONNECTION_STRING"; + + private static String CONNECTION_STRING = System.getenv(EVENT_HUB_CONNECTION_STRING_ENV_NAME); + + private TestContext() { + // eq. of c# static class + } + + public static ConnectionStringBuilder getConnectionString() { + return new ConnectionStringBuilder(CONNECTION_STRING); + } + + public static void setConnectionString(final String connectionString) { + CONNECTION_STRING = connectionString; + } + + public static String getConsumerGroupName() { + return "$default"; + } + + public static boolean isTestConfigurationSet() { + return System.getenv(EVENT_HUB_CONNECTION_STRING_ENV_NAME) != null; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxyReceiveTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxyReceiveTest.java new file mode 100644 index 0000000000000..52551cc896886 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxyReceiveTest.java @@ -0,0 +1,82 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.proxy; + +import com.microsoft.azure.eventhubs.ConnectionStringBuilder; +import com.microsoft.azure.eventhubs.EventHubException; +import com.microsoft.azure.eventhubs.TransportType; +import com.microsoft.azure.eventhubs.lib.SasTokenTestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import com.microsoft.azure.eventhubs.sendrecv.ReceiveTest; +import org.jutils.jproxy.ProxyServer; +import org.junit.*; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.Proxy; +import java.net.ProxySelector; +import java.net.SocketAddress; +import java.net.URI; +import java.util.LinkedList; +import java.util.List; + +public class ProxyReceiveTest extends SasTokenTestBase { + + private static final int proxyPort = 8899; + private static ProxyServer proxyServer; + private static ReceiveTest receiveTest; + private static ProxySelector defaultProxySelector; + + @BeforeClass + public static void initialize() throws Exception { + proxyServer = ProxyServer.create("localhost", proxyPort); + proxyServer.start(t -> {}); + + defaultProxySelector = ProxySelector.getDefault(); + ProxySelector.setDefault(new ProxySelector() { + @Override + public List select(URI uri) { + LinkedList proxies = new LinkedList<>(); + proxies.add(new Proxy(Proxy.Type.HTTP, new InetSocketAddress("localhost", proxyPort))); + return proxies; + } + + @Override + public void connectFailed(URI uri, SocketAddress sa, IOException ioe) { + // no-op + } + }); + + Assert.assertTrue(TestContext.getConnectionString().getSharedAccessSignature() != null + && TestContext.getConnectionString().getSasKey() == null + && TestContext.getConnectionString().getSasKeyName() == null); + + receiveTest = new ReceiveTest(); + ConnectionStringBuilder connectionString = TestContext.getConnectionString(); + connectionString.setTransportType(TransportType.AMQP_WEB_SOCKETS); + ReceiveTest.initializeEventHub(connectionString); + } + + @AfterClass() + public static 
void cleanup() throws Exception { + ReceiveTest.cleanup(); + + if (proxyServer != null) { + proxyServer.stop(); + } + + ProxySelector.setDefault(defaultProxySelector); + } + + @Test() + public void testReceiverStartOfStreamFilters() throws EventHubException { + receiveTest.testReceiverStartOfStreamFilters(); + } + + @After + public void testCleanup() throws EventHubException { + receiveTest.testCleanup(); + } +} \ No newline at end of file diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxySelectorTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxySelectorTest.java new file mode 100644 index 0000000000000..fb2733e5c4099 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxySelectorTest.java @@ -0,0 +1,67 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.proxy; + +import com.microsoft.azure.eventhubs.ConnectionStringBuilder; +import com.microsoft.azure.eventhubs.EventHubClient; +import com.microsoft.azure.eventhubs.EventHubException; +import com.microsoft.azure.eventhubs.TransportType; +import com.microsoft.azure.eventhubs.lib.ApiTestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import org.junit.Assert; +import org.junit.Test; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.Proxy; +import java.net.ProxySelector; +import java.net.SocketAddress; +import java.net.URI; +import java.time.Duration; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; + +public class ProxySelectorTest extends ApiTestBase { + + @Test + public void proxySelectorConnectFailedInvokeTest() throws Exception { + // doesn't start proxy server and verifies that the connectFailed callback is invoked. 
+ int proxyPort = 8899; + final CompletableFuture connectFailedTask = new CompletableFuture<>(); + final ProxySelector defaultProxySelector = ProxySelector.getDefault(); + ProxySelector.setDefault(new ProxySelector() { + @Override + public List select(URI uri) { + LinkedList proxies = new LinkedList<>(); + proxies.add(new Proxy(Proxy.Type.HTTP, new InetSocketAddress("localhost", proxyPort))); + return proxies; + } + + @Override + public void connectFailed(URI uri, SocketAddress sa, IOException ioe) { + connectFailedTask.complete(null); + } + }); + + try { + ConnectionStringBuilder builder = new ConnectionStringBuilder(TestContext.getConnectionString().toString()); + builder.setTransportType(TransportType.AMQP_WEB_SOCKETS); + builder.setOperationTimeout(Duration.ofSeconds(10)); + + try { + EventHubClient.createSync(builder.toString(), TestContext.EXECUTOR_SERVICE); + Assert.assertTrue(false); // shouldn't reach here + } catch (EventHubException ex) { + Assert.assertEquals("connection aborted", ex.getMessage()); + } + + connectFailedTask.get(2, TimeUnit.SECONDS); + } finally { + ProxySelector.setDefault(defaultProxySelector); + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxySendLargeMessageTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxySendLargeMessageTest.java new file mode 100644 index 0000000000000..ec8f94b679802 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxySendLargeMessageTest.java @@ -0,0 +1,83 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.proxy; + +import com.microsoft.azure.eventhubs.ConnectionStringBuilder; +import com.microsoft.azure.eventhubs.EventHubException; +import com.microsoft.azure.eventhubs.PayloadSizeExceededException; +import com.microsoft.azure.eventhubs.TransportType; +import com.microsoft.azure.eventhubs.exceptioncontracts.SendLargeMessageTest; +import com.microsoft.azure.eventhubs.lib.ApiTestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.jutils.jproxy.ProxyServer; + +import java.io.IOException; +import java.net.*; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.ExecutionException; + +public class ProxySendLargeMessageTest extends ApiTestBase { + private static int proxyPort = 8899; + private static ProxyServer proxyServer; + private static SendLargeMessageTest sendLargeMessageTest; + private static ProxySelector defaultProxySelector; + + @BeforeClass + public static void initialize() throws Exception { + proxyServer = ProxyServer.create("localhost", proxyPort); + proxyServer.start(t -> { + }); + + defaultProxySelector = ProxySelector.getDefault(); + ProxySelector.setDefault(new ProxySelector() { + @Override + public List select(URI uri) { + LinkedList proxies = new LinkedList<>(); + proxies.add(new Proxy(Proxy.Type.HTTP, new InetSocketAddress("localhost", proxyPort))); + return proxies; + } + + @Override + public void connectFailed(URI uri, SocketAddress sa, IOException ioe) { + // no-op + } + }); + + final ConnectionStringBuilder connectionStringBuilder = TestContext.getConnectionString(); + connectionStringBuilder.setTransportType(TransportType.AMQP_WEB_SOCKETS); + sendLargeMessageTest = new SendLargeMessageTest(); + SendLargeMessageTest.initializeEventHubClients(connectionStringBuilder); + } + + @AfterClass() + public static void cleanup() throws Exception { + SendLargeMessageTest.cleanup(); 
+ + if (proxyServer != null) { + proxyServer.stop(); + } + + ProxySelector.setDefault(defaultProxySelector); + } + + @Test() + public void sendMsgLargerThan64k() throws EventHubException, InterruptedException, ExecutionException, IOException { + sendLargeMessageTest.sendMsgLargerThan64k(); + } + + @Test(expected = PayloadSizeExceededException.class) + public void sendMsgLargerThan256K() throws EventHubException, InterruptedException, ExecutionException, IOException { + sendLargeMessageTest.sendMsgLargerThan1024K(); + } + + @Test() + public void sendMsgLargerThan128k() throws EventHubException, InterruptedException, ExecutionException, IOException { + sendLargeMessageTest.sendMsgLargerThan128k(); + } +} \ No newline at end of file diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxySendTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxySendTest.java new file mode 100644 index 0000000000000..8c47489dfcf7d --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/proxy/ProxySendTest.java @@ -0,0 +1,91 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.proxy; + +import com.microsoft.azure.eventhubs.ConnectionStringBuilder; +import com.microsoft.azure.eventhubs.EventHubClient; +import com.microsoft.azure.eventhubs.EventHubException; +import com.microsoft.azure.eventhubs.TransportType; +import com.microsoft.azure.eventhubs.lib.SasTokenTestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import com.microsoft.azure.eventhubs.sendrecv.SendTest; +import org.jutils.jproxy.ProxyServer; +import org.junit.*; + +import java.io.IOException; +import java.net.*; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; + +public class ProxySendTest extends SasTokenTestBase { + + private static int proxyPort = 8899; + private static ProxyServer proxyServer; + private static SendTest sendTest; + private static ProxySelector defaultProxySelector; + + @BeforeClass + public static void initialize() throws Exception { + proxyServer = ProxyServer.create("localhost", proxyPort); + proxyServer.start(t -> {}); + + defaultProxySelector = ProxySelector.getDefault(); + ProxySelector.setDefault(new ProxySelector() { + @Override + public List select(URI uri) { + LinkedList proxies = new LinkedList<>(); + proxies.add(new Proxy(Proxy.Type.HTTP, new InetSocketAddress("localhost", proxyPort))); + return proxies; + } + + @Override + public void connectFailed(URI uri, SocketAddress sa, IOException ioe) { + // no-op + } + }); + + Assert.assertTrue(TestContext.getConnectionString().getSharedAccessSignature() != null + && TestContext.getConnectionString().getSasKey() == null + && TestContext.getConnectionString().getSasKeyName() == null); + + sendTest = new SendTest(); + + ConnectionStringBuilder connectionString = TestContext.getConnectionString(); + connectionString.setTransportType(TransportType.AMQP_WEB_SOCKETS); + SendTest.initializeEventHub(connectionString); + } + + @AfterClass + public static 
void cleanupClient() throws Exception { + + SendTest.cleanupClient(); + + if (proxyServer != null) { + proxyServer.stop(); + } + + ProxySelector.setDefault(defaultProxySelector); + } + + @Test + public void sendBatchRetainsOrderWithinBatch() throws EventHubException, InterruptedException, ExecutionException, TimeoutException { + + sendTest.sendBatchRetainsOrderWithinBatch(); + } + + @Test + public void sendResultsInSysPropertiesWithPartitionKey() throws EventHubException, InterruptedException, ExecutionException, TimeoutException { + + sendTest.sendResultsInSysPropertiesWithPartitionKey(); + } + + @After + public void cleanup() throws Exception { + + sendTest.cleanup(); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/EventDataBatchAPITest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/EventDataBatchAPITest.java new file mode 100644 index 0000000000000..cd4b94ccd262b --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/EventDataBatchAPITest.java @@ -0,0 +1,268 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.sendrecv; + +import com.microsoft.azure.eventhubs.*; +import com.microsoft.azure.eventhubs.lib.ApiTestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import junit.framework.AssertionFailedError; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.time.Duration; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.Random; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Consumer; + +public class EventDataBatchAPITest extends ApiTestBase { + + private static final String cgName = TestContext.getConsumerGroupName(); + private static final String partitionId = "0"; + private static EventHubClient ehClient; + private static PartitionSender sender = null; + + @BeforeClass + public static void initializeEventHub() throws Exception { + final ConnectionStringBuilder connectionString = TestContext.getConnectionString(); + ehClient = EventHubClient.createSync(connectionString.toString(), TestContext.EXECUTOR_SERVICE); + sender = ehClient.createPartitionSenderSync(partitionId); + } + + @AfterClass + public static void cleanupClient() throws EventHubException { + if (sender != null) + sender.closeSync(); + + if (ehClient != null) + ehClient.closeSync(); + } + + @Test + public void sendSmallEventsFullBatchTest() + throws EventHubException, InterruptedException, ExecutionException, TimeoutException { + final EventDataBatch batchEvents = sender.createBatch(); + + while (batchEvents.tryAdd(EventData.create("a".getBytes()))) ; + + sender = ehClient.createPartitionSenderSync(partitionId); + sender.sendSync(batchEvents); + } + + @Test + public void sendSmallEventsFullBatchPartitionKeyTest() + throws 
EventHubException, InterruptedException, ExecutionException, TimeoutException { + final BatchOptions options = new BatchOptions() + .with(o -> o.partitionKey = UUID.randomUUID().toString()); + final EventDataBatch batchEvents = ehClient.createBatch(options); + + while (batchEvents.tryAdd(EventData.create("a".getBytes()))) ; + + ehClient.sendSync(batchEvents); + } + + @Test + public void sendBatchPartitionKeyValidateTest() + throws EventHubException, InterruptedException, ExecutionException, TimeoutException { + final String partitionKey = UUID.randomUUID().toString(); + + final BatchOptions options = new BatchOptions().with(o -> o.partitionKey = partitionKey); + final EventDataBatch batchEvents = ehClient.createBatch(options); + + int count = 0; + while (batchEvents.tryAdd(EventData.create("a".getBytes())) && count++ < 10) ; + + final int sentCount = count; + final CompletableFuture testResult = new CompletableFuture<>(); + final PartitionReceiveHandler validator = new PartitionReceiveHandler() { + final AtomicInteger netCount = new AtomicInteger(0); + + @Override + public int getMaxEventCount() { + return 100; + } + + @Override + public void onReceive(Iterable events) { + if (events != null) { + final Iterator eterator = events.iterator(); + while (eterator.hasNext()) { + final EventData currentData = eterator.next(); + final String currentPartitionKey = currentData.getSystemProperties().getPartitionKey(); + if (!currentPartitionKey.equalsIgnoreCase(partitionKey)) + testResult.completeExceptionally(new AssertionFailedError()); + + final int countSoFar = netCount.incrementAndGet(); + if (countSoFar >= sentCount) + testResult.complete(null); + } + } + } + + @Override + public void onError(Throwable error) { + testResult.completeExceptionally(error); + } + }; + + final LinkedList receivers = new LinkedList<>(); + try { + final String[] partitionIds = ehClient.getRuntimeInformation().get().getPartitionIds(); + for (int index = 0; index < partitionIds.length; index++) 
{ + final PartitionReceiver receiver = ehClient.createReceiverSync(TestContext.getConsumerGroupName(), partitionIds[index], EventPosition.fromEndOfStream()); + receiver.setReceiveTimeout(Duration.ofSeconds(5)); + receiver.setReceiveHandler(validator); + receivers.add(receiver); + } + + ehClient.sendSync(batchEvents); + testResult.get(); + } finally { + if (receivers.size() > 0) + receivers.forEach(new Consumer() { + @Override + public void accept(PartitionReceiver partitionReceiver) { + try { + partitionReceiver.closeSync(); + } catch (EventHubException ignore) { + } + } + }); + } + } + + @Test + public void sendEventsFullBatchWithAppPropsTest() + throws EventHubException, InterruptedException, ExecutionException, TimeoutException { + final CompletableFuture validator = new CompletableFuture<>(); + final PartitionReceiver receiver = ehClient.createReceiverSync(cgName, partitionId, EventPosition.fromEndOfStream()); + receiver.setReceiveTimeout(Duration.ofSeconds(5)); + + try { + final EventDataBatch batchEvents = sender.createBatch(); + + int count = 0; + while (true) { + final EventData eventData = EventData.create(new String(new char[50000]).replace("\0", "a").getBytes()); + for (int i = 0; i < new Random().nextInt(20); i++) + eventData.getProperties().put("somekey" + i, "somevalue"); + + if (batchEvents.tryAdd(eventData)) + count++; + else + break; + } + + Assert.assertEquals(count, batchEvents.getSize()); + receiver.setReceiveHandler(new CountValidator(validator, count)); + + sender.sendSync(batchEvents); + + validator.get(100, TimeUnit.SECONDS); + + receiver.setReceiveHandler(null); + } finally { + receiver.closeSync(); + } + } + + @Test + public void sendEventsFullBatchWithPartitionKeyTest() + throws EventHubException, InterruptedException, ExecutionException, TimeoutException { + + final String partitionKey = UUID.randomUUID().toString(); + final BatchOptions options = new BatchOptions().with(o -> o.partitionKey = partitionKey); + final EventDataBatch 
batchEvents = ehClient.createBatch(options); + + int count = 0; + while (true) { + final EventData eventData = EventData.create(new String("a").getBytes()); + for (int i = 0; i < new Random().nextInt(20); i++) + eventData.getProperties().put("somekey" + i, "somevalue"); + + if (batchEvents.tryAdd(eventData)) + count++; + else + break; + } + + Assert.assertEquals(count, batchEvents.getSize()); + ehClient.sendSync(batchEvents); + } + + @Test(expected = IllegalArgumentException.class) + public void sendBatchWithPartitionKeyOnPartitionSenderTest() + throws EventHubException, InterruptedException, ExecutionException, TimeoutException { + + + final BatchOptions options = new BatchOptions().with(o -> o.partitionKey = UUID.randomUUID().toString()); + final EventDataBatch batchEvents = sender.createBatch(options); + + int count = 0; + while (true) { + final EventData eventData = EventData.create(new String("a").getBytes()); + for (int i = 0; i < new Random().nextInt(20); i++) + eventData.getProperties().put("somekey" + i, "somevalue"); + + if (batchEvents.tryAdd(eventData)) + count++; + else + break; + } + + Assert.assertEquals(count, batchEvents.getSize()); + + // the CreateBatch was created without taking PartitionKey size into account + // so this call should fail with payload size exceeded + sender.sendSync(batchEvents); + } + + public static class CountValidator implements PartitionReceiveHandler { + final CompletableFuture validateSignal; + final int netEventCount; + + int currentCount = 0; + + public CountValidator(final CompletableFuture validateSignal, final int netEventCount) { + + this.validateSignal = validateSignal; + this.netEventCount = netEventCount; + } + + @Override + public int getMaxEventCount() { + return PartitionReceiver.DEFAULT_PREFETCH_COUNT; + } + + @Override + public void onReceive(Iterable events) { + if (events != null) + for (EventData event : events) { + currentCount++; + } + + if (currentCount >= netEventCount) + 
this.validateSignal.complete(null); + + try { + Thread.sleep(100); // wait for events to accumulate in the receive pump + } catch (InterruptedException ignore) { + } + } + + @Override + public void onError(Throwable error) { + this.validateSignal.completeExceptionally(error); + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiveParallelManualTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiveParallelManualTest.java new file mode 100644 index 0000000000000..86935e74148dd --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiveParallelManualTest.java @@ -0,0 +1,124 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.sendrecv; + +import com.microsoft.azure.eventhubs.*; +import com.microsoft.azure.eventhubs.impl.IteratorUtil; +import com.microsoft.azure.eventhubs.lib.ApiTestBase; +import com.microsoft.azure.eventhubs.lib.TestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.util.concurrent.ExecutionException; +import java.util.logging.FileHandler; +import java.util.logging.Level; +import java.util.logging.Logger; +import java.util.logging.SimpleFormatter; + +public class ReceiveParallelManualTest extends ApiTestBase { + static final String cgName = TestContext.getConsumerGroupName(); + static final String partitionId = "0"; + + static EventHubClient[] ehClient; + + + @BeforeClass + public static void initializeEventHub() throws Exception { + FileHandler fhc = new FileHandler("c:\\proton-sb-sendbatch-1100.log", false); + Logger lc1 = Logger.getLogger("servicebus.trace"); + fhc.setFormatter(new SimpleFormatter()); + lc1.addHandler(fhc); + 
lc1.setLevel(Level.FINE); + + final ConnectionStringBuilder connectionString = TestContext.getConnectionString(); + ehClient = new EventHubClient[4]; + ehClient[0] = EventHubClient.createSync(connectionString.toString(), TestContext.EXECUTOR_SERVICE); + ehClient[1] = EventHubClient.createSync(connectionString.toString(), TestContext.EXECUTOR_SERVICE); + ehClient[2] = EventHubClient.createSync(connectionString.toString(), TestContext.EXECUTOR_SERVICE); + ehClient[3] = EventHubClient.createSync(connectionString.toString(), TestContext.EXECUTOR_SERVICE); + } + + @AfterClass() + public static void cleanup() throws EventHubException { + for (int i = 0; i < 4; i++) + if (ehClient[i] != null) { + ehClient[i].closeSync(); + } + } + + // Run this test manually and introduce network failures to test + // send/receive code is resilient to n/w failures + // and continues to run once the n/w is back online + // @Test() + public void testReceiverStartOfStreamFilters() throws Exception { + new Thread(new PRunnable("0")).start(); + new Thread(new PRunnable("1")).start(); + new Thread(new PRunnable("2")).start(); + new Thread(new PRunnable("3")).start(); + System.out.println("scheduled receivers"); + System.in.read(); + } + + class PRunnable implements Runnable { + final String sPartitionId; + + PRunnable(final String sPartitionId) { + this.sPartitionId = sPartitionId; + } + + @Override + public void run() { + + int partitionIdInt = Integer.parseInt(sPartitionId); + try { + TestBase.pushEventsToPartition(ehClient[partitionIdInt], sPartitionId, 100).get(); + } catch (InterruptedException e) { + e.printStackTrace(); + } catch (ExecutionException e) { + e.printStackTrace(); + } catch (EventHubException e) { + e.printStackTrace(); + } + + PartitionReceiver offsetReceiver1 = null; + try { + offsetReceiver1 = + ehClient[partitionIdInt].createReceiverSync(cgName, sPartitionId, EventPosition.fromStartOfStream()); + } catch (EventHubException e) { + e.printStackTrace(); + } + + Iterable 
receivedEvents; + long totalEvents = 0L; + while (true) { + try { + if ((receivedEvents = offsetReceiver1.receiveSync(10)) != null && !IteratorUtil.sizeEquals(receivedEvents, 0)) { + + long batchSize = (1 + IteratorUtil.getLast(receivedEvents.iterator()).getSystemProperties().getSequenceNumber()) - + (IteratorUtil.getFirst(receivedEvents).getSystemProperties().getSequenceNumber()); + totalEvents += batchSize; + System.out.println(String.format("[partitionId: %s] received %s events; total sofar: %s, begin: %s, end: %s", + sPartitionId, + batchSize, + totalEvents, + IteratorUtil.getLast(receivedEvents.iterator()).getSystemProperties().getSequenceNumber(), + IteratorUtil.getFirst(receivedEvents).getSystemProperties().getSequenceNumber())); + } else { + System.out.println(String.format("received null on partition %s", sPartitionId)); + } + } catch (Exception exp) { + System.out.println(exp.getMessage() + exp.toString()); + } + + try { + Thread.sleep(150); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceivePumpEventHubTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceivePumpEventHubTest.java new file mode 100644 index 0000000000000..9383cfbd44ee1 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceivePumpEventHubTest.java @@ -0,0 +1,137 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.sendrecv; + +import com.microsoft.azure.eventhubs.*; +import com.microsoft.azure.eventhubs.lib.ApiTestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import org.junit.*; + +import java.io.IOException; +import java.time.Duration; +import java.time.Instant; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class ReceivePumpEventHubTest extends ApiTestBase { + static final String cgName = TestContext.getConsumerGroupName(); + static final String partitionId = "0"; + + static EventHubClient ehClient; + + PartitionReceiver receiver; + + @BeforeClass + public static void initializeEventHub() throws EventHubException, IOException { + final ConnectionStringBuilder connectionString = TestContext.getConnectionString(); + ehClient = EventHubClient.createSync(connectionString.toString(), TestContext.EXECUTOR_SERVICE); + } + + @AfterClass + public static void cleanup() throws EventHubException { + if (ehClient != null) + ehClient.closeSync(); + } + + @Before + public void initializeTest() throws EventHubException { + receiver = ehClient.createReceiverSync(cgName, partitionId, EventPosition.fromEnqueuedTime(Instant.now())); + } + + @Test(expected = TimeoutException.class) + public void testInvokeOnTimeoutKnobDefault() throws EventHubException, InterruptedException, ExecutionException, TimeoutException { + CompletableFuture invokeSignal = new CompletableFuture(); + receiver.setReceiveTimeout(Duration.ofSeconds(1)); + receiver.setReceiveHandler(new InvokeOnReceiveEventValidator(invokeSignal)); + invokeSignal.get(3, TimeUnit.SECONDS); + } + + @Test(expected = TimeoutException.class) + public void testInvokeOnTimeoutKnobFalse() throws EventHubException, InterruptedException, ExecutionException, TimeoutException { + CompletableFuture invokeSignal = new CompletableFuture(); + 
receiver.setReceiveTimeout(Duration.ofSeconds(1)); + receiver.setReceiveHandler(new InvokeOnReceiveEventValidator(invokeSignal), false); + invokeSignal.get(3, TimeUnit.SECONDS); + } + + @Test() + public void testInvokeOnTimeoutKnobTrue() throws EventHubException, InterruptedException, ExecutionException, TimeoutException { + CompletableFuture invokeSignal = new CompletableFuture(); + receiver.setReceiveTimeout(Duration.ofSeconds(1)); + receiver.setReceiveHandler(new InvokeOnReceiveEventValidator(invokeSignal), true); + invokeSignal.get(3, TimeUnit.SECONDS); + } + + @Test(expected = IllegalArgumentException.class) + public void testInvokeWithInvalidArgs() throws Throwable { + final CompletableFuture invokeSignal = new CompletableFuture(); + receiver.setReceiveTimeout(Duration.ofSeconds(1)); + receiver.setReceiveHandler(new InvokeOnReceiveEventValidator(invokeSignal, PartitionReceiver.DEFAULT_PREFETCH_COUNT + 1), true); + try { + invokeSignal.get(3, TimeUnit.SECONDS); + } catch (ExecutionException executionException) { + throw executionException.getCause(); + } + } + + @Test(expected = IllegalArgumentException.class) + public void testSetReceiveHandlerMultipleTimes() throws EventHubException, InterruptedException, ExecutionException, TimeoutException { + CompletableFuture invokeSignal = new CompletableFuture(); + receiver.setReceiveTimeout(Duration.ofSeconds(1)); + receiver.setReceiveHandler(new InvokeOnReceiveEventValidator(invokeSignal), true); + + receiver.setReceiveHandler(new InvokeOnReceiveEventValidator(invokeSignal), true); + } + + @Test() + public void testGraceFullCloseReceivePump() throws EventHubException, InterruptedException, ExecutionException, TimeoutException { + CompletableFuture invokeSignal = new CompletableFuture(); + receiver.setReceiveTimeout(Duration.ofSeconds(1)); + receiver.setReceiveHandler(new InvokeOnReceiveEventValidator(invokeSignal), true); + + receiver.setReceiveHandler(null).get(); + + invokeSignal = new CompletableFuture(); + 
receiver.setReceiveHandler(new InvokeOnReceiveEventValidator(invokeSignal), true); + invokeSignal.get(3, TimeUnit.SECONDS); + } + + @After + public void cleanupTest() throws EventHubException { + if (receiver != null) + receiver.closeSync(); + } + + public static final class InvokeOnReceiveEventValidator implements PartitionReceiveHandler { + final CompletableFuture signalInvoked; + final int maxEventCount; + + public InvokeOnReceiveEventValidator(final CompletableFuture signalInvoked) { + this(signalInvoked, 50); + } + + public InvokeOnReceiveEventValidator(final CompletableFuture signalInvoked, final int maxEventCount) { + this.signalInvoked = signalInvoked; + this.maxEventCount = maxEventCount; + } + + @Override + public int getMaxEventCount() { + return this.maxEventCount; + } + + @Override + public void onReceive(Iterable events) { + this.signalInvoked.complete(null); + } + + @Override + public void onError(Throwable error) { + this.signalInvoked.completeExceptionally(error); + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceivePumpTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceivePumpTest.java new file mode 100644 index 0000000000000..878fdbdd39c83 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceivePumpTest.java @@ -0,0 +1,226 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.sendrecv; + +import com.microsoft.azure.eventhubs.EventData; +import com.microsoft.azure.eventhubs.EventHubException; +import com.microsoft.azure.eventhubs.PartitionReceiveHandler; +import com.microsoft.azure.eventhubs.TimeoutException; +import com.microsoft.azure.eventhubs.impl.IteratorUtil; +import com.microsoft.azure.eventhubs.impl.ReceivePump; +import com.microsoft.azure.eventhubs.lib.TestContext; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.util.LinkedList; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; + +public class ReceivePumpTest { + private final String exceptionMessage = "receive Exception"; + private volatile boolean assertion = false; + + @Before + public void initializeValidation() { + assertion = false; + } + + @Test() + public void testPumpOnReceiveEventFlow() throws Exception { + final CompletableFuture pumpRun = new CompletableFuture<>(); + final ReceivePump receivePump = new ReceivePump( + "eventhub1", "consumerGroup1", + new ReceivePump.IPartitionReceiver() { + @Override + public CompletableFuture> receive(int maxBatchSize) { + final LinkedList events = new LinkedList(); + events.add(EventData.create("some".getBytes())); + return CompletableFuture.completedFuture(events); + } + + @Override + public String getPartitionId() { + return "0"; + } + }, + new PartitionReceiveHandler() { + @Override + public int getMaxEventCount() { + return 10; + } + + @Override + public void onReceive(Iterable events) { + assertion = IteratorUtil.sizeEquals(events, 1); + + // stop-pump + throw new PumpClosedException(); + } + + @Override + public void onError(Throwable error) { + Assert.assertTrue(error instanceof PumpClosedException); + pumpRun.complete(null); + } + }, + true, + TestContext.EXECUTOR_SERVICE); + + try { + receivePump.receiveAndProcess(); + pumpRun.get(); + } finally { + receivePump.stop().get(); + } + + 
Assert.assertTrue(assertion); + } + + @Test() + public void testPumpReceiveTransientErrorsPropagated() throws Exception { + final CompletableFuture pumpRun = new CompletableFuture<>(); + final ReceivePump receivePump = new ReceivePump( + "eventhub1", "consumerGroup1", + new ReceivePump.IPartitionReceiver() { + @Override + public CompletableFuture> receive(int maxBatchSize) { + final CompletableFuture> result = new CompletableFuture<>(); + result.completeExceptionally(new RuntimeException(exceptionMessage)); + return result; + } + + @Override + public String getPartitionId() { + return "0"; + } + }, + new PartitionReceiveHandler() { + @Override + public int getMaxEventCount() { + return 10; + } + + @Override + public void onReceive(Iterable events) { + } + + @Override + public void onError(Throwable error) { + assertion = error.getMessage().equals(exceptionMessage); + pumpRun.complete(null); + } + }, + false, + TestContext.EXECUTOR_SERVICE); + + try { + receivePump.receiveAndProcess(); + pumpRun.get(); + } finally { + receivePump.stop().get(); + } + + Assert.assertTrue(assertion); + } + + @Test() + public void testPumpReceiveExceptionsPropagated() throws Exception { + final CompletableFuture pumpRun = new CompletableFuture<>(); + final ReceivePump receivePump = new ReceivePump( + "eventhub1", "consumerGroup1", + new ReceivePump.IPartitionReceiver() { + @Override + public CompletableFuture> receive(int maxBatchSize) { + final CompletableFuture> result = new CompletableFuture<>(); + result.completeExceptionally(new RuntimeException(exceptionMessage)); + return result; + } + + @Override + public String getPartitionId() { + return "0"; + } + }, + new PartitionReceiveHandler() { + @Override + public int getMaxEventCount() { + return 10; + } + + @Override + public void onReceive(Iterable events) { + } + + @Override + public void onError(Throwable error) { + assertion = error.getMessage().equals(exceptionMessage); + pumpRun.complete(null); + } + }, + true, + 
TestContext.EXECUTOR_SERVICE); + + try { + receivePump.receiveAndProcess(); + pumpRun.get(); + } finally { + receivePump.stop().get(); + } + + Assert.assertTrue(assertion); + } + + @Test() + public void testPumpOnReceiveExceptionsPropagated() throws EventHubException, InterruptedException, ExecutionException, TimeoutException { + final String runtimeExceptionMsg = "random exception"; + final CompletableFuture pumpRun = new CompletableFuture<>(); + final ReceivePump receivePump = new ReceivePump( + "eventhub1", "consumerGroup1", + new ReceivePump.IPartitionReceiver() { + @Override + public CompletableFuture> receive(int maxBatchSize) { + return CompletableFuture.completedFuture(null); + } + + @Override + public String getPartitionId() { + return "0"; + } + }, + new PartitionReceiveHandler() { + @Override + public int getMaxEventCount() { + return 10; + } + + @Override + public void onReceive(Iterable events) { + throw new RuntimeException(runtimeExceptionMsg); + } + + @Override + public void onError(Throwable error) { + assertion = error.getMessage().equals(runtimeExceptionMsg); + pumpRun.complete(null); + } + }, + true, + TestContext.EXECUTOR_SERVICE); + + try { + receivePump.receiveAndProcess(); + pumpRun.get(); + } finally { + receivePump.stop().get(); + } + + Assert.assertTrue(assertion); + } + + public class PumpClosedException extends RuntimeException { + private static final long serialVersionUID = -5050327636359966016L; + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiveTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiveTest.java new file mode 100644 index 0000000000000..fe072dcf610e9 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiveTest.java @@ -0,0 +1,199 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. 
See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.sendrecv; + +import com.microsoft.azure.eventhubs.*; +import com.microsoft.azure.eventhubs.impl.AmqpConstants; +import com.microsoft.azure.eventhubs.lib.ApiTestBase; +import com.microsoft.azure.eventhubs.lib.TestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import org.junit.*; + +import java.time.Duration; +import java.time.Instant; +import java.util.Iterator; +import java.util.concurrent.ExecutionException; +import java.util.function.Consumer; + +public class ReceiveTest extends ApiTestBase { + static final String cgName = TestContext.getConsumerGroupName(); + static final String partitionId = "0"; + + static EventHubClient ehClient; + + PartitionReceiver offsetReceiver = null; + PartitionReceiver datetimeReceiver = null; + + @BeforeClass + public static void initialize() throws Exception { + final ConnectionStringBuilder connectionString = TestContext.getConnectionString(); + initializeEventHub(connectionString); + } + + public static void initializeEventHub(ConnectionStringBuilder connectionString) throws Exception { + ehClient = EventHubClient.createSync(connectionString.toString(), TestContext.EXECUTOR_SERVICE); + TestBase.pushEventsToPartition(ehClient, partitionId, 25).get(); + } + + @AfterClass() + public static void cleanup() throws EventHubException { + if (ehClient != null) { + ehClient.closeSync(); + } + } + + @Test() + public void testReceiverStartOfStreamFilters() throws EventHubException { + offsetReceiver = ehClient.createReceiverSync(cgName, partitionId, EventPosition.fromStartOfStream()); + Iterable startingEventsUsingOffsetReceiver = offsetReceiver.receiveSync(100); + + Assert.assertTrue(startingEventsUsingOffsetReceiver != null && startingEventsUsingOffsetReceiver.iterator().hasNext()); + + datetimeReceiver = ehClient.createReceiverSync(cgName, partitionId, EventPosition.fromEnqueuedTime(Instant.EPOCH)); + Iterable 
startingEventsUsingDateTimeReceiver = datetimeReceiver.receiveSync(100); + + Assert.assertTrue(startingEventsUsingOffsetReceiver != null && startingEventsUsingDateTimeReceiver.iterator().hasNext()); + + Iterator dateTimeIterator = startingEventsUsingDateTimeReceiver.iterator(); + for (EventData eventDataUsingOffset : startingEventsUsingOffsetReceiver) { + EventData eventDataUsingDateTime = dateTimeIterator.next(); + Assert.assertTrue( + String.format("START_OF_STREAM offset: %s, EPOCH offset: %s", eventDataUsingOffset.getSystemProperties().getOffset(), eventDataUsingDateTime.getSystemProperties().getOffset()), + eventDataUsingOffset.getSystemProperties().getOffset().equalsIgnoreCase(eventDataUsingDateTime.getSystemProperties().getOffset())); + + if (!dateTimeIterator.hasNext()) + break; + } + } + + @Test() + public void testReceiverLatestFilter() throws EventHubException, ExecutionException, InterruptedException { + offsetReceiver = ehClient.createReceiverSync(cgName, partitionId, EventPosition.fromEndOfStream()); + Iterable events = offsetReceiver.receiveSync(100); + Assert.assertTrue(events == null); + + TestBase.pushEventsToPartition(ehClient, partitionId, 10).get(); + events = offsetReceiver.receiveSync(100); + Assert.assertTrue(events != null && events.iterator().hasNext()); + } + + @Test() + public void testReceiverOffsetInclusiveFilter() throws EventHubException { + datetimeReceiver = ehClient.createReceiverSync(cgName, partitionId, EventPosition.fromEnqueuedTime(Instant.EPOCH)); + final Iterable events = datetimeReceiver.receiveSync(100); + + Assert.assertTrue(events != null && events.iterator().hasNext()); + final EventData event = events.iterator().next(); + + offsetReceiver = ehClient.createReceiverSync(cgName, partitionId, EventPosition.fromOffset(event.getSystemProperties().getOffset(), true)); + final EventData eventReturnedByOffsetReceiver = offsetReceiver.receiveSync(10).iterator().next(); + + 
Assert.assertTrue(eventReturnedByOffsetReceiver.getSystemProperties().getOffset().equals(event.getSystemProperties().getOffset())); + Assert.assertTrue(eventReturnedByOffsetReceiver.getSystemProperties().getSequenceNumber() == event.getSystemProperties().getSequenceNumber()); + } + + @Test() + public void testReceiverOffsetNonInclusiveFilter() throws EventHubException { + datetimeReceiver = ehClient.createReceiverSync(cgName, partitionId, EventPosition.fromEnqueuedTime(Instant.EPOCH)); + Iterable events = datetimeReceiver.receiveSync(100); + + Assert.assertTrue(events != null && events.iterator().hasNext()); + + EventData event = events.iterator().next(); + offsetReceiver = ehClient.createReceiverSync(cgName, partitionId, EventPosition.fromOffset(event.getSystemProperties().getOffset(), false)); + EventData eventReturnedByOffsetReceiver = offsetReceiver.receiveSync(10).iterator().next(); + + Assert.assertTrue(eventReturnedByOffsetReceiver.getSystemProperties().getSequenceNumber() == event.getSystemProperties().getSequenceNumber() + 1); + } + + @Test() + public void testReceiverSequenceNumberInclusiveFilter() throws EventHubException { + datetimeReceiver = ehClient.createReceiverSync(cgName, partitionId, EventPosition.fromEnqueuedTime(Instant.EPOCH)); + Iterable events = datetimeReceiver.receiveSync(100); + + Assert.assertTrue(events != null && events.iterator().hasNext()); + EventData event = events.iterator().next(); + + offsetReceiver = ehClient.createReceiverSync(cgName, partitionId, EventPosition.fromSequenceNumber(event.getSystemProperties().getSequenceNumber(), true)); + EventData eventReturnedByOffsetReceiver = offsetReceiver.receiveSync(10).iterator().next(); + + Assert.assertTrue(eventReturnedByOffsetReceiver.getSystemProperties().getOffset().equals(event.getSystemProperties().getOffset())); + Assert.assertTrue(eventReturnedByOffsetReceiver.getSystemProperties().getSequenceNumber() == event.getSystemProperties().getSequenceNumber()); + } + + @Test() + 
public void testReceiverSequenceNumberNonInclusiveFilter() throws EventHubException { + datetimeReceiver = ehClient.createReceiverSync(cgName, partitionId, EventPosition.fromEnqueuedTime(Instant.EPOCH)); + Iterable events = datetimeReceiver.receiveSync(100); + + Assert.assertTrue(events != null && events.iterator().hasNext()); + + EventData event = events.iterator().next(); + offsetReceiver = ehClient.createReceiverSync(cgName, partitionId, EventPosition.fromSequenceNumber(event.getSystemProperties().getSequenceNumber(), false)); + EventData eventReturnedByOffsetReceiver = offsetReceiver.receiveSync(10).iterator().next(); + + Assert.assertTrue(eventReturnedByOffsetReceiver.getSystemProperties().getSequenceNumber() == event.getSystemProperties().getSequenceNumber() + 1); + } + + @Test() + public void testReceivedBodyAndProperties() throws EventHubException { + datetimeReceiver = ehClient.createReceiverSync(cgName, partitionId, EventPosition.fromEndOfStream()); + datetimeReceiver.setReceiveTimeout(Duration.ofSeconds(5)); + + Iterable drainedEvents = datetimeReceiver.receiveSync(100); + while (drainedEvents != null && drainedEvents.iterator().hasNext()) { + drainedEvents = datetimeReceiver.receiveSync(100); + } + + final String payload = "TestMessage1"; + final String property1 = "property1"; + final String propertyValue1 = "something1"; + final String property2 = AmqpConstants.AMQP_PROPERTY_MESSAGE_ID; + final String propertyValue2 = "something2"; + + final Consumer validateReceivedEvent = new Consumer() { + @Override + public void accept(EventData event) { + Assert.assertEquals(new String(event.getBytes()), payload); + Assert.assertTrue(event.getProperties().containsKey(property1) && event.getProperties().get(property1).equals(propertyValue1)); + Assert.assertTrue(event.getProperties().containsKey(property2) && event.getProperties().get(property2).equals(propertyValue2)); + Assert.assertTrue(event.getSystemProperties().getOffset() != null); + 
Assert.assertTrue(event.getSystemProperties().getSequenceNumber() > 0L); + Assert.assertTrue(event.getSystemProperties().getEnqueuedTime() != null); + Assert.assertTrue(event.getSystemProperties().getPartitionKey() == null); + Assert.assertTrue(event.getSystemProperties().getPublisher() == null); + } + }; + + final EventData sentEvent = EventData.create(payload.getBytes()); + sentEvent.getProperties().put(property1, propertyValue1); + sentEvent.getProperties().put(property2, propertyValue2); + final PartitionSender sender = ehClient.createPartitionSenderSync(partitionId); + try { + sender.sendSync(sentEvent); + final EventData receivedEvent = datetimeReceiver.receiveSync(10).iterator().next(); + validateReceivedEvent.accept(receivedEvent); + + sender.sendSync(receivedEvent); + final EventData reSendReceivedEvent = datetimeReceiver.receiveSync(10).iterator().next(); + validateReceivedEvent.accept(reSendReceivedEvent); + } finally { + sender.closeSync(); + } + } + + @After + public void testCleanup() throws EventHubException { + if (offsetReceiver != null) { + offsetReceiver.closeSync(); + offsetReceiver = null; + } + + if (datetimeReceiver != null) { + datetimeReceiver.closeSync(); + datetimeReceiver = null; + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiverIdentifierTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiverIdentifierTest.java new file mode 100644 index 0000000000000..d16233f416e8b --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiverIdentifierTest.java @@ -0,0 +1,70 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.sendrecv; + +import com.microsoft.azure.eventhubs.*; +import com.microsoft.azure.eventhubs.lib.ApiTestBase; +import com.microsoft.azure.eventhubs.lib.TestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.time.Instant; +import java.util.LinkedList; +import java.util.List; +import java.util.UUID; + +public class ReceiverIdentifierTest extends ApiTestBase { + + static final String cgName = TestContext.getConsumerGroupName(); + static final String partitionId = "0"; + static final Instant beforeTestStart = Instant.now(); + static final int sentEvents = 25; + static final List receivers = new LinkedList<>(); + + static EventHubClient ehClient; + + @BeforeClass + public static void initializeEventHub() throws Exception { + + final ConnectionStringBuilder connectionString = TestContext.getConnectionString(); + ehClient = EventHubClient.createSync(connectionString.toString(), TestContext.EXECUTOR_SERVICE); + + TestBase.pushEventsToPartition(ehClient, partitionId, sentEvents).get(); + } + + @AfterClass() + public static void cleanup() throws EventHubException { + + for (PartitionReceiver receiver : receivers) + receiver.closeSync(); + + if (ehClient != null) + ehClient.closeSync(); + } + + @Test() + public void testReceiverIdentierShowsUpInQuotaErrors() throws EventHubException { + + final String receiverIdentifierPrefix = UUID.randomUUID().toString(); + for (int receiverCount = 0; receiverCount < 5; receiverCount++) { + final ReceiverOptions options = new ReceiverOptions(); + options.setIdentifier(receiverIdentifierPrefix + receiverCount); + ehClient.createReceiverSync(cgName, partitionId, EventPosition.fromStartOfStream(), options); + } + + try { + ehClient.createReceiverSync(cgName, partitionId, EventPosition.fromStartOfStream()); + Assert.assertTrue(false); + } catch (QuotaExceededException 
quotaError) { + final String errorMsg = quotaError.getMessage(); + for (int receiverCount = 0; receiverCount < 5; receiverCount++) { + Assert.assertTrue(errorMsg.contains(receiverIdentifierPrefix + receiverCount)); + } + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiverRuntimeMetricsTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiverRuntimeMetricsTest.java new file mode 100644 index 0000000000000..e86f4ed54a0fb --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/ReceiverRuntimeMetricsTest.java @@ -0,0 +1,101 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.sendrecv; + +import com.microsoft.azure.eventhubs.*; +import com.microsoft.azure.eventhubs.lib.ApiTestBase; +import com.microsoft.azure.eventhubs.lib.TestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.time.Instant; +import java.util.HashSet; +import java.util.LinkedList; + +public class ReceiverRuntimeMetricsTest extends ApiTestBase { + + static final String cgName = TestContext.getConsumerGroupName(); + static final String partitionId = "0"; + static final Instant beforeTestStart = Instant.now(); + static final int sentEvents = 25; + + static EventHubClient ehClient; + + static PartitionReceiver receiverWithOptions = null; + static PartitionReceiver receiverWithoutOptions = null; + static PartitionReceiver receiverWithOptionsDisabled = null; + + @BeforeClass + public static void initializeEventHub() throws Exception { + + final ConnectionStringBuilder connectionString = TestContext.getConnectionString(); + ehClient = 
EventHubClient.createSync(connectionString.toString(), TestContext.EXECUTOR_SERVICE); + + ReceiverOptions options = new ReceiverOptions(); + options.setReceiverRuntimeMetricEnabled(true); + + ReceiverOptions optionsWithMetricsDisabled = new ReceiverOptions(); + optionsWithMetricsDisabled.setReceiverRuntimeMetricEnabled(false); + + receiverWithOptions = ehClient.createReceiverSync(cgName, partitionId, EventPosition.fromEnqueuedTime(Instant.now()), options); + receiverWithoutOptions = ehClient.createReceiverSync(cgName, partitionId, EventPosition.fromEnqueuedTime(Instant.EPOCH)); + receiverWithOptionsDisabled = ehClient.createReceiverSync(cgName, partitionId, EventPosition.fromEnqueuedTime(Instant.EPOCH), optionsWithMetricsDisabled); + + TestBase.pushEventsToPartition(ehClient, partitionId, sentEvents).get(); + } + + @AfterClass() + public static void cleanup() throws EventHubException { + + if (receiverWithOptions != null) + receiverWithOptions.closeSync(); + + if (receiverWithoutOptions != null) + receiverWithoutOptions.closeSync(); + + if (receiverWithOptionsDisabled != null) + receiverWithOptionsDisabled.closeSync(); + + if (ehClient != null) + ehClient.closeSync(); + } + + @Test() + public void testRuntimeMetricsReturnedWhenEnabled() throws EventHubException { + + LinkedList receivedEventsWithOptions = new LinkedList<>(); + while (receivedEventsWithOptions.size() < sentEvents) + for (EventData eData : receiverWithOptions.receiveSync(1)) { + receivedEventsWithOptions.add(eData); + Assert.assertEquals((Long) eData.getSystemProperties().getSequenceNumber(), + receiverWithOptions.getEventPosition().getSequenceNumber()); + } + + HashSet offsets = new HashSet<>(); + for (EventData eData : receivedEventsWithOptions) + offsets.add(eData.getSystemProperties().getOffset()); + + Assert.assertTrue(receiverWithOptions.getRuntimeInformation() != null); + Assert.assertTrue(offsets.contains(receiverWithOptions.getRuntimeInformation().getLastEnqueuedOffset())); + 
Assert.assertTrue(receiverWithOptions.getRuntimeInformation().getLastEnqueuedSequenceNumber() >= receivedEventsWithOptions.iterator().next().getSystemProperties().getSequenceNumber()); + } + + @Test() + public void testRuntimeMetricsWhenDisabled() throws EventHubException { + + receiverWithOptionsDisabled.receiveSync(10); + Assert.assertTrue(receiverWithOptionsDisabled.getRuntimeInformation() == null); + } + + @Test() + public void testRuntimeMetricsDefaultDisabled() throws EventHubException { + + receiverWithoutOptions.receiveSync(10); + Assert.assertTrue(receiverWithoutOptions.getRuntimeInformation() == null); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/RequestResponseTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/RequestResponseTest.java new file mode 100644 index 0000000000000..c95643f9c2c2a --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/RequestResponseTest.java @@ -0,0 +1,385 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ */ +package com.microsoft.azure.eventhubs.sendrecv; + +import com.microsoft.azure.eventhubs.*; +import com.microsoft.azure.eventhubs.impl.*; +import com.microsoft.azure.eventhubs.lib.ApiTestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import junit.framework.AssertionFailedError; +import org.apache.qpid.proton.Proton; +import org.apache.qpid.proton.amqp.Symbol; +import org.apache.qpid.proton.amqp.messaging.AmqpValue; +import org.apache.qpid.proton.amqp.messaging.ApplicationProperties; +import org.apache.qpid.proton.amqp.transport.ErrorCondition; +import org.apache.qpid.proton.message.Message; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.IOException; +import java.lang.reflect.*; +import java.time.Duration; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; + +public class RequestResponseTest extends ApiTestBase { + + static MessagingFactory factory; + static ConnectionStringBuilder connectionString; + + @BeforeClass + public static void initializeEventHub() throws Exception { + + connectionString = TestContext.getConnectionString(); + factory = MessagingFactory.createFromConnectionString(connectionString.toString(), TestContext.EXECUTOR_SERVICE).get(); + } + + @AfterClass() + public static void cleanup() throws EventHubException { + + if (factory != null) + factory.closeSync(); + } + + @Test() + public void testRequestResponse() throws Exception { + + final ReactorDispatcher dispatcher = factory.getReactorDispatcher(); + final RequestResponseChannel requestResponseChannel = new RequestResponseChannel( + "reqresp", + ClientConstants.MANAGEMENT_ADDRESS, + factory.getSession("path", null, null)); + final FaultTolerantObject fchannel = new FaultTolerantObject<>( + new Operation() { + @Override + public void run(OperationResult operationCallback) { + + 
requestResponseChannel.open( + new OperationResult() { + @Override + public void onComplete(Void result) { + factory.registerForConnectionError(requestResponseChannel.getSendLink()); + factory.registerForConnectionError(requestResponseChannel.getReceiveLink()); + + operationCallback.onComplete(requestResponseChannel); + } + + @Override + public void onError(Exception error) { + operationCallback.onError(error); + } + }, + new OperationResult() { + @Override + public void onComplete(Void result) { + factory.deregisterForConnectionError(requestResponseChannel.getSendLink()); + factory.deregisterForConnectionError(requestResponseChannel.getReceiveLink()); + } + + @Override + public void onError(Exception error) { + factory.deregisterForConnectionError(requestResponseChannel.getSendLink()); + factory.deregisterForConnectionError(requestResponseChannel.getReceiveLink()); + } + }); + } + }, + new Operation() { + @Override + public void run(OperationResult operationCallback) { + requestResponseChannel.close(new OperationResult() { + @Override + public void onComplete(Void result) { + operationCallback.onComplete(result); + } + + @Override + public void onError(Exception error) { + operationCallback.onError(error); + } + }); + } + }); + + int parallelization = 10; + final CompletableFuture[] tasks = new CompletableFuture[parallelization]; + + int i = 0; + while (true) { + final CompletableFuture task = new CompletableFuture<>(); + + final Message request = Proton.message(); + final Map properties = new HashMap<>(); + properties.put(ClientConstants.MANAGEMENT_ENTITY_TYPE_KEY, ClientConstants.MANAGEMENT_EVENTHUB_ENTITY_TYPE); + properties.put(ClientConstants.MANAGEMENT_ENTITY_NAME_KEY, connectionString.getEventHubName()); + properties.put(ClientConstants.MANAGEMENT_OPERATION_KEY, ClientConstants.READ_OPERATION_VALUE); + final ApplicationProperties applicationProperties = new ApplicationProperties(properties); + request.setApplicationProperties(applicationProperties); + + 
fchannel.runOnOpenedObject(dispatcher, + new OperationResult() { + @Override + public void onComplete(RequestResponseChannel result) { + result.request(request, + new OperationResult() { + @Override + public void onComplete(Message response) { + Map resultMap = null; + + final int statusCode = (int) response.getApplicationProperties().getValue().get(ClientConstants.MANAGEMENT_STATUS_CODE_KEY); + final String statusDescription = (String) response.getApplicationProperties().getValue().get(ClientConstants.MANAGEMENT_STATUS_DESCRIPTION_KEY); + + if (statusCode == AmqpResponseCode.ACCEPTED.getValue() || statusCode == AmqpResponseCode.OK.getValue()) { + + if (response.getBody() == null) + resultMap = null; + else + resultMap = (Map) ((AmqpValue) response.getBody()).getValue(); + } else { + + final Symbol condition = (Symbol) response.getApplicationProperties().getValue().get(ClientConstants.MANAGEMENT_RESPONSE_ERROR_CONDITION); + final ErrorCondition error = new ErrorCondition(condition, statusDescription); + this.onError(new AmqpException(error)); + } + + if (connectionString.getEventHubName().equalsIgnoreCase((String) resultMap.get(ClientConstants.MANAGEMENT_ENTITY_NAME_KEY))) + task.complete(null); + else + task.completeExceptionally(new AssertionFailedError("response doesn't have correct eventhub name")); + } + + @Override + public void onError(Exception error) { + task.completeExceptionally(error); + } + }); + } + + @Override + public void onError(Exception error) { + task.completeExceptionally(error); + } + }); + + tasks[i % parallelization] = task; + i++; + if (i % parallelization == 0) { + CompletableFuture.allOf(tasks).get(); + if (i >= (parallelization * 5)) + break; + } + } + + final CompletableFuture closeFuture = new CompletableFuture<>(); + fchannel.close(dispatcher, new OperationResult() { + @Override + public void onComplete(Void result) { + closeFuture.complete(null); + } + + @Override + public void onError(Exception error) { + 
closeFuture.completeExceptionally(error); + } + }); + + closeFuture.get(); + } + + @Test + public void testGetRuntimes() throws Exception { + testGetRuntimeInfos(TestContext.getConnectionString()); + } + + public void testGetRuntimeInfos(ConnectionStringBuilder connectionString) throws Exception { + EventHubClient ehc = EventHubClient.createSync(connectionString.toString(), TestContext.EXECUTOR_SERVICE); + EventHubRuntimeInformation ehInfo = ehc.getRuntimeInformation().get(); + + Assert.assertNotNull(ehInfo); + Assert.assertTrue(connectionString.getEventHubName().equalsIgnoreCase(ehInfo.getPath())); + Assert.assertNotNull(ehInfo.getCreatedAt()); // creation time could be almost anything, can't really check value + Assert.assertTrue(ehInfo.getPartitionCount() >= 1); // max legal partition count is variable but 2 is hard minimum + Assert.assertEquals(ehInfo.getPartitionIds().length, ehInfo.getPartitionCount()); + /* + System.out.println("Event hub name: " + ehInfo.getPath()); + System.out.println("Created at: " + ehInfo.getCreatedAt().toString()); + System.out.println("Partition count: " + ehInfo.getPartitionCount()); + */ + for (int i = 0; i < ehInfo.getPartitionCount(); i++) { + String id = ehInfo.getPartitionIds()[i]; + Assert.assertNotNull(id); + Assert.assertFalse(id.isEmpty()); + //System.out.println("Partition id[" + i + "]: " + ehInfo.getPartitionIds()[i]); + } + + for (String id : ehInfo.getPartitionIds()) { + PartitionRuntimeInformation partInfo = ehc.getPartitionRuntimeInformation(id).get(); + + Assert.assertNotNull(partInfo); + Assert.assertTrue(connectionString.getEventHubName().equalsIgnoreCase(partInfo.getEventHubPath())); + Assert.assertTrue(id.equalsIgnoreCase(partInfo.getPartitionId())); + Assert.assertTrue(partInfo.getBeginSequenceNumber() >= -1); + Assert.assertTrue(partInfo.getLastEnqueuedSequenceNumber() >= -1); + Assert.assertTrue(partInfo.getLastEnqueuedSequenceNumber() >= partInfo.getBeginSequenceNumber()); + 
Assert.assertNotNull(partInfo.getLastEnqueuedOffset()); + Assert.assertFalse(partInfo.getLastEnqueuedOffset().isEmpty()); + Assert.assertNotNull(partInfo.getLastEnqueuedTimeUtc()); // last enqueued time could be almost anything, can't really check value + /* + System.out.println("Event hub name: " + partInfo.getEventHubPath()); + System.out.println("Partition id: " + partInfo.getPartitionId()); + System.out.println("Begin seq: " + partInfo.getBeginSequenceNumber()); + System.out.println("Last seq: " + partInfo.getLastEnqueuedSequenceNumber()); + System.out.println("Last offset: " + partInfo.getLastEnqueuedOffset()); + System.out.println("Last time: " + partInfo.getLastEnqueuedTimeUtc().toString()); + */ + } + + ehc.closeSync(); + } + + @Test + public void testGetRuntimesWebSockets() throws Exception { + ConnectionStringBuilder connectionStringBuilder = TestContext.getConnectionString(); + connectionStringBuilder.setTransportType(TransportType.AMQP_WEB_SOCKETS); + testGetRuntimeInfos(connectionStringBuilder); + } + + @Test + public void testGetRuntimeInfoCallTimesout() throws Exception { + final EventHubClientImpl eventHubClient = (EventHubClientImpl) EventHubClient.createSync(connectionString.toString(), TestContext.EXECUTOR_SERVICE); + + // set operation timeout to 5ms - so that the actual operation doesn't event start + final Field factoryField = EventHubClientImpl.class.getDeclaredField("underlyingFactory"); + factoryField.setAccessible(true); + final MessagingFactory factory = (MessagingFactory)factoryField.get(eventHubClient); + + final Field timeoutField = MessagingFactory.class.getDeclaredField("operationTimeout"); + timeoutField.setAccessible(true); + final Duration originalTimeout = factory.getOperationTimeout(); + timeoutField.set(factory, Duration.ofMillis(ClientConstants.MGMT_CHANNEL_MIN_RETRY_IN_MILLIS)); + + try { + eventHubClient.getPartitionRuntimeInformation("0").get(); + Assert.assertTrue(false); // exception should be thrown + } catch 
(ExecutionException exception) { + Assert.assertTrue(exception.getCause() instanceof TimeoutException); + } finally { + timeoutField.set(factory, originalTimeout); + eventHubClient.closeSync(); + } + } + + @Test + public void testGetRuntimesBadHub() throws EventHubException, IOException { + ConnectionStringBuilder bogusConnectionString = new ConnectionStringBuilder() + .setEndpoint(connectionString.getEndpoint()) + .setEventHubName("NOHUBZZZZZ") + .setSasKeyName(connectionString.getSasKeyName()) + .setSasKey(connectionString.getSasKey()); + EventHubClient ehc = EventHubClient.createSync(bogusConnectionString.toString(), TestContext.EXECUTOR_SERVICE); + + try { + ehc.getRuntimeInformation().get(); + Assert.fail("Expected exception, got success"); + } catch (ExecutionException e) { + if (e.getCause() == null) { + Assert.fail("Got ExecutionException but no inner exception"); + } else if (e.getCause() instanceof IllegalEntityException) { + Assert.assertTrue(e.getCause().getMessage().contains("could not be found")); + } else { + Assert.fail("Got unexpected inner exception " + e.getCause().toString()); + } + } catch (Exception e) { + Assert.fail("Unexpected exception " + e.toString()); + } + + try { + ehc.getPartitionRuntimeInformation("0").get(); + Assert.fail("Expected exception, got success"); + } catch (ExecutionException e) { + if (e.getCause() == null) { + Assert.fail("Got ExecutionException but no inner exception"); + } else if (e.getCause() instanceof IllegalEntityException) { + Assert.assertTrue(e.getCause().getMessage().contains("could not be found")); + } else { + Assert.fail("Got unexpected inner exception " + e.getCause().toString()); + } + } catch (Exception e) { + Assert.fail("Unexpected exception " + e.toString()); + } + + ehc.closeSync(); + } + + @Test + public void testGetRuntimesBadKeyname() throws EventHubException, IOException { + ConnectionStringBuilder bogusConnectionString = new ConnectionStringBuilder() + 
.setEndpoint(connectionString.getEndpoint()) + .setEventHubName(connectionString.getEventHubName()) + .setSasKeyName("xxxnokeyxxx") + .setSasKey(connectionString.getSasKey()); + EventHubClient ehc = EventHubClient.createSync(bogusConnectionString.toString(), TestContext.EXECUTOR_SERVICE); + + try { + ehc.getRuntimeInformation().get(); + Assert.fail("Expected exception, got success"); + } catch (ExecutionException e) { + if (e.getCause() == null) { + Assert.fail("Got ExecutionException but no inner exception"); + } else if (e.getCause() instanceof AuthorizationFailedException) { + // Success + } else { + Assert.fail("Got unexpected inner exception " + e.getCause().toString()); + } + } catch (Exception e) { + Assert.fail("Unexpected exception " + e.toString()); + } + + try { + ehc.getPartitionRuntimeInformation("0").get(); + Assert.fail("Expected exception, got success"); + } catch (ExecutionException e) { + if (e.getCause() == null) { + Assert.fail("Got ExecutionException but no inner exception"); + } else if (e.getCause() instanceof AuthorizationFailedException) { + // Success + } else { + Assert.fail("Got unexpected inner exception " + e.getCause().toString()); + } + } catch (Exception e) { + Assert.fail("Unexpected exception " + e.toString()); + } + + ehc.closeSync(); + } + + @Test + public void testGetRuntimesClosedClient() throws EventHubException, IOException, InterruptedException, ExecutionException { + EventHubClient ehc = EventHubClient.createSync(connectionString.toString(), TestContext.EXECUTOR_SERVICE); + ehc.closeSync(); + + try { + ehc.getRuntimeInformation().get(); + Assert.fail("getRuntimeInformation did not throw as expected"); + } catch (IllegalStateException e) { + // Success + } catch (Exception e) { + Assert.fail("Unexpected exception from getRuntimeInformation " + e.toString()); + } + + try { + ehc.getPartitionRuntimeInformation("0").get(); + Assert.fail("getPartitionRuntimeInformation did not throw as expected"); + } catch 
(IllegalStateException e) { + // Success + } catch (Exception e) { + Assert.fail("Unexpected exception from getPartitionRuntimeInformation " + e.toString()); + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SasTokenReceiveTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SasTokenReceiveTest.java new file mode 100644 index 0000000000000..5d975598129f0 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SasTokenReceiveTest.java @@ -0,0 +1,41 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.sendrecv; + +import com.microsoft.azure.eventhubs.EventHubException; +import com.microsoft.azure.eventhubs.lib.SasTokenTestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import org.junit.*; + +public class SasTokenReceiveTest extends SasTokenTestBase { + + private static ReceiveTest receiveTest; + + @BeforeClass + public static void initialize() throws Exception { + + Assert.assertTrue(TestContext.getConnectionString().getSharedAccessSignature() != null + && TestContext.getConnectionString().getSasKey() == null + && TestContext.getConnectionString().getSasKeyName() == null); + + receiveTest = new ReceiveTest(); + ReceiveTest.initializeEventHub(TestContext.getConnectionString()); + } + + @AfterClass() + public static void cleanup() throws EventHubException { + ReceiveTest.cleanup(); + } + + @Test() + public void testReceiverStartOfStreamFilters() throws EventHubException { + receiveTest.testReceiverStartOfStreamFilters(); + } + + @After + public void testCleanup() throws EventHubException { + receiveTest.testCleanup(); + } +} diff --git 
a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SasTokenSendTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SasTokenSendTest.java new file mode 100644 index 0000000000000..bd96fc0a7eef0 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SasTokenSendTest.java @@ -0,0 +1,53 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.sendrecv; + +import com.microsoft.azure.eventhubs.EventHubException; +import com.microsoft.azure.eventhubs.lib.SasTokenTestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import org.junit.*; + +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; + +public class SasTokenSendTest extends SasTokenTestBase { + + private static SendTest sendTest; + + @BeforeClass + public static void initialize() throws Exception { + + Assert.assertTrue(TestContext.getConnectionString().getSharedAccessSignature() != null + && TestContext.getConnectionString().getSasKey() == null + && TestContext.getConnectionString().getSasKeyName() == null); + + sendTest = new SendTest(); + SendTest.initializeEventHub(TestContext.getConnectionString()); + } + + @AfterClass + public static void cleanupClient() throws EventHubException { + + SendTest.cleanupClient(); + } + + @Test + public void sendBatchRetainsOrderWithinBatch() throws EventHubException, InterruptedException, ExecutionException, TimeoutException { + + sendTest.sendBatchRetainsOrderWithinBatch(); + } + + @Test + public void sendResultsInSysPropertiesWithPartitionKey() throws EventHubException, InterruptedException, ExecutionException, TimeoutException { + + sendTest.sendResultsInSysPropertiesWithPartitionKey(); + } + + @After + public void cleanup() throws 
EventHubException { + + sendTest.cleanup(); + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SendTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SendTest.java new file mode 100644 index 0000000000000..7f4eaed43b9e4 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SendTest.java @@ -0,0 +1,221 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.sendrecv; + +import com.microsoft.azure.eventhubs.*; +import com.microsoft.azure.eventhubs.lib.ApiTestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import junit.framework.AssertionFailedError; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.time.Duration; +import java.time.Instant; +import java.util.LinkedList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.TimeUnit; + +public class SendTest extends ApiTestBase { + static final String cgName = TestContext.getConsumerGroupName(); + static final String partitionId = "0"; + static final String ORDER_PROPERTY = "order"; + static EventHubClient ehClient; + + PartitionSender sender = null; + List receivers = new LinkedList<>(); + + @BeforeClass + public static void initialize() throws Exception { + final ConnectionStringBuilder connectionString = TestContext.getConnectionString(); + initializeEventHub(connectionString); + } + public static void initializeEventHub(final ConnectionStringBuilder connectionString) throws Exception { + ehClient = EventHubClient.createSync(connectionString.toString(), 
TestContext.EXECUTOR_SERVICE); + } + + @AfterClass + public static void cleanupClient() throws EventHubException { + if (ehClient != null) + ehClient.closeSync(); + } + + @Test + public void sendBatchRetainsOrderWithinBatch() throws EventHubException, InterruptedException, ExecutionException, TimeoutException { + LinkedList batchEvents = new LinkedList<>(); + final int batchSize = 50; + for (int count = 0; count < batchSize; count++) { + EventData event = EventData.create("a".getBytes()); + event.getProperties().put(ORDER_PROPERTY, count); + batchEvents.add(event); + } + + final CompletableFuture validator = new CompletableFuture<>(); + final PartitionReceiver receiver = ehClient.createReceiverSync(cgName, partitionId, EventPosition.fromEnqueuedTime(Instant.now())); + this.receivers.add(receiver); + receiver.setReceiveTimeout(Duration.ofSeconds(1)); + receiver.setReceiveHandler(new OrderValidator(validator, batchSize)); + + // run out of messages in that specific partition - to account for clock-skew with Instant.now() on test machine vs eventhubs service + Iterable clockSkewEvents; + do { + clockSkewEvents = receiver.receiveSync(100); + } while (clockSkewEvents != null && clockSkewEvents.iterator().hasNext()); + + sender = ehClient.createPartitionSenderSync(partitionId); + sender.sendSync(batchEvents); + + validator.get(25, TimeUnit.SECONDS); + } + + @Test + public void sendResultsInSysPropertiesWithPartitionKey() throws EventHubException, InterruptedException, ExecutionException, TimeoutException { + final int partitionCount = ehClient.getRuntimeInformation().get().getPartitionCount(); + final String partitionKey = UUID.randomUUID().toString(); + CompletableFuture validateSignal = new CompletableFuture<>(); + PartitionKeyValidator validator = new PartitionKeyValidator(validateSignal, partitionKey, 1); + for (int receiversCount = 0; receiversCount < partitionCount; receiversCount++) { + final PartitionReceiver receiver = ehClient.createReceiverSync(cgName, 
Integer.toString(receiversCount), EventPosition.fromEnqueuedTime(Instant.now())); + receivers.add(receiver); + + // run out of messages in that specific partition - to account for clock-skew with Instant.now() on test machine vs eventhubs service + receiver.setReceiveTimeout(Duration.ofSeconds(5)); + Iterable clockSkewEvents; + do { + clockSkewEvents = receiver.receiveSync(100); + } while (clockSkewEvents != null && clockSkewEvents.iterator().hasNext()); + + receiver.setReceiveHandler(validator); + } + + ehClient.sendSync(EventData.create("TestMessage".getBytes()), partitionKey); + validateSignal.get(partitionCount * 5, TimeUnit.SECONDS); + } + + @Test + public void sendBatchResultsInSysPropertiesWithPartitionKey() throws EventHubException, InterruptedException, ExecutionException, TimeoutException { + final int batchSize = 20; + final int partitionCount = ehClient.getRuntimeInformation().get().getPartitionCount(); + final String partitionKey = UUID.randomUUID().toString(); + CompletableFuture validateSignal = new CompletableFuture<>(); + PartitionKeyValidator validator = new PartitionKeyValidator(validateSignal, partitionKey, batchSize); + for (int receiversCount = 0; receiversCount < partitionCount; receiversCount++) { + final PartitionReceiver receiver = ehClient.createReceiverSync(cgName, Integer.toString(receiversCount), EventPosition.fromEnqueuedTime(Instant.now())); + receivers.add(receiver); + + // run out of messages in that specific partition - to account for clock-skew with Instant.now() on test machine vs eventhubs service + receiver.setReceiveTimeout(Duration.ofSeconds(5)); + Iterable clockSkewEvents; + do { + clockSkewEvents = receiver.receiveSync(100); + } while (clockSkewEvents != null && clockSkewEvents.iterator().hasNext()); + + receiver.setReceiveHandler(validator); + } + + List events = new LinkedList<>(); + for (int index = 0; index < batchSize; index++) + events.add(EventData.create("TestMessage".getBytes())); + + ehClient.sendSync(events, 
partitionKey); + validateSignal.get(partitionCount * 5, TimeUnit.SECONDS); + } + + @After + public void cleanup() throws EventHubException { + if (sender != null) { + sender.closeSync(); + sender = null; + } + + if (receivers != null && !receivers.isEmpty()) { + for (PartitionReceiver receiver : receivers) + receiver.closeSync(); + + receivers.clear(); + } + } + + public static class PartitionKeyValidator implements PartitionReceiveHandler { + final CompletableFuture validateSignal; + final String partitionKey; + final int eventCount; + int currentEventCount = 0; + + protected PartitionKeyValidator(final CompletableFuture validateSignal, final String partitionKey, final int eventCount) { + this.validateSignal = validateSignal; + this.partitionKey = partitionKey; + this.eventCount = eventCount; + } + + @Override + public int getMaxEventCount() { + return 50; + } + + @Override + public void onReceive(Iterable events) { + if (events != null && events.iterator().hasNext()) { + for (EventData event : events) { + if (!partitionKey.equals(event.getSystemProperties().getPartitionKey())) + this.validateSignal.completeExceptionally( + new AssertionFailedError(String.format("received partitionKey: %s, expected partitionKey: %s", event.getSystemProperties().getPartitionKey(), partitionKey))); + + this.currentEventCount++; + } + + if (this.currentEventCount == this.eventCount) + this.validateSignal.complete(null); + } + } + + @Override + public void onError(Throwable error) { + this.validateSignal.completeExceptionally(error); + } + } + + public static class OrderValidator implements PartitionReceiveHandler { + final CompletableFuture validateSignal; + final int netEventCount; + + int currentCount = 0; + + public OrderValidator(final CompletableFuture validateSignal, final int netEventCount) { + this.validateSignal = validateSignal; + this.netEventCount = netEventCount; + } + + @Override + public int getMaxEventCount() { + return 100; + } + + @Override + public void 
onReceive(Iterable events) { + if (events != null) + for (EventData event : events) { + final int currentEventOrder = (int) event.getProperties().get(ORDER_PROPERTY); + if (currentEventOrder != currentCount) + this.validateSignal.completeExceptionally(new AssertionError(String.format("expected %s, got %s", currentCount, currentEventOrder))); + + currentCount++; + } + + if (currentCount >= netEventCount) + this.validateSignal.complete(null); + } + + @Override + public void onError(Throwable error) { + this.validateSignal.completeExceptionally(error); + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SetPrefetchCountTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SetPrefetchCountTest.java new file mode 100644 index 0000000000000..1a39f3ab353aa --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/SetPrefetchCountTest.java @@ -0,0 +1,87 @@ +package com.microsoft.azure.eventhubs.sendrecv; + +import com.microsoft.azure.eventhubs.*; +import com.microsoft.azure.eventhubs.lib.ApiTestBase; +import com.microsoft.azure.eventhubs.lib.TestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import org.junit.*; + +import java.time.Duration; +import java.util.LinkedList; + +public class SetPrefetchCountTest extends ApiTestBase { + static final String CONSUMER_GROUP_NAME = TestContext.getConsumerGroupName(); + static final String PARTITION_ID = "0"; + + // since we cannot test receiving very large prefetch like 100000 - in a unit test + // defaultPrefetchCount * 3 was chosen + static final int EVENT_COUNT = PartitionReceiver.DEFAULT_PREFETCH_COUNT * 3; + + static final int MAX_RETRY_TO_DECLARE_RECEIVE_STUCK = 3; + + static EventHubClient ehClient; + + PartitionReceiver testReceiver = null; + + @BeforeClass + public static void initializeEventHub() throws Exception { + final ConnectionStringBuilder 
connectionString = TestContext.getConnectionString(); + ehClient = EventHubClient.createSync(connectionString.toString(), TestContext.EXECUTOR_SERVICE); + TestBase.pushEventsToPartition(ehClient, PARTITION_ID, EVENT_COUNT).get(); + } + + @AfterClass() + public static void cleanup() throws EventHubException { + if (ehClient != null) { + ehClient.closeSync(); + } + } + + @Test() + public void testSetPrefetchCountToLargeValue() throws EventHubException { + ReceiverOptions options = new ReceiverOptions(); + options.setPrefetchCount(2000); + testReceiver = ehClient.createReceiverSync(CONSUMER_GROUP_NAME, PARTITION_ID, EventPosition.fromStartOfStream(), options); + testReceiver.setReceiveTimeout(Duration.ofSeconds(2)); + int eventsReceived = 0; + int retryCount = 0; + while (eventsReceived < EVENT_COUNT && retryCount < MAX_RETRY_TO_DECLARE_RECEIVE_STUCK) { + final Iterable events = testReceiver.receiveSync(EVENT_COUNT); + if (events == null || !events.iterator().hasNext()) { + retryCount++; + } else { + eventsReceived += ((LinkedList) events).size(); + } + } + + Assert.assertTrue(eventsReceived >= EVENT_COUNT); + } + + @Test() + public void testSetPrefetchCountToSmallValue() throws EventHubException { + ReceiverOptions options = new ReceiverOptions(); + options.setPrefetchCount(11); + testReceiver = ehClient.createReceiverSync(CONSUMER_GROUP_NAME, PARTITION_ID, EventPosition.fromStartOfStream(), options); + testReceiver.setReceiveTimeout(Duration.ofSeconds(2)); + int eventsReceived = 0; + int retryCount = 0; + while (eventsReceived < EVENT_COUNT && retryCount < MAX_RETRY_TO_DECLARE_RECEIVE_STUCK) { + final Iterable events = testReceiver.receiveSync(10); + if (events == null || !events.iterator().hasNext()) { + retryCount++; + } else { + eventsReceived += ((LinkedList) events).size(); + } + } + + Assert.assertTrue(eventsReceived >= EVENT_COUNT); + } + + @After + public void testCleanup() throws EventHubException { + + if (testReceiver != null) { + 
testReceiver.closeSync(); + } + } +} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/WebSocketsReceiveTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/WebSocketsReceiveTest.java new file mode 100644 index 0000000000000..29783d65e6327 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/WebSocketsReceiveTest.java @@ -0,0 +1,45 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.sendrecv; + +import com.microsoft.azure.eventhubs.ConnectionStringBuilder; +import com.microsoft.azure.eventhubs.EventHubException; +import com.microsoft.azure.eventhubs.TransportType; +import com.microsoft.azure.eventhubs.lib.SasTokenTestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import org.junit.*; + +public class WebSocketsReceiveTest extends SasTokenTestBase { + + private static ReceiveTest receiveTest; + + @BeforeClass + public static void initialize() throws Exception { + + Assert.assertTrue(TestContext.getConnectionString().getSharedAccessSignature() != null + && TestContext.getConnectionString().getSasKey() == null + && TestContext.getConnectionString().getSasKeyName() == null); + + receiveTest = new ReceiveTest(); + ConnectionStringBuilder connectionString = TestContext.getConnectionString(); + connectionString.setTransportType(TransportType.AMQP_WEB_SOCKETS); + ReceiveTest.initializeEventHub(connectionString); + } + + @AfterClass() + public static void cleanup() throws EventHubException { + ReceiveTest.cleanup(); + } + + @Test() + public void testReceiverStartOfStreamFilters() throws EventHubException { + receiveTest.testReceiverStartOfStreamFilters(); + } + + @After + public void testCleanup() throws EventHubException { + receiveTest.testCleanup(); + } 
+} diff --git a/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/WebSocketsSendTest.java b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/WebSocketsSendTest.java new file mode 100644 index 0000000000000..005e6b115afc4 --- /dev/null +++ b/eventhubs/data-plane/azure-eventhubs/src/test/java/com/microsoft/azure/eventhubs/sendrecv/WebSocketsSendTest.java @@ -0,0 +1,58 @@ +/* + * Copyright (c) Microsoft. All rights reserved. + * Licensed under the MIT license. See LICENSE file in the project root for full license information. + */ +package com.microsoft.azure.eventhubs.sendrecv; + +import com.microsoft.azure.eventhubs.ConnectionStringBuilder; +import com.microsoft.azure.eventhubs.EventHubException; +import com.microsoft.azure.eventhubs.TransportType; +import com.microsoft.azure.eventhubs.lib.SasTokenTestBase; +import com.microsoft.azure.eventhubs.lib.TestContext; +import org.junit.*; + +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; + +public class WebSocketsSendTest extends SasTokenTestBase { + + private static SendTest sendTest; + + @BeforeClass + public static void initialize() throws Exception { + + Assert.assertTrue(TestContext.getConnectionString().getSharedAccessSignature() != null + && TestContext.getConnectionString().getSasKey() == null + && TestContext.getConnectionString().getSasKeyName() == null); + + sendTest = new SendTest(); + + ConnectionStringBuilder connectionString = TestContext.getConnectionString(); + connectionString.setTransportType(TransportType.AMQP_WEB_SOCKETS); + SendTest.initializeEventHub(connectionString); + } + + @AfterClass + public static void cleanupClient() throws EventHubException { + + SendTest.cleanupClient(); + } + + @Test + public void sendBatchRetainsOrderWithinBatch() throws EventHubException, InterruptedException, ExecutionException, TimeoutException { + + 
sendTest.sendBatchRetainsOrderWithinBatch(); + } + + @Test + public void sendResultsInSysPropertiesWithPartitionKey() throws EventHubException, InterruptedException, ExecutionException, TimeoutException { + + sendTest.sendResultsInSysPropertiesWithPartitionKey(); + } + + @After + public void cleanup() throws EventHubException { + + sendTest.cleanup(); + } +} diff --git a/eventhubs/data-plane/event-hubs.png b/eventhubs/data-plane/event-hubs.png new file mode 100644 index 0000000000000..62f34790bbe32 Binary files /dev/null and b/eventhubs/data-plane/event-hubs.png differ diff --git a/eventhubs/data-plane/pom.xml b/eventhubs/data-plane/pom.xml new file mode 100644 index 0000000000000..c99ba22312426 --- /dev/null +++ b/eventhubs/data-plane/pom.xml @@ -0,0 +1,79 @@ + + + Java libraries for talking to Windows Azure Event Hubs + + 4.0.0 + + com.microsoft.azure + azure-eventhubs-clients + 2.0.0 + pom + + https://github.com/Azure/azure-event-hubs + + + 0.31.0 + 1.1.0 + 4.12 + 1.8.0-alpha2 + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.5 + + 1.8 + 1.8 + true + true + true + + + + + + + HTTP proxy server in java + http://raw.github.com/SreeramGarlapati/jproxy/master/releases + + + + + org.apache.qpid + proton-j + ${proton-j-version} + + + com.microsoft.azure + qpid-proton-j-extensions + ${qpid-proton-j-extensions-version} + + + org.slf4j + slf4j-api + ${slf4j-version} + + + junit + junit + ${junit-version} + test + + + org.jutils.jproxy + jproxy + 0.0.1 + test + + + + + azure-eventhubs + azure-eventhubs-eph + azure-eventhubs-extensions + + + diff --git a/eventhubs/data-plane/readme.md b/eventhubs/data-plane/readme.md new file mode 100644 index 0000000000000..ea52c6191cd04 --- /dev/null +++ b/eventhubs/data-plane/readme.md @@ -0,0 +1,104 @@ +

    + Microsoft Azure Event Hubs +

    + +

    Microsoft Azure Event Hubs Client for Java +

    + + star our repo + + follow on Twitter +

    + +|Branch|Status| +|------|-------------| +|master|[![Build status](https://ci.appveyor.com/api/projects/status/dq8qyu2k3wu2uexd/branch/master?svg=true)](https://ci.appveyor.com/project/sabeegrewal/azure-event-hubs-java/branch/master)| +|dev|[![Build status](https://ci.appveyor.com/api/projects/status/dq8qyu2k3wu2uexd/branch/dev?svg=true)](https://ci.appveyor.com/project/sabeegrewal/azure-event-hubs-java/branch/dev)| + +Azure Event Hubs is a hyper-scale data ingestion service, fully-managed by Microsoft, that enables you to collect, store and process trillions of events from websites, apps, IoT devices, and any stream of data. + +Refer to the [online documentation](https://azure.microsoft.com/services/event-hubs/) to learn more about Event Hubs in general and [General Overview document](Overview.md) for an overview of Event Hubs Client for Java. + +## Using the library + +### Samples + +Code samples are [here](https://github.com/Azure/azure-event-hubs/tree/master/samples/Java). + +### Referencing the library + +Two java packages are released to Maven Central Repository from this GitHub repository. + +#### Microsoft Azure EventHubs Java Client + +This library exposes the send and receive APIs. This library will in turn pull further required dependencies, specifically +the required versions of Apache Qpid Proton-J, and the cryptography library BCPKIX by the Legion of Bouncy Castle. + +|Package|Package Version| +|--------|------------------| +|azure-eventhubs|[![Maven Central](https://maven-badges.herokuapp.com/maven-central/com.microsoft.azure/azure-eventhubs/badge.svg)](https://maven-badges.herokuapp.com/maven-central/com.microsoft.azure/azure-eventhubs) + +```XML + + com.microsoft.azure + azure-eventhubs + 2.0.0 + +``` + +#### Microsoft Azure EventHubs Java Event Processor Host library + +This library exposes an out-of-the-box distributed partition processor for Event Hubs. 
+It pulls the required versions of Event Hubs, Azure Storage and GSon libraries. + +|Package|Package Version| +|--------|------------------| +|azure-eventhubs-eph|[![Maven Central](https://maven-badges.herokuapp.com/maven-central/com.microsoft.azure/azure-eventhubs-eph/badge.svg)](https://maven-badges.herokuapp.com/maven-central/com.microsoft.azure/azure-eventhubs-eph) + +```XML + + com.microsoft.azure + azure-eventhubs-eph + 2.2.0 + +``` + +## How to provide feedback + +First, if you experience any issues with the runtime behavior of the Azure Event Hubs service, please consider filing a support request +right away. Your options for [getting support are enumerated here](https://azure.microsoft.com/support/options/). In the Azure portal, +you can file a support request from the "Help and support" menu in the upper right hand corner of the page. + +If you find issues in this library or have suggestions for improvement of code or documentation, you can [file an issue in the project's +GitHub repository](https://github.com/Azure/azure-event-hubs/issues) or send across a pull request - see our [Contribution Guidelines](./.github/CONTRIBUTING.md). + +Issues related to runtime behavior of the service, such as sporadic exceptions or apparent service-side performance or reliability issues can not be handled here. + +Generally, if you want to discuss Azure Event Hubs or this client library with the community and the maintainers, you can turn to +[stackoverflow.com under the #azure-eventhub tag](http://stackoverflow.com/questions/tagged/azure-eventhub) or the +[MSDN Service Bus Forum](https://social.msdn.microsoft.com/Forums/en-US/home?forum=servbus). + +## Build & contribute to the library + +You will generally not have to build this client library yourself - this library is available on maven central. +If you have any specific requirement for which you want to contribute or need to generate a SNAPSHOT version, this section is for you. 
+**Your contributions are welcome and encouraged!** + +We adopted maven build model and strive to keep the project model intuitive enough to developers. +If you need any help with any specific IDE or cannot get the build going in any environment - please open an issue. +Here are few general topics, which we thought developers would need help with: + +### Running Integration tests + +Set the following two Environment variables to be able to run unit tests targeting Microsoft Azure EventHubs service: + + * EVENT_HUB_CONNECTION_STRING - the event hub connection string to which the tests should target. the format of the connection string is: `Endpoint=----NAMESPACE_ENDPOINT------;EntityPath=----EVENTHUB_NAME----;SharedAccessKeyName=----KEY_NAME----;SharedAccessKey=----KEY_VALUE----`. [Here's how to create an Event Hub on Azure Portal and get the connection string](https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-create). + + * EPHTESTSTORAGE - the Microsoft Azure Storage account connection string to use while running EPH tests. The format of the connection string is: `DefaultEndpointsProtocol=https;AccountName=---STORAGE_ACCOUNT_NAME---;AccountKey=---ACCOUNT_KEY---;EndpointSuffix=---ENPOINT_SUFFIX---`. For more details on this visit - [how to create an Azure Storage account connection string](https://docs.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string#create-a-connection-string-for-an-azure-storage-account). 
+ +### Explore the client library with IDEs + +* If you see any Build Errors - make sure the Execution Environment is set to JDK version 1.8 or higher + diff --git a/eventhubs/data-plane/templates/azuredeploy.json b/eventhubs/data-plane/templates/azuredeploy.json new file mode 100644 index 0000000000000..addbca811f986 --- /dev/null +++ b/eventhubs/data-plane/templates/azuredeploy.json @@ -0,0 +1,108 @@ +{ + "$schema": "http://schema.management.azure.com/schemas/2014-04-01-preview/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "namespaceName": { + "type": "string", + "metadata": { + "description": "Name of the EventHub namespace" + } + }, + "eventHubName": { + "type": "string", + "metadata": { + "description": "Name of the Event Hub" + } + }, + "consumerGroupName": { + "type": "string", + "metadata": { + "description": "Name of the Consumer Group" + } + }, + "storageAccountName": { + "type": "string", + "metadata": { + "description": "Name of the storage account that is used by the Event Processor Host" + } + }, + "storageAccountType": { + "type": "string", + "defaultValue": "Standard_LRS", + "allowedValues": [ + "Standard_LRS", + "Standard_GRS", + "Standard_ZRS", + "Premium_LRS" + ], + "metadata": { + "description": "Storage Account type" + } + } + }, + "variables": { + "location": "[resourceGroup().location]", + "apiVersion": "2015-08-01", + "defaultSASKeyName": "RootManageSharedAccessKey", + "authRuleResourceId": "[resourceId('Microsoft.EventHub/namespaces/authorizationRules', parameters('namespaceName'), variables('defaultSASKeyName'))]", + "storageAccountResourceId": "[resourceId('Microsoft.Storage/storageAccounts', parameters('storageAccountName'))]" + }, + "resources": [ + { + "apiVersion": "2015-08-01", + "name": "[parameters('namespaceName')]", + "type": "Microsoft.EventHub/Namespaces", + "location": "[variables('location')]", + "sku": { + "name": "Standard", + "tier": "Standard" + }, + "resources": [ + { + "apiVersion": 
"2015-08-01", + "name": "[parameters('eventHubName')]", + "type": "EventHubs", + "dependsOn": [ + "[concat('Microsoft.EventHub/namespaces/', parameters('namespaceName'))]" + ], + "properties": { + "path": "[parameters('eventHubName')]" + }, + "resources": [ + { + "apiVersion": "2015-08-01", + "name": "[parameters('consumerGroupName')]", + "type": "ConsumerGroups", + "dependsOn": [ + "[parameters('eventHubName')]" + ], + "properties": {} + } + ] + } + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "name": "[parameters('storageAccountName')]", + "apiVersion": "2016-01-01", + "location": "[variables('location')]", + "sku": { + "name": "[parameters('storageAccountType')]" + }, + "kind": "Storage", + "properties": { + } + } + ], + "outputs": { + "NamespaceConnectionString": { + "type": "string", + "value": "[listkeys(variables('authRuleResourceId'), variables('apiVersion')).primaryConnectionString]" + }, + "StorageAccountConnectionString": { + "type": "string", + "value": "[Concat('DefaultEndpointsProtocol=https;AccountName=',parameters('storageAccountName'),';AccountKey=',listKeys(resourceId('Microsoft.Storage/storageAccounts', parameters('storageAccountName')), providers('Microsoft.Storage', 'storageAccounts').apiVersions[0]).keys[0].value)]" + } + } +} \ No newline at end of file diff --git a/pom.client.xml b/pom.client.xml index f7d505269c1ac..8777687a06ecc 100644 --- a/pom.client.xml +++ b/pom.client.xml @@ -821,9 +821,9 @@ + ./azconfig/client ./batch/data-plane ./keyvault/data-plane - ./azconfig/client \ No newline at end of file diff --git a/storage/data-plane/.gitignore b/storage/data-plane/.gitignore new file mode 100644 index 0000000000000..a1120ba310938 --- /dev/null +++ b/storage/data-plane/.gitignore @@ -0,0 +1,15 @@ +target/ +pom.xml.tag +pom.xml.releaseBackup +pom.xml.versionsBackup +pom.xml.next +release.properties +dependency-reduced-pom.xml +buildNumber.properties +.mvn/timing.properties + +# Avoid ignoring Maven wrapper jar file (.jar 
files are usually ignored) +!/.mvn/wrapper/maven-wrapper.jar + +*.iml +.idea/ diff --git a/storage/data-plane/.travis.yml b/storage/data-plane/.travis.yml new file mode 100644 index 0000000000000..03a53ee3aabf0 --- /dev/null +++ b/storage/data-plane/.travis.yml @@ -0,0 +1,5 @@ +language: groovy +jdk: + - oraclejdk8 +script: + - mvn test \ No newline at end of file diff --git a/storage/data-plane/BreakingChanges.txt b/storage/data-plane/BreakingChanges.txt new file mode 100644 index 0000000000000..d217724e0b61c --- /dev/null +++ b/storage/data-plane/BreakingChanges.txt @@ -0,0 +1,25 @@ +2018.08.11 Version 10.1.0 +* Interfaces for helper types updated to be more consistent throughout the library. All types, with the exception of the options for pipeline factories, use a fluent pattern. +* Removed RetryReader type as it's functionality was moved to be built into the DownloadResponse. RetryReaderOptions are now named ReliableDownloadOptions. +* Restructured the access conditions to be more logically adhere to their respective functions. +* Added support for context parameter on each api to allow communication with the pipeline from the application level + +2018.08.22 Version 10.0.4-rc +* Changed BlobURL.startCopy sourceAccessConditions parameter to be HTTPAccessConditions as lease is not actually supported. +* UploadFromFile now takes an AsynchronousFileChannel. +* UploadByteBuffersToBlockBlob, UploadByteBufferToBlockBlob, and DownloadToBuffer have been removed. +* IPRange fields are now strings. + +2018.08.07 Version 10.0.2-Preview +* Changed BlobListingDetails constructor to take a flag to include deleted blobs. +* Restructured the blob and container listing responses. +* BlockBlobURL.MAX_PUT_BLOCK_BYTES renamed to BlockBlobURL.MAX_STAGE_BLOCK_BYTES. +* Changed the accessConditions parameter to be HTTPAccessConditions instead of BlobAccessConditions, since only http access conditions are supported. 
+ +2018.07.03 Version 10.0.1-Preview +* Created the StorageException type, which deserializes the XML payload in an error response if present and gives access to the ErrorCode header as a property. +* Changed the AppendBlobAccessConditions field types to be Long instead of Int. +* Changed RequestRetryOptions maxTries and tryTimeout fields to be Integer instead of int. 0 is no longer allowed. +* Changed the return type of BlobURL.download to be a DownloadResponse instead of BlobsDownloadResponse for integration with RetryReader. +* Changed CommonRestResponse.lastModifiedTime to be lastModified. +* Changed the dateProperty field in all auto-generated files to be date. \ No newline at end of file diff --git a/storage/data-plane/CONTRIBUTING.md b/storage/data-plane/CONTRIBUTING.md new file mode 100644 index 0000000000000..477d88efc9e55 --- /dev/null +++ b/storage/data-plane/CONTRIBUTING.md @@ -0,0 +1,58 @@ +Hello! Thank you for being interested in contributing to our project! +Please make sure you've followed the instructions provided in the [Azure Projects Contribution Guidelines](http://azure.github.io/guidelines/). +## Project Setup +The Azure Storage development team uses Intellij. However, any preferred IDE or other toolset should be usable. + +### Install +* Java SE 8+ +* [Maven](https://maven.apache.org/install.html) +* Clone the source code from GitHub + +#### IntelliJ Installation +* [IntelliJ](https://www.jetbrains.com/idea/download) +* [Importing project from Maven for IntelliJ](https://www.jetbrains.com/help/idea//2017.1/importing-project-from-maven-model.html) + +#### Eclipse Installation +* [Eclipse](https://eclipse.org/downloads/) +* [Maven plugin for Eclipse](http://www.eclipse.org/m2e/index.html). Some Eclipse packages (ex Eclipse IDE for Java Developers) may come with this plugin already installed. +* Open the project from Eclipse using File->Import->Maven->Existing Maven Projects and navigating to the azure-storage-java folder. Select the listed pom. 
This imports the source and the test files and downloads the required dependencies via Maven. If you'd like to import the samples, follow the same procedure but navigate to the azure-storage-java\microsoft-azure-storage-samples folder and select that pom. Both projects can be opened at the same time and will be shown in the Package Explorer. + +## Tests + +### Configuration +The only step to configure testing is to set the appropriate environment variables. Create environment variables named "ACCOUNT_NAME" and "ACCOUNT_KEY", holding your Azure storage account name and key respectively. This will satisfy most tests. +To run any tests requiring two accounts (generally those testing copy-related apis), set environment variables "SECONDARY_ACCOUNT_NAME", and "SECONDARY_ACCOUNT_KEY". +To run any tests related to setting blob tiers on block blobs, set environment variables "BLOB_STORAGE_ACCOUNT_NAME" and "BLOB_STORAGE_ACCOUNT_KEY". Note that a GPV2 account is also sufficient here. +To run any tests related to setting blob tiers on page blobs, set environment variables "PREMIUM_ACCOUNT_NAME" and "PREMIUM_ACCOUNT_KEY". +It is valid to use a single account for multiple scenarios; a GPV2 account would work for both the primary account and the blob storage account, for instance. The only restriction is that the primary and secondary accounts must be distinct. + +### Running +To actually run tests, right click on the test class in the Package Explorer or the individual test in the Outline and select Run As->GroovyTest. Alternatively, run mvn test from the command line. +### Testing Features +As you develop a feature, you'll need to write tests to ensure quality. You should also run existing tests related to your change to address any unexpected breaks. + +## Pull Requests + +### Guidelines +The following are the minimum requirements for any pull request that must be met before contributions can be accepted. 
+* Make sure you've signed the CLA before you start working on any change. +* Discuss any proposed contribution with the team via a GitHub issue **before** starting development. +* Code must be professional quality + * No style issues + * You should strive to mimic the style with which we have written the library + * Clean, well-commented, well-designed code + * Try to limit the number of commits for a feature to 1-2. If you end up having too many we may ask you to squash your changes into fewer commits. +* [ChangeLog.md](ChangeLog.md) needs to be updated describing the new change +* Thoroughly test your feature + +### Branching Policy +Changes should be based on the **dev** branch for non-breaking changes and **dev_breaking** for breaking changes. Do not submit pull requests against master as master is considered publicly released code. Each breaking change should be recorded in [BreakingChanges.md](BreakingChanges.md). + +### Adding Features for Java 8+ +We strive to release each new feature in a backward compatible manner. Therefore, we ask that all contributions be written to work in Java 8 and 9. + +### Review Process +We expect all guidelines to be met before accepting a pull request. As such, we will work with you to address issues we find by leaving comments in your code. Please understand that it may take a few iterations before the code is accepted as we maintain high standards on code quality. Once we feel comfortable with a contribution, we will validate the change and accept the pull request. + + +Thank you for any contributions! Please let the team know if you have any questions or concerns about our contribution policy. 
\ No newline at end of file diff --git a/storage/data-plane/ChangeLog.txt b/storage/data-plane/ChangeLog.txt new file mode 100644 index 0000000000000..f64365a7a426d --- /dev/null +++ b/storage/data-plane/ChangeLog.txt @@ -0,0 +1,70 @@ +2019.02.15 Version 10.5.0 +* Added uploadFromNonReplayableFlowable to support uploading arbitrary data sources (like network streams) to a block blob. + +2019.01.11 Version 10.4.0 +* Fixed a bug that caused errors when java.io.tempdir has no trailing separator. +* Upgrade autorest-clientruntime dependency to include some bug fixes. + +2018.11.19 Version 10.3.0 +* Added support for SLF4J. +* Included default logging to log warnings and errors to the temp directory by default. +* Fixed a bug in hierarchical listings that would sometimes return incomplete results. +* Included the whole HTTP Request in log statements (except for sensitive authorization information, which is redacted). +* Fixed a bug that made the request property on the response object always null. + +2018.10.29 Version 10.2.0 +* Added overloads which only accept the required parameters. +* Added CopyFromURL, which will do a synchronous server-side copy, meaning the service will not return an HTTP response until it has completed the copy. +* Added support for IProgressReceiver in TransferManager operations. This parameter was previously ignored but is now supported. +* Removed internal dependency on javafx to be compatible with openjdk. +* Fixed a bug that would cause downloading large files with the TransferManager to fail. +* Fixed a bug in BlobURL.download() logic for setting up reliable download. This had the potential to download the wrong range when a download stream was retried. + +2018.09.11 Version 10.1.0 +* Interfaces for helper types updated to be more consistent throughout the library. All types, with the exception of the options for pipeline factories, use a fluent pattern. 
+* Removed RetryReader type as it's functionality was moved to be built into the DownloadResponse. RetryReaderOptions are now named ReliableDownloadOptions. +* Restructured the access conditions to be more logically adhere to their respective functions. +* Added support for context parameter on each api to allow communication with the pipeline from the application level + +2018.08.22 Version 10.0.4-rc +* Support for the 2017-11-09 REST version. Please see our REST api documentation and blogs for information about the related added features. +* Support for 2018-03-28 REST version. Please see our REST api documentation and blogs for information about the related added features. +* Support for the getAccountInfo api on ServiceURL, ContainerURL, and BlobURL. +* Added support for setting service properties related to static websites. +* Changed BlobURL.startCopy sourceAccessConditions parameter to be HTTPAccessConditions as lease is not actually supported. +* Added methods to TransferManager for conveniently downloading a blob to a file. +* UploadFromFile now takes an AsynchronousFileChannel. +* UploadByteBuffersToBlockBlob, UploadByteBufferToBlockBlob, and DownloadToBuffer have been removed. +* IPRange fields are now strings. +* Fixed retry policy. +* Fixed logging policy. + +2018.08.08 Version 10.0.3-Preview +* Resolved dependency issues + +2018.08.07 Version 10.0.2-Preview +* Support for 2017-07-29 REST version. Please see our REST api documentation and blogs for information about the related added features. +* Support for setting a block blob's tier. +* Added support for soft delete feature. If a delete retention policy is enabled through the set service properties API, then blobs or snapshots can be deleted softly and retained for a specified number of days, before being permanently removed by garbage collection. +* Changed BlobListingDetails constructor to take a flag to include deleted blobs. +* Restructured the blob and container listing responses. 
+* BlockBlobURL.MAX_PUT_BLOCK_BYTES renamed to BlockBlobURL.MAX_STAGE_BLOCK_BYTES. +* Changed the accessConditions parameter to be HTTPAccessConditions instead of BlobAccessConditions, since only http access conditions are supported. + +2018.07.03 Version 10.0.1-Preview +* Added the RetryReader class to allow for more reliable streaming on large downloads. This is now the return type of blobURL.download +* Fixed a bug that caused generation of signatures to fail at high levels of parallelism. +* Created the StorageException type to give easy access to the ErrorCode, StatusCode, and Message as available for unsuccessful responses. +* Added the StorageErrorCode type for checking against error codes returned by the service. +* Changed the AppendBlobAccessConditions field types to be Long instead of Int. +* Upgraded Netty dependency to allow uploading memory mapped files with https. +* Upgraded the autorest runtime dependency to fix a dependency bug in their package. +* Changed RequestRetryOptions maxTries and tryTimeout fields to be Integer instead of int. 0 is no longer allowed. +* Changed CommonRestResponse.lastModifiedTime to be lastModified. +* Added statusCode property to CommonRestResponse. +* Change dateProperty to be date on all generated types. +* Fixed a bug that prevented proper reset of body stream upon retry. +* Updated the defaults for RequestRetryOptions. + +2018.04.27 Version 10.0.0-preview +* Initial Release. Please see the README and wiki for information on the new design. diff --git a/storage/data-plane/ISSUE_TEMPLATE.md b/storage/data-plane/ISSUE_TEMPLATE.md new file mode 100644 index 0000000000000..1f651a424ba9d --- /dev/null +++ b/storage/data-plane/ISSUE_TEMPLATE.md @@ -0,0 +1,10 @@ +### Which service(blob, file, queue, table) does this issue concern? + + +### Which version of the SDK was used? + + +### What problem was encountered? + + +### Have you found a mitigation/solution? 
diff --git a/storage/data-plane/LICENSE b/storage/data-plane/LICENSE new file mode 100644 index 0000000000000..21071075c2459 --- /dev/null +++ b/storage/data-plane/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/storage/data-plane/README.md b/storage/data-plane/README.md new file mode 100644 index 0000000000000..5c79212827a35 --- /dev/null +++ b/storage/data-plane/README.md @@ -0,0 +1,188 @@ +# Microsoft Azure Storage SDK v10 for Java + +This project provides a client library in Java that makes it easy to consume Microsoft Azure Storage services. For documentation please see the [Storage API doc page](https://docs.microsoft.com/en-us/java/api/overview/azure/storage/client?view=azure-java-preview) and the [quick start document](https://docs.microsoft.com/en-us/azure/storage/blobs/storage-quickstart-blobs-java-v10). 
+Please note that this version of the library is a compete overhaul of the current Azure Storage Java Client Library, and is based on the new Storage SDK architecture, also referred to as V10. + +| SDK Name | Version | Description | Maven/API Reference Links | +| ------------- | ------------- | ----------- | ----- | +| [Blob Storage SDK v10 for Java](https://github.com/Azure/azure-storage-java/) | v10.2.0 | The next generation async Storage SDK | [Maven](https://mvnrepository.com/artifact/com.microsoft.azure/azure-storage-blob) - [Reference](https://docs.microsoft.com/en-us/java/api/overview/azure/storage/client?view=azure-java-stable) | +| [Queue Storage SDK v10 for Java](https://github.com/azure/azure-storage-java/tree/New-Storage-SDK-V10-Preview) | V10.0.0-Preview | The next generation async Storage SDK | [Maven](https://mvnrepository.com/artifact/com.microsoft.azure/azure-storage-queue) - [Reference](https://docs.microsoft.com/en-us/java/api/overview/azure/storage/queue?view=azure-java-preview) +| [Storage SDK v8 for Java](https://github.com/azure/azure-storage-java/tree/legacy-master) | v8 | Legacy Storage SDK (sync only) | [Maven](https://mvnrepository.com/artifact/com.microsoft.azure/azure-storage) - [Reference](https://docs.microsoft.com/en-us/java/api/overview/azure/storage_stable?view=azure-java-legacy)| +| [Storage SDK for Android](https://github.com/Azure/azure-storage-android) | v2 | Storage SDK for Android | [Maven](https://mvnrepository.com/artifact/com.microsoft.azure.android/azure-storage-android) - [Reference](http://azure.github.io/azure-storage-android/) +| [Azure Management Libraries for Java](https://github.com/Azure/azure-libraries-for-java) | v1 | Management libraries including Storage Resource Provider APIs | [Maven](https://mvnrepository.com/artifact/com.microsoft.azure/azure-mgmt-resources) - [Reference](http://azure.github.io/azure-storage-android/)| + +## Migrating to V10 + +Migrating to the 
newest version of the SDK will require a substantial rewrite of any component that interfaces with Azure Storage. Despite this, we feel the benefits offered by this new design are worth it, and we are happy to help with the transition! Please refer to the wiki for information on the core ideas behind the new design and best practices on how to use it effectively. + +# Features + * Blob + * Create/Read/Update/Delete containers + * Create/Read/Update/Delete blobs + * Advanced Blob Operations wrapped in the TransferManager class + * Features new to V10 + * Asynchronous I/O for all operations using the [ReactiveX](https://github.com/ReactiveX/RxJava) framework + * HttpPipeline which enables a high degree of per-request configurability and guaranteed thread safety + * Please see the wiki for more information + * 1-to-1 correlation with the Storage REST API for clarity and simplicity + +# Getting Started + +## Download +### Option 1: Via Maven + +To get the binaries of this library as distributed by Microsoft, ready for use within your project, you can use Maven. + +```xml + + com.microsoft.azure + azure-storage-blob + 10.5.0 + +``` + +### Option 2: Source Via Git + +To get the source code of the SDK via git just type: + + git clone git://github.com/Azure/azure-storage-java.git + cd ./azure-storage-java + mvn compile + +### Option 3: Source Zip + +To download a copy of the source code, click "Download ZIP" on the right side of the page or click [here](https://github.com/Azure/azure-storage-java/archive/master.zip). Unzip and navigate to the microsoft-azure-storage folder. + +## Minimum Requirements + +* Java 1.8+ +* [Jackson-Core](https://github.com/FasterXML/jackson-core) is used for JSON and XML parsing. +* [ReactiveX](https://github.com/ReactiveX/RxJava) is used for reactive, asynchronous IO operations. 
+* [Autorest-runtime](https://github.com/Azure/autorest-clientruntime-for-java) is used to interact with auto-generated code. +* (Optional) Maven + +The three dependencies, [Jackson-Core](https://github.com/FasterXML/jackson-core), [ReactiveX](https://github.com/ReactiveX/RxJava), and [Autorest-runtime](https://github.com/Azure/autorest-clientruntime-for-java), will be added automatically if Maven is used. Otherwise, please download the jars and add them to your build path. + +## Usage + +To use this SDK to call Microsoft Azure storage services, you need to first [create an account](https://azure.microsoft.com/free). + +Samples are provided in azure-storage/src/test/groovy/com/microsoft/azure/storage/Samples.java. The unit tests in the same directory can also be helpful. + +## Code Sample + +The following is a quick example on how to upload some data to an azure blob and download it back. You may also run the samples in azure-storage/src/test/groovy/com/microsoft/azure/storage/Samples.java. For additional information on using the client libraries to access Azure services see the How To guides for [blobs](http://azure.microsoft.com/en-us/documentation/articles/storage-java-how-to-use-blob-storage/) and the [general documentation](http://azure.microsoft.com/en-us/develop/java/). + +```java +public class Sample { + /** + * This example shows how to start using the Azure Storage Blob SDK for Java. + */ + public void basicExample() throws InvalidKeyException, MalformedURLException { + // From the Azure portal, get your Storage account's name and account key. + String accountName = getAccountName(); + String accountKey = getAccountKey(); + + // Use your Storage account's name and key to create a credential object; this is used to access your account. + SharedKeyCredentials credential = new SharedKeyCredentials(accountName, accountKey); + + /* + Create a request pipeline that is used to process HTTP(S) requests and responses. 
It requires your accont + credentials. In more advanced scenarios, you can configure telemetry, retry policies, logging, and other + options. Also you can configure multiple pipelines for different scenarios. + */ + HttpPipeline pipeline = StorageURL.createPipeline(credential, new PipelineOptions()); + + /* + From the Azure portal, get your Storage account blob service URL endpoint. + The URL typically looks like this: + */ + URL u = new URL(String.format(Locale.ROOT, "https://%s.blob.core.windows.net", accountName)); + + // Create a ServiceURL objet that wraps the service URL and a request pipeline. + ServiceURL serviceURL = new ServiceURL(u, pipeline); + + // Now you can use the ServiceURL to perform various container and blob operations. + + // This example shows several common operations just to get you started. + + /* + Create a URL that references a to-be-created container in your Azure Storage account. This returns a + ContainerURL object that wraps the container's URL and a request pipeline (inherited from serviceURL). + Note that container names require lowercase. + */ + ContainerURL containerURL = serviceURL.createContainerURL("myjavacontainerbasic"); + + /* + Create a URL that references a to-be-created blob in your Azure Storage account's container. + This returns a BlockBlobURL object that wraps the blob's URl and a request pipeline + (inherited from containerURL). Note that blob names can be mixed case. + */ + BlockBlobURL blobURL = containerURL.createBlockBlobURL("HelloWorld.txt"); + + String data = "Hello world!"; + + // Create the container on the service (with no metadata and no public access) + containerURL.create(null, null) + .flatMap(containersCreateResponse -> + /* + Create the blob with string (plain text) content. + NOTE: It is imperative that the provided length matches the actual length exactly. 
+ */ + blobURL.upload(Flowable.just(ByteBuffer.wrap(data.getBytes())), data.length(), + null, null, null)) + .flatMap(blobsDownloadResponse -> + // Download the blob's content. + blobURL.download(null, null, false)) + .flatMap(blobsDownloadResponse -> + // Verify that the blob data round-tripped correctly. + FlowableUtil.collectBytesInBuffer(blobsDownloadResponse.body(null)) + .doOnSuccess(byteBuffer -> { + if (byteBuffer.compareTo(ByteBuffer.wrap(data.getBytes())) != 0) { + throw new Exception("The downloaded data does not match the uploaded data."); + } + })) + .flatMap(byteBuffer -> + // Delete the blob we created earlier. + blobURL.delete(null, null)) + .flatMap(blobsDeleteResponse -> + // Delete the container we created earlier. + containerURL.delete(null)) + /* + This will synchronize all the above operations. This is strongly discouraged for use in production as + it eliminates the benefits of asynchronous IO. We use it here to enable the sample to complete and + demonstrate its effectiveness. + */ + .blockingGet(); + } +} +``` +## Building + +If building from sources, run mvn compile to build. No build steps are necessary if including the package as a maven dependency. + +## Running tests + +Please refer to CONTRIBUTING.md for information on how to run the tests. + + + +# Need Help? + +Be sure to check out the Microsoft Azure [Developer Forums on MSDN](http://social.msdn.microsoft.com/Forums/windowsazure/en-US/home?forum=windowsazuredata) or the [Developer Forums on Stack Overflow](http://stackoverflow.com/questions/tagged/azure+windows-azure-storage) if you have trouble with the provided code. + +# Contribute Code or Provide Feedback + +If you would like to become an active contributor to this project please follow the instructions provided in [Azure Projects Contribution Guidelines](http://azure.github.io/guidelines/). 
+ +If you encounter any bugs with the library please file an issue in the [Issues](https://github.com/Azure/azure-storage-java/issues) section of the project. + +When sending pull requests, please send non-breaking PRs to the dev branch and breaking changes to the dev_breaking branch. Do not make PRs against master. + +# Learn More + +* [Quick Start with the Azure Storage SDK v10 for Java](https://docs.microsoft.com/en-us/azure/storage/blobs/storage-quickstart-blobs-java-v10) +* [Java API Reference](https://docs.microsoft.com/en-us/java/api/overview/azure/storage/client?view=azure-java-preview) +* [Azure Storage Service](http://azure.microsoft.com/en-us/documentation/services/storage/) +* [Azure Storage Team Blog](http://blogs.msdn.com/b/windowsazurestorage/) +* [Javadoc](http://azure.github.io/azure-storage-java/) diff --git a/storage/data-plane/pom.xml b/storage/data-plane/pom.xml new file mode 100644 index 0000000000000..22b87f9c997e8 --- /dev/null +++ b/storage/data-plane/pom.xml @@ -0,0 +1,194 @@ + + + + 4.0.0 + + com.microsoft.azure + azure-storage-blob + 10.5.0 + + Azure Storage Blob + The Azure Storage Java Blob library. 
+ https://github.com/Azure/azure-storage-java + + + + The MIT License (MIT) + http://opensource.org/licenses/MIT + repo + + + + + scm:git:https://github.com/Azure/azure-storage-java + scm:git:git@github.com:Azure/azure-storage-java.git + HEAD + + + + UTF-8 + + + + + + + microsoft + Microsoft + + + + + + bintray + Groovy Bintray + https://dl.bintray.com/groovy/maven + + never + + + false + + + + + + + com.microsoft.rest.v2 + client-runtime + 2.0.2 + + + org.slf4j + slf4j-api + 1.7.25 + + + junit + junit + 4.12 + test + + + org.spockframework + spock-core + test + 1.1-groovy-2.4 + + + org.codehaus.groovy + groovy-all + 2.4.13 + test + + + cglib + cglib-nodep + 3.2.7 + test + + + org.objenesis + objenesis + 2.6 + test + + + uk.org.lidalia + slf4j-test + 1.2.0 + test + + + + + src/main/java + src/test/java + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.1 + + groovy-eclipse-compiler + -Xlint:unchecked + 1.8 + 1.8 + true + + + + org.codehaus.groovy + groovy-eclipse-compiler + 2.9.3-01 + + + org.codehaus.groovy + groovy-eclipse-batch + 2.4.15-01 + + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.20.1 + + false + + **/Test*.* + **/*Test.* + **/*Tests.* + **/*Samples*.* + + + + + org.apache.maven.plugins + maven-javadoc-plugin + 2.10.1 + + + + + api_1.8 + https://docs.oracle.com/javase/8/docs/api/ + + + + + + + + + apiNote + m + API Note: + + + + + + org.apache.maven.plugins + maven-release-plugin + 2.5.2 + + + org.codehaus.mojo + build-helper-maven-plugin + 3.0.0 + + + + diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/GeneratedAppendBlobs.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/GeneratedAppendBlobs.java new file mode 100644 index 0000000000000..d32f7750a2848 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/GeneratedAppendBlobs.java @@ -0,0 +1,353 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. 
+ * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage; + +import com.microsoft.azure.storage.blob.models.AppendBlobAppendBlockResponse; +import com.microsoft.azure.storage.blob.models.AppendBlobCreateResponse; +import com.microsoft.azure.storage.blob.models.AppendPositionAccessConditions; +import com.microsoft.azure.storage.blob.models.BlobHTTPHeaders; +import com.microsoft.azure.storage.blob.models.LeaseAccessConditions; +import com.microsoft.azure.storage.blob.models.ModifiedAccessConditions; +import com.microsoft.azure.storage.blob.models.StorageErrorException; +import com.microsoft.rest.v2.Context; +import com.microsoft.rest.v2.DateTimeRfc1123; +import com.microsoft.rest.v2.RestProxy; +import com.microsoft.rest.v2.ServiceCallback; +import com.microsoft.rest.v2.ServiceFuture; +import com.microsoft.rest.v2.Validator; +import com.microsoft.rest.v2.annotations.BodyParam; +import com.microsoft.rest.v2.annotations.ExpectedResponses; +import com.microsoft.rest.v2.annotations.HeaderParam; +import com.microsoft.rest.v2.annotations.Host; +import com.microsoft.rest.v2.annotations.HostParam; +import com.microsoft.rest.v2.annotations.PUT; +import com.microsoft.rest.v2.annotations.QueryParam; +import com.microsoft.rest.v2.annotations.UnexpectedResponseExceptionType; +import com.microsoft.rest.v2.util.Base64Util; +import io.reactivex.Completable; +import io.reactivex.Flowable; +import io.reactivex.Single; +import io.reactivex.annotations.NonNull; +import java.nio.ByteBuffer; +import java.time.OffsetDateTime; +import java.util.HashMap; +import java.util.Map; + +/** + * An instance of this class provides access to all the operations defined in + * GeneratedAppendBlobs. 
+ */ +public final class GeneratedAppendBlobs { + /** + * The proxy service used to perform REST calls. + */ + private AppendBlobsService service; + + /** + * The service client containing this operation class. + */ + private GeneratedStorageClient client; + + /** + * Initializes an instance of GeneratedAppendBlobs. + * + * @param client the instance of the service client containing this operation class. + */ + public GeneratedAppendBlobs(GeneratedStorageClient client) { + this.service = RestProxy.create(AppendBlobsService.class, client); + this.client = client; + } + + /** + * The interface defining all the services for GeneratedAppendBlobs to be + * used by the proxy service to perform REST calls. + */ + @Host("{url}") + private interface AppendBlobsService { + @PUT("{containerName}/{blob}") + @ExpectedResponses({201}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single create(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("Content-Length") long contentLength, @HeaderParam("x-ms-meta-") Map metadata, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @HeaderParam("x-ms-blob-type") String blobType, @HeaderParam("x-ms-blob-content-type") String blobContentType, @HeaderParam("x-ms-blob-content-encoding") String blobContentEncoding, @HeaderParam("x-ms-blob-content-language") String blobContentLanguage, @HeaderParam("x-ms-blob-content-md5") String blobContentMD5, @HeaderParam("x-ms-blob-cache-control") String blobCacheControl, @HeaderParam("x-ms-blob-content-disposition") String blobContentDisposition, @HeaderParam("x-ms-lease-id") String leaseId, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince, @HeaderParam("If-Match") String ifMatch, @HeaderParam("If-None-Match") String ifNoneMatch); + + @PUT("{containerName}/{blob}") + @ExpectedResponses({201}) + 
@UnexpectedResponseExceptionType(StorageErrorException.class) + Single appendBlock(Context context, @HostParam("url") String url, @BodyParam("application/xml; charset=utf-8") Flowable body, @QueryParam("timeout") Integer timeout, @HeaderParam("Content-Length") long contentLength, @HeaderParam("Content-MD5") String transactionalContentMD5, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp, @HeaderParam("x-ms-lease-id") String leaseId, @HeaderParam("x-ms-blob-condition-maxsize") Long maxSize, @HeaderParam("x-ms-blob-condition-appendpos") Long appendPosition, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince, @HeaderParam("If-Match") String ifMatch, @HeaderParam("If-None-Match") String ifNoneMatch); + } + + /** + * The Create Append Blob operation creates a new append blob. + * + * @param context The context to associate with this operation. + * @param contentLength The length of the request. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. 
+ * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param blobHTTPHeaders Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + public void create(Context context, @NonNull long contentLength, Integer timeout, Map metadata, String requestId, BlobHTTPHeaders blobHTTPHeaders, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + createAsync(context, contentLength, timeout, metadata, requestId, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions).blockingAwait(); + } + + /** + * The Create Append Blob operation creates a new append blob. + * + * @param context The context to associate with this operation. + * @param contentLength The length of the request. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. 
Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param blobHTTPHeaders Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture createAsync(Context context, @NonNull long contentLength, Integer timeout, Map metadata, String requestId, BlobHTTPHeaders blobHTTPHeaders, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(createAsync(context, contentLength, timeout, metadata, requestId, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions), serviceCallback); + } + + /** + * The Create Append Blob operation creates a new append blob. + * + * @param context The context to associate with this operation. + * @param contentLength The length of the request. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. 
If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param blobHTTPHeaders Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Single createWithRestResponseAsync(Context context, @NonNull long contentLength, Integer timeout, Map metadata, String requestId, BlobHTTPHeaders blobHTTPHeaders, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(metadata); + Validator.validate(blobHTTPHeaders); + Validator.validate(leaseAccessConditions); + Validator.validate(modifiedAccessConditions); + final String blobType = "AppendBlob"; + String blobContentType = null; + if (blobHTTPHeaders != null) { + blobContentType = blobHTTPHeaders.blobContentType(); + } + String blobContentEncoding = null; + if (blobHTTPHeaders != null) { + blobContentEncoding = blobHTTPHeaders.blobContentEncoding(); + } + String blobContentLanguage = null; + if (blobHTTPHeaders != null) { + blobContentLanguage = blobHTTPHeaders.blobContentLanguage(); + } + byte[] blobContentMD5 = null; + if (blobHTTPHeaders != null) { + blobContentMD5 = blobHTTPHeaders.blobContentMD5(); + } + String blobCacheControl = null; + if (blobHTTPHeaders != null) { + blobCacheControl = blobHTTPHeaders.blobCacheControl(); + } + String blobContentDisposition = null; + if (blobHTTPHeaders != null) { + blobContentDisposition = blobHTTPHeaders.blobContentDisposition(); + } + String leaseId = null; + if (leaseAccessConditions != null) { + leaseId = leaseAccessConditions.leaseId(); + } + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + String ifMatch 
= null; + if (modifiedAccessConditions != null) { + ifMatch = modifiedAccessConditions.ifMatch(); + } + String ifNoneMatch = null; + if (modifiedAccessConditions != null) { + ifNoneMatch = modifiedAccessConditions.ifNoneMatch(); + } + String blobContentMD5Converted = Base64Util.encodeToString(blobContentMD5); + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.create(context, this.client.url(), timeout, contentLength, metadata, this.client.version(), requestId, blobType, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5Converted, blobCacheControl, blobContentDisposition, leaseId, ifModifiedSinceConverted, ifUnmodifiedSinceConverted, ifMatch, ifNoneMatch); + } + + /** + * The Create Append Blob operation creates a new append blob. + * + * @param context The context to associate with this operation. + * @param contentLength The length of the request. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. 
See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param blobHTTPHeaders Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Completable createAsync(Context context, @NonNull long contentLength, Integer timeout, Map metadata, String requestId, BlobHTTPHeaders blobHTTPHeaders, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + return createWithRestResponseAsync(context, contentLength, timeout, metadata, requestId, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions) + .toCompletable(); + } + + /** + * The Append Block operation commits a new block of data to the end of an existing append blob. The Append Block operation is permitted only if the blob was created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later. + * + * @param context The context to associate with this operation. + * @param body Initial data. + * @param contentLength The length of the request. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param transactionalContentMD5 Specify the transactional md5 for the body, to be validated by the service. 
+ * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param appendPositionAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + public void appendBlock(Context context, @NonNull Flowable body, @NonNull long contentLength, Integer timeout, byte[] transactionalContentMD5, String requestId, LeaseAccessConditions leaseAccessConditions, AppendPositionAccessConditions appendPositionAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + appendBlockAsync(context, body, contentLength, timeout, transactionalContentMD5, requestId, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions).blockingAwait(); + } + + /** + * The Append Block operation commits a new block of data to the end of an existing append blob. The Append Block operation is permitted only if the blob was created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later. + * + * @param context The context to associate with this operation. + * @param body Initial data. + * @param contentLength The length of the request. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. 
+ * @param transactionalContentMD5 Specify the transactional md5 for the body, to be validated by the service. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param appendPositionAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture appendBlockAsync(Context context, @NonNull Flowable body, @NonNull long contentLength, Integer timeout, byte[] transactionalContentMD5, String requestId, LeaseAccessConditions leaseAccessConditions, AppendPositionAccessConditions appendPositionAccessConditions, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(appendBlockAsync(context, body, contentLength, timeout, transactionalContentMD5, requestId, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions), serviceCallback); + } + + /** + * The Append Block operation commits a new block of data to the end of an existing append blob. The Append Block operation is permitted only if the blob was created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later. + * + * @param context The context to associate with this operation. + * @param body Initial data. + * @param contentLength The length of the request. + * @param timeout The timeout parameter is expressed in seconds. 
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param transactionalContentMD5 Specify the transactional md5 for the body, to be validated by the service. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param appendPositionAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Single appendBlockWithRestResponseAsync(Context context, @NonNull Flowable body, @NonNull long contentLength, Integer timeout, byte[] transactionalContentMD5, String requestId, LeaseAccessConditions leaseAccessConditions, AppendPositionAccessConditions appendPositionAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (body == null) { + throw new IllegalArgumentException("Parameter body is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(leaseAccessConditions); + Validator.validate(appendPositionAccessConditions); + Validator.validate(modifiedAccessConditions); + final String comp = "appendblock"; + String leaseId = null; + if (leaseAccessConditions != null) { + leaseId = leaseAccessConditions.leaseId(); + } + Long maxSize = null; + if (appendPositionAccessConditions != null) { + 
maxSize = appendPositionAccessConditions.maxSize(); + } + Long appendPosition = null; + if (appendPositionAccessConditions != null) { + appendPosition = appendPositionAccessConditions.appendPosition(); + } + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + String ifMatch = null; + if (modifiedAccessConditions != null) { + ifMatch = modifiedAccessConditions.ifMatch(); + } + String ifNoneMatch = null; + if (modifiedAccessConditions != null) { + ifNoneMatch = modifiedAccessConditions.ifNoneMatch(); + } + String transactionalContentMD5Converted = Base64Util.encodeToString(transactionalContentMD5); + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.appendBlock(context, this.client.url(), body, timeout, contentLength, transactionalContentMD5Converted, this.client.version(), requestId, comp, leaseId, maxSize, appendPosition, ifModifiedSinceConverted, ifUnmodifiedSinceConverted, ifMatch, ifNoneMatch); + } + + /** + * The Append Block operation commits a new block of data to the end of an existing append blob. The Append Block operation is permitted only if the blob was created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later. + * + * @param context The context to associate with this operation. + * @param body Initial data. + * @param contentLength The length of the request. + * @param timeout The timeout parameter is expressed in seconds. 
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param transactionalContentMD5 Specify the transactional md5 for the body, to be validated by the service. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param appendPositionAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Completable appendBlockAsync(Context context, @NonNull Flowable body, @NonNull long contentLength, Integer timeout, byte[] transactionalContentMD5, String requestId, LeaseAccessConditions leaseAccessConditions, AppendPositionAccessConditions appendPositionAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + return appendBlockWithRestResponseAsync(context, body, contentLength, timeout, transactionalContentMD5, requestId, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions) + .toCompletable(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/GeneratedBlobs.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/GeneratedBlobs.java new file mode 100644 index 0000000000000..e23326b3108cc --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/GeneratedBlobs.java @@ -0,0 +1,1900 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. 
+ * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage; + +import com.microsoft.azure.storage.blob.models.AccessTier; +import com.microsoft.azure.storage.blob.models.BlobAbortCopyFromURLResponse; +import com.microsoft.azure.storage.blob.models.BlobAcquireLeaseResponse; +import com.microsoft.azure.storage.blob.models.BlobBreakLeaseResponse; +import com.microsoft.azure.storage.blob.models.BlobChangeLeaseResponse; +import com.microsoft.azure.storage.blob.models.BlobCopyFromURLResponse; +import com.microsoft.azure.storage.blob.models.BlobCreateSnapshotResponse; +import com.microsoft.azure.storage.blob.models.BlobDeleteResponse; +import com.microsoft.azure.storage.blob.models.BlobDownloadResponse; +import com.microsoft.azure.storage.blob.models.BlobGetAccountInfoResponse; +import com.microsoft.azure.storage.blob.models.BlobGetPropertiesResponse; +import com.microsoft.azure.storage.blob.models.BlobHTTPHeaders; +import com.microsoft.azure.storage.blob.models.BlobReleaseLeaseResponse; +import com.microsoft.azure.storage.blob.models.BlobRenewLeaseResponse; +import com.microsoft.azure.storage.blob.models.BlobSetHTTPHeadersResponse; +import com.microsoft.azure.storage.blob.models.BlobSetMetadataResponse; +import com.microsoft.azure.storage.blob.models.BlobSetTierResponse; +import com.microsoft.azure.storage.blob.models.BlobStartCopyFromURLResponse; +import com.microsoft.azure.storage.blob.models.BlobUndeleteResponse; +import com.microsoft.azure.storage.blob.models.DeleteSnapshotsOptionType; +import com.microsoft.azure.storage.blob.models.LeaseAccessConditions; +import com.microsoft.azure.storage.blob.models.ModifiedAccessConditions; +import com.microsoft.azure.storage.blob.models.SourceModifiedAccessConditions; +import com.microsoft.azure.storage.blob.models.StorageErrorException; +import com.microsoft.rest.v2.Context; +import 
com.microsoft.rest.v2.DateTimeRfc1123; +import com.microsoft.rest.v2.RestProxy; +import com.microsoft.rest.v2.ServiceCallback; +import com.microsoft.rest.v2.ServiceFuture; +import com.microsoft.rest.v2.Validator; +import com.microsoft.rest.v2.annotations.DELETE; +import com.microsoft.rest.v2.annotations.ExpectedResponses; +import com.microsoft.rest.v2.annotations.GET; +import com.microsoft.rest.v2.annotations.HEAD; +import com.microsoft.rest.v2.annotations.HeaderParam; +import com.microsoft.rest.v2.annotations.Host; +import com.microsoft.rest.v2.annotations.HostParam; +import com.microsoft.rest.v2.annotations.PUT; +import com.microsoft.rest.v2.annotations.QueryParam; +import com.microsoft.rest.v2.annotations.UnexpectedResponseExceptionType; +import com.microsoft.rest.v2.util.Base64Util; +import io.reactivex.Completable; +import io.reactivex.Flowable; +import io.reactivex.Maybe; +import io.reactivex.Single; +import io.reactivex.annotations.NonNull; +import java.net.URL; +import java.nio.ByteBuffer; +import java.time.OffsetDateTime; +import java.util.HashMap; +import java.util.Map; + +/** + * An instance of this class provides access to all the operations defined in + * GeneratedBlobs. + */ +public final class GeneratedBlobs { + /** + * The proxy service used to perform REST calls. + */ + private BlobsService service; + + /** + * The service client containing this operation class. + */ + private GeneratedStorageClient client; + + /** + * Initializes an instance of GeneratedBlobs. + * + * @param client the instance of the service client containing this operation class. + */ + public GeneratedBlobs(GeneratedStorageClient client) { + this.service = RestProxy.create(BlobsService.class, client); + this.client = client; + } + + /** + * The interface defining all the services for GeneratedBlobs to be used by + * the proxy service to perform REST calls. 
+ */ + @Host("{url}") + private interface BlobsService { + @GET("{containerName}/{blob}") + @ExpectedResponses({200, 206}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single download(Context context, @HostParam("url") String url, @QueryParam("snapshot") String snapshot, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-range") String range, @HeaderParam("x-ms-range-get-content-md5") Boolean rangeGetContentMD5, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @HeaderParam("x-ms-lease-id") String leaseId, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince, @HeaderParam("If-Match") String ifMatch, @HeaderParam("If-None-Match") String ifNoneMatch); + + @HEAD("{containerName}/{blob}") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single getProperties(Context context, @HostParam("url") String url, @QueryParam("snapshot") String snapshot, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @HeaderParam("x-ms-lease-id") String leaseId, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince, @HeaderParam("If-Match") String ifMatch, @HeaderParam("If-None-Match") String ifNoneMatch); + + @DELETE("{containerName}/{blob}") + @ExpectedResponses({202}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single delete(Context context, @HostParam("url") String url, @QueryParam("snapshot") String snapshot, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-delete-snapshots") DeleteSnapshotsOptionType deleteSnapshots, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @HeaderParam("x-ms-lease-id") String leaseId, 
@HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince, @HeaderParam("If-Match") String ifMatch, @HeaderParam("If-None-Match") String ifNoneMatch); + + @PUT("{containerName}/{blob}") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single undelete(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp); + + @PUT("{containerName}/{blob}") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single setHTTPHeaders(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp, @HeaderParam("x-ms-blob-cache-control") String blobCacheControl, @HeaderParam("x-ms-blob-content-type") String blobContentType, @HeaderParam("x-ms-blob-content-md5") String blobContentMD5, @HeaderParam("x-ms-blob-content-encoding") String blobContentEncoding, @HeaderParam("x-ms-blob-content-language") String blobContentLanguage, @HeaderParam("x-ms-blob-content-disposition") String blobContentDisposition, @HeaderParam("x-ms-lease-id") String leaseId, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince, @HeaderParam("If-Match") String ifMatch, @HeaderParam("If-None-Match") String ifNoneMatch); + + @PUT("{containerName}/{blob}") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single setMetadata(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-meta-") Map metadata, @HeaderParam("x-ms-version") String version, 
@HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp, @HeaderParam("x-ms-lease-id") String leaseId, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince, @HeaderParam("If-Match") String ifMatch, @HeaderParam("If-None-Match") String ifNoneMatch); + + @PUT("{containerName}/{blob}") + @ExpectedResponses({201}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single acquireLease(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-lease-duration") Integer duration, @HeaderParam("x-ms-proposed-lease-id") String proposedLeaseId, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp, @HeaderParam("x-ms-lease-action") String action, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince, @HeaderParam("If-Match") String ifMatch, @HeaderParam("If-None-Match") String ifNoneMatch); + + @PUT("{containerName}/{blob}") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single releaseLease(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-lease-id") String leaseId, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp, @HeaderParam("x-ms-lease-action") String action, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince, @HeaderParam("If-Match") String ifMatch, @HeaderParam("If-None-Match") String ifNoneMatch); + + @PUT("{containerName}/{blob}") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single renewLease(Context context, 
@HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-lease-id") String leaseId, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp, @HeaderParam("x-ms-lease-action") String action, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince, @HeaderParam("If-Match") String ifMatch, @HeaderParam("If-None-Match") String ifNoneMatch); + + @PUT("{containerName}/{blob}") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single changeLease(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-lease-id") String leaseId, @HeaderParam("x-ms-proposed-lease-id") String proposedLeaseId, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp, @HeaderParam("x-ms-lease-action") String action, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince, @HeaderParam("If-Match") String ifMatch, @HeaderParam("If-None-Match") String ifNoneMatch); + + @PUT("{containerName}/{blob}") + @ExpectedResponses({202}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single breakLease(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-lease-break-period") Integer breakPeriod, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp, @HeaderParam("x-ms-lease-action") String action, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince, @HeaderParam("If-Match") String ifMatch, @HeaderParam("If-None-Match") String ifNoneMatch); + 
+ @PUT("{containerName}/{blob}") + @ExpectedResponses({201}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single createSnapshot(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-meta-") Map metadata, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince, @HeaderParam("If-Match") String ifMatch, @HeaderParam("If-None-Match") String ifNoneMatch, @HeaderParam("x-ms-lease-id") String leaseId); + + @PUT("{containerName}/{blob}") + @ExpectedResponses({202}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single startCopyFromURL(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-meta-") Map metadata, @HeaderParam("x-ms-copy-source") URL copySource, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @HeaderParam("x-ms-source-if-modified-since") DateTimeRfc1123 sourceIfModifiedSince, @HeaderParam("x-ms-source-if-unmodified-since") DateTimeRfc1123 sourceIfUnmodifiedSince, @HeaderParam("x-ms-source-if-match") String sourceIfMatch, @HeaderParam("x-ms-source-if-none-match") String sourceIfNoneMatch, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince, @HeaderParam("If-Match") String ifMatch, @HeaderParam("If-None-Match") String ifNoneMatch, @HeaderParam("x-ms-lease-id") String leaseId); + + @PUT("{containerName}/{blob}") + @ExpectedResponses({202}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single copyFromURL(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-meta-") Map metadata, 
@HeaderParam("x-ms-copy-source") URL copySource, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @HeaderParam("x-ms-requires-sync") String xMsRequiresSync, @HeaderParam("x-ms-source-if-modified-since") DateTimeRfc1123 sourceIfModifiedSince, @HeaderParam("x-ms-source-if-unmodified-since") DateTimeRfc1123 sourceIfUnmodifiedSince, @HeaderParam("x-ms-source-if-match") String sourceIfMatch, @HeaderParam("x-ms-source-if-none-match") String sourceIfNoneMatch, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince, @HeaderParam("If-Match") String ifMatch, @HeaderParam("If-None-Match") String ifNoneMatch, @HeaderParam("x-ms-lease-id") String leaseId); + + @PUT("{containerName}/{blob}") + @ExpectedResponses({204}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single abortCopyFromURL(Context context, @HostParam("url") String url, @QueryParam("copyid") String copyId, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp, @HeaderParam("x-ms-copy-action") String copyActionAbortConstant, @HeaderParam("x-ms-lease-id") String leaseId); + + @PUT("{containerName}/{blob}") + @ExpectedResponses({200, 202}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single setTier(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-access-tier") AccessTier tier, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp, @HeaderParam("x-ms-lease-id") String leaseId); + + @GET("{containerName}/{blobName}") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single getAccountInfo(Context context, @HostParam("url") String url, 
@HeaderParam("x-ms-version") String version, @QueryParam("restype") String restype, @QueryParam("comp") String comp); + } + + /** + * The Download operation reads or downloads a blob from the system, including its metadata and properties. You can also call Download to read a snapshot. + * + * @param context The context to associate with this operation. + * @param snapshot The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param range Return only the bytes of the blob in the specified range. + * @param rangeGetContentMD5 When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB in size. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the Flowable<ByteBuffer> object if successful. 
+ */ + public Flowable download(Context context, String snapshot, Integer timeout, String range, Boolean rangeGetContentMD5, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + return downloadAsync(context, snapshot, timeout, range, rangeGetContentMD5, requestId, leaseAccessConditions, modifiedAccessConditions).blockingGet(); + } + + /** + * The Download operation reads or downloads a blob from the system, including its metadata and properties. You can also call Download to read a snapshot. + * + * @param context The context to associate with this operation. + * @param snapshot The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param range Return only the bytes of the blob in the specified range. + * @param rangeGetContentMD5 When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB in size. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture> downloadAsync(Context context, String snapshot, Integer timeout, String range, Boolean rangeGetContentMD5, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback> serviceCallback) { + return ServiceFuture.fromBody(downloadAsync(context, snapshot, timeout, range, rangeGetContentMD5, requestId, leaseAccessConditions, modifiedAccessConditions), serviceCallback); + } + + /** + * The Download operation reads or downloads a blob from the system, including its metadata and properties. You can also call Download to read a snapshot. + * + * @param context The context to associate with this operation. + * @param snapshot The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param range Return only the bytes of the blob in the specified range. + * @param rangeGetContentMD5 When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB in size. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. 
+ * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Single downloadWithRestResponseAsync(Context context, String snapshot, Integer timeout, String range, Boolean rangeGetContentMD5, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(leaseAccessConditions); + Validator.validate(modifiedAccessConditions); + String leaseId = null; + if (leaseAccessConditions != null) { + leaseId = leaseAccessConditions.leaseId(); + } + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + String ifMatch = null; + if (modifiedAccessConditions != null) { + ifMatch = modifiedAccessConditions.ifMatch(); + } + String ifNoneMatch = null; + if (modifiedAccessConditions != null) { + ifNoneMatch = modifiedAccessConditions.ifNoneMatch(); + } + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.download(context, this.client.url(), snapshot, timeout, range, rangeGetContentMD5, this.client.version(), requestId, 
leaseId, ifModifiedSinceConverted, ifUnmodifiedSinceConverted, ifMatch, ifNoneMatch); + } + + /** + * The Download operation reads or downloads a blob from the system, including its metadata and properties. You can also call Download to read a snapshot. + * + * @param context The context to associate with this operation. + * @param snapshot The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param range Return only the bytes of the blob in the specified range. + * @param rangeGetContentMD5 When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB in size. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Maybe> downloadAsync(Context context, String snapshot, Integer timeout, String range, Boolean rangeGetContentMD5, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + return downloadWithRestResponseAsync(context, snapshot, timeout, range, rangeGetContentMD5, requestId, leaseAccessConditions, modifiedAccessConditions) + .flatMapMaybe((BlobDownloadResponse res) -> res.body() == null ? Maybe.empty() : Maybe.just(res.body())); + } + + /** + * The Get Properties operation returns all user-defined metadata, standard HTTP properties, and system properties for the blob. It does not return the content of the blob. + * + * @param context The context to associate with this operation. + * @param snapshot The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
+ */ + public void getProperties(Context context, String snapshot, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + getPropertiesAsync(context, snapshot, timeout, requestId, leaseAccessConditions, modifiedAccessConditions).blockingAwait(); + } + + /** + * The Get Properties operation returns all user-defined metadata, standard HTTP properties, and system properties for the blob. It does not return the content of the blob. + * + * @param context The context to associate with this operation. + * @param snapshot The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. 
+ */ + public ServiceFuture getPropertiesAsync(Context context, String snapshot, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(getPropertiesAsync(context, snapshot, timeout, requestId, leaseAccessConditions, modifiedAccessConditions), serviceCallback); + } + + /** + * The Get Properties operation returns all user-defined metadata, standard HTTP properties, and system properties for the blob. It does not return the content of the blob. + * + * @param context The context to associate with this operation. + * @param snapshot The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Single getPropertiesWithRestResponseAsync(Context context, String snapshot, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(leaseAccessConditions); + Validator.validate(modifiedAccessConditions); + String leaseId = null; + if (leaseAccessConditions != null) { + leaseId = leaseAccessConditions.leaseId(); + } + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + String ifMatch = null; + if (modifiedAccessConditions != null) { + ifMatch = modifiedAccessConditions.ifMatch(); + } + String ifNoneMatch = null; + if (modifiedAccessConditions != null) { + ifNoneMatch = modifiedAccessConditions.ifNoneMatch(); + } + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.getProperties(context, this.client.url(), snapshot, timeout, this.client.version(), requestId, leaseId, ifModifiedSinceConverted, ifUnmodifiedSinceConverted, ifMatch, ifNoneMatch); + } + + /** + * The Get Properties operation returns all user-defined metadata, standard HTTP properties, and system properties for the blob. It does not return the content of the blob. 
+ * + * @param context The context to associate with this operation. + * @param snapshot The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Completable getPropertiesAsync(Context context, String snapshot, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + return getPropertiesWithRestResponseAsync(context, snapshot, timeout, requestId, leaseAccessConditions, modifiedAccessConditions) + .toCompletable(); + } + + /** + * If the storage account's soft delete feature is disabled then, when a blob is deleted, it is permanently removed from the storage account. If the storage account's soft delete feature is enabled, then, when a blob is deleted, it is marked for deletion and becomes inaccessible immediately. 
However, the blob service retains the blob or snapshot for the number of days specified by the DeleteRetentionPolicy section of [Storage service properties] (Set-Blob-Service-Properties.md). After the specified number of days has passed, the blob's data is permanently removed from the storage account. Note that you continue to be charged for the soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and specify the "include=deleted" query parameter to discover which blobs and snapshots have been soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. All other operations on a soft-deleted blob or snapshot causes the service to return an HTTP status code of 404 (ResourceNotFound). + * + * @param context The context to associate with this operation. + * @param snapshot The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param deleteSnapshots Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob and all of its snapshots. only: Delete only the blob's snapshots and not the blob itself. Possible values include: 'include', 'only'. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. 
+ * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + public void delete(Context context, String snapshot, Integer timeout, DeleteSnapshotsOptionType deleteSnapshots, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + deleteAsync(context, snapshot, timeout, deleteSnapshots, requestId, leaseAccessConditions, modifiedAccessConditions).blockingAwait(); + } + + /** + * If the storage account's soft delete feature is disabled then, when a blob is deleted, it is permanently removed from the storage account. If the storage account's soft delete feature is enabled, then, when a blob is deleted, it is marked for deletion and becomes inaccessible immediately. However, the blob service retains the blob or snapshot for the number of days specified by the DeleteRetentionPolicy section of [Storage service properties] (Set-Blob-Service-Properties.md). After the specified number of days has passed, the blob's data is permanently removed from the storage account. Note that you continue to be charged for the soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and specify the "include=deleted" query parameter to discover which blobs and snapshots have been soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. All other operations on a soft-deleted blob or snapshot causes the service to return an HTTP status code of 404 (ResourceNotFound). + * + * @param context The context to associate with this operation. + * @param snapshot The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. 
For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param deleteSnapshots Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob and all of its snapshots. only: Delete only the blob's snapshots and not the blob itself. Possible values include: 'include', 'only'. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture deleteAsync(Context context, String snapshot, Integer timeout, DeleteSnapshotsOptionType deleteSnapshots, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(deleteAsync(context, snapshot, timeout, deleteSnapshots, requestId, leaseAccessConditions, modifiedAccessConditions), serviceCallback); + } + + /** + * If the storage account's soft delete feature is disabled then, when a blob is deleted, it is permanently removed from the storage account. 
If the storage account's soft delete feature is enabled, then, when a blob is deleted, it is marked for deletion and becomes inaccessible immediately. However, the blob service retains the blob or snapshot for the number of days specified by the DeleteRetentionPolicy section of [Storage service properties] (Set-Blob-Service-Properties.md). After the specified number of days has passed, the blob's data is permanently removed from the storage account. Note that you continue to be charged for the soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and specify the "include=deleted" query parameter to discover which blobs and snapshots have been soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. All other operations on a soft-deleted blob or snapshot causes the service to return an HTTP status code of 404 (ResourceNotFound). + * + * @param context The context to associate with this operation. + * @param snapshot The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param deleteSnapshots Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob and all of its snapshots. only: Delete only the blob's snapshots and not the blob itself. Possible values include: 'include', 'only'. 
+ * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Single deleteWithRestResponseAsync(Context context, String snapshot, Integer timeout, DeleteSnapshotsOptionType deleteSnapshots, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(leaseAccessConditions); + Validator.validate(modifiedAccessConditions); + String leaseId = null; + if (leaseAccessConditions != null) { + leaseId = leaseAccessConditions.leaseId(); + } + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + String ifMatch = null; + if (modifiedAccessConditions != null) { + ifMatch = modifiedAccessConditions.ifMatch(); + } + String ifNoneMatch = null; + if (modifiedAccessConditions != null) { + ifNoneMatch = modifiedAccessConditions.ifNoneMatch(); + } + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + 
if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.delete(context, this.client.url(), snapshot, timeout, deleteSnapshots, this.client.version(), requestId, leaseId, ifModifiedSinceConverted, ifUnmodifiedSinceConverted, ifMatch, ifNoneMatch); + } + + /** + * If the storage account's soft delete feature is disabled then, when a blob is deleted, it is permanently removed from the storage account. If the storage account's soft delete feature is enabled, then, when a blob is deleted, it is marked for deletion and becomes inaccessible immediately. However, the blob service retains the blob or snapshot for the number of days specified by the DeleteRetentionPolicy section of [Storage service properties] (Set-Blob-Service-Properties.md). After the specified number of days has passed, the blob's data is permanently removed from the storage account. Note that you continue to be charged for the soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and specify the "include=deleted" query parameter to discover which blobs and snapshots have been soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. All other operations on a soft-deleted blob or snapshot causes the service to return an HTTP status code of 404 (ResourceNotFound). + * + * @param context The context to associate with this operation. + * @param snapshot The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + * @param timeout The timeout parameter is expressed in seconds. 
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param deleteSnapshots Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob and all of its snapshots. only: Delete only the blob's snapshots and not the blob itself. Possible values include: 'include', 'only'. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Completable deleteAsync(Context context, String snapshot, Integer timeout, DeleteSnapshotsOptionType deleteSnapshots, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + return deleteWithRestResponseAsync(context, snapshot, timeout, deleteSnapshots, requestId, leaseAccessConditions, modifiedAccessConditions) + .toCompletable(); + } + + /** + * Undelete a blob that was previously soft deleted. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + public void undelete(Context context, Integer timeout, String requestId) { + undeleteAsync(context, timeout, requestId).blockingAwait(); + } + + /** + * Undelete a blob that was previously soft deleted. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture undeleteAsync(Context context, Integer timeout, String requestId, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(undeleteAsync(context, timeout, requestId), serviceCallback); + } + + /** + * Undelete a blob that was previously soft deleted. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. 
+ * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Single undeleteWithRestResponseAsync(Context context, Integer timeout, String requestId) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + final String comp = "undelete"; + return service.undelete(context, this.client.url(), timeout, this.client.version(), requestId, comp); + } + + /** + * Undelete a blob that was previously soft deleted. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Completable undeleteAsync(Context context, Integer timeout, String requestId) { + return undeleteWithRestResponseAsync(context, timeout, requestId) + .toCompletable(); + } + + /** + * The Set HTTP Headers operation sets system properties on the blob. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. 
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param blobHTTPHeaders Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + public void setHTTPHeaders(Context context, Integer timeout, String requestId, BlobHTTPHeaders blobHTTPHeaders, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + setHTTPHeadersAsync(context, timeout, requestId, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions).blockingAwait(); + } + + /** + * The Set HTTP Headers operation sets system properties on the blob. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param blobHTTPHeaders Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. 
+ * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture setHTTPHeadersAsync(Context context, Integer timeout, String requestId, BlobHTTPHeaders blobHTTPHeaders, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(setHTTPHeadersAsync(context, timeout, requestId, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions), serviceCallback); + } + + /** + * The Set HTTP Headers operation sets system properties on the blob. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param blobHTTPHeaders Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Single setHTTPHeadersWithRestResponseAsync(Context context, Integer timeout, String requestId, BlobHTTPHeaders blobHTTPHeaders, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(blobHTTPHeaders); + Validator.validate(leaseAccessConditions); + Validator.validate(modifiedAccessConditions); + final String comp = "properties"; + String blobCacheControl = null; + if (blobHTTPHeaders != null) { + blobCacheControl = blobHTTPHeaders.blobCacheControl(); + } + String blobContentType = null; + if (blobHTTPHeaders != null) { + blobContentType = blobHTTPHeaders.blobContentType(); + } + byte[] blobContentMD5 = null; + if (blobHTTPHeaders != null) { + blobContentMD5 = blobHTTPHeaders.blobContentMD5(); + } + String blobContentEncoding = null; + if (blobHTTPHeaders != null) { + blobContentEncoding = blobHTTPHeaders.blobContentEncoding(); + } + String blobContentLanguage = null; + if (blobHTTPHeaders != null) { + blobContentLanguage = blobHTTPHeaders.blobContentLanguage(); + } + String blobContentDisposition = null; + if (blobHTTPHeaders != null) { + blobContentDisposition = blobHTTPHeaders.blobContentDisposition(); + } + String leaseId = null; + if (leaseAccessConditions != null) { + leaseId = leaseAccessConditions.leaseId(); + } + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + String ifMatch = null; + if (modifiedAccessConditions != null) { + ifMatch = 
modifiedAccessConditions.ifMatch(); + } + String ifNoneMatch = null; + if (modifiedAccessConditions != null) { + ifNoneMatch = modifiedAccessConditions.ifNoneMatch(); + } + String blobContentMD5Converted = Base64Util.encodeToString(blobContentMD5); + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.setHTTPHeaders(context, this.client.url(), timeout, this.client.version(), requestId, comp, blobCacheControl, blobContentType, blobContentMD5Converted, blobContentEncoding, blobContentLanguage, blobContentDisposition, leaseId, ifModifiedSinceConverted, ifUnmodifiedSinceConverted, ifMatch, ifNoneMatch); + } + + /** + * The Set HTTP Headers operation sets system properties on the blob. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param blobHTTPHeaders Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Completable setHTTPHeadersAsync(Context context, Integer timeout, String requestId, BlobHTTPHeaders blobHTTPHeaders, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + return setHTTPHeadersWithRestResponseAsync(context, timeout, requestId, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions) + .toCompletable(); + } + + /** + * The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or more name-value pairs. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. 
+ * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + public void setMetadata(Context context, Integer timeout, Map metadata, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + setMetadataAsync(context, timeout, metadata, requestId, leaseAccessConditions, modifiedAccessConditions).blockingAwait(); + } + + /** + * The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or more name-value pairs. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. 
+ * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture setMetadataAsync(Context context, Integer timeout, Map metadata, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(setMetadataAsync(context, timeout, metadata, requestId, leaseAccessConditions, modifiedAccessConditions), serviceCallback); + } + + /** + * The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or more name-value pairs. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. 
+ * @return a Single which performs the network request upon subscription. + */ + public Single setMetadataWithRestResponseAsync(Context context, Integer timeout, Map metadata, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(metadata); + Validator.validate(leaseAccessConditions); + Validator.validate(modifiedAccessConditions); + final String comp = "metadata"; + String leaseId = null; + if (leaseAccessConditions != null) { + leaseId = leaseAccessConditions.leaseId(); + } + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + String ifMatch = null; + if (modifiedAccessConditions != null) { + ifMatch = modifiedAccessConditions.ifMatch(); + } + String ifNoneMatch = null; + if (modifiedAccessConditions != null) { + ifNoneMatch = modifiedAccessConditions.ifNoneMatch(); + } + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.setMetadata(context, this.client.url(), timeout, metadata, this.client.version(), requestId, comp, leaseId, ifModifiedSinceConverted, ifUnmodifiedSinceConverted, ifMatch, ifNoneMatch); + } + + /** + * The Set Blob Metadata operation sets user-defined 
metadata for the specified blob as one or more name-value pairs. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Completable setMetadataAsync(Context context, Integer timeout, Map metadata, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + return setMetadataWithRestResponseAsync(context, timeout, metadata, requestId, leaseAccessConditions, modifiedAccessConditions) + .toCompletable(); + } + + /** + * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations. 
+ * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param duration Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration cannot be changed using renew or change. + * @param proposedLeaseId Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID string formats. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + public void acquireLease(Context context, Integer timeout, Integer duration, String proposedLeaseId, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + acquireLeaseAsync(context, timeout, duration, proposedLeaseId, requestId, modifiedAccessConditions).blockingAwait(); + } + + /** + * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. 
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param duration Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration cannot be changed using renew or change. + * @param proposedLeaseId Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID string formats. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture acquireLeaseAsync(Context context, Integer timeout, Integer duration, String proposedLeaseId, String requestId, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(acquireLeaseAsync(context, timeout, duration, proposedLeaseId, requestId, modifiedAccessConditions), serviceCallback); + } + + /** + * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. 
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param duration Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration cannot be changed using renew or change. + * @param proposedLeaseId Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID string formats. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Single acquireLeaseWithRestResponseAsync(Context context, Integer timeout, Integer duration, String proposedLeaseId, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(modifiedAccessConditions); + final String comp = "lease"; + final String action = "acquire"; + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + String ifMatch = null; + if (modifiedAccessConditions != null) { + ifMatch = modifiedAccessConditions.ifMatch(); + } + String ifNoneMatch = null; + if (modifiedAccessConditions != null) { + ifNoneMatch = modifiedAccessConditions.ifNoneMatch(); + } + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.acquireLease(context, this.client.url(), timeout, duration, proposedLeaseId, this.client.version(), requestId, comp, action, ifModifiedSinceConverted, ifUnmodifiedSinceConverted, ifMatch, ifNoneMatch); + } + + /** + * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. 
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param duration Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration cannot be changed using renew or change. + * @param proposedLeaseId Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID string formats. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Completable acquireLeaseAsync(Context context, Integer timeout, Integer duration, String proposedLeaseId, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + return acquireLeaseWithRestResponseAsync(context, timeout, duration, proposedLeaseId, requestId, modifiedAccessConditions) + .toCompletable(); + } + + /** + * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations. + * + * @param context The context to associate with this operation. + * @param leaseId Specifies the current lease ID on the resource. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. 
+ * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + public void releaseLease(Context context, @NonNull String leaseId, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + releaseLeaseAsync(context, leaseId, timeout, requestId, modifiedAccessConditions).blockingAwait(); + } + + /** + * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations. + * + * @param context The context to associate with this operation. + * @param leaseId Specifies the current lease ID on the resource. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. 
+ */ + public ServiceFuture releaseLeaseAsync(Context context, @NonNull String leaseId, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(releaseLeaseAsync(context, leaseId, timeout, requestId, modifiedAccessConditions), serviceCallback); + } + + /** + * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations. + * + * @param context The context to associate with this operation. + * @param leaseId Specifies the current lease ID on the resource. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Single releaseLeaseWithRestResponseAsync(Context context, @NonNull String leaseId, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (leaseId == null) { + throw new IllegalArgumentException("Parameter leaseId is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(modifiedAccessConditions); + final String comp = "lease"; + final String action = "release"; + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + String ifMatch = null; + if (modifiedAccessConditions != null) { + ifMatch = modifiedAccessConditions.ifMatch(); + } + String ifNoneMatch = null; + if (modifiedAccessConditions != null) { + ifNoneMatch = modifiedAccessConditions.ifNoneMatch(); + } + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.releaseLease(context, this.client.url(), timeout, leaseId, this.client.version(), requestId, comp, action, ifModifiedSinceConverted, ifUnmodifiedSinceConverted, ifMatch, ifNoneMatch); + } + + /** + * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations. + * + * @param context The context to associate with this operation. 
+ * @param leaseId Specifies the current lease ID on the resource. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Completable releaseLeaseAsync(Context context, @NonNull String leaseId, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + return releaseLeaseWithRestResponseAsync(context, leaseId, timeout, requestId, modifiedAccessConditions) + .toCompletable(); + } + + /** + * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations. + * + * @param context The context to associate with this operation. + * @param leaseId Specifies the current lease ID on the resource. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. 
+ * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + public void renewLease(Context context, @NonNull String leaseId, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + renewLeaseAsync(context, leaseId, timeout, requestId, modifiedAccessConditions).blockingAwait(); + } + + /** + * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations. + * + * @param context The context to associate with this operation. + * @param leaseId Specifies the current lease ID on the resource. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture renewLeaseAsync(Context context, @NonNull String leaseId, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(renewLeaseAsync(context, leaseId, timeout, requestId, modifiedAccessConditions), serviceCallback); + } + + /** + * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations. + * + * @param context The context to associate with this operation. 
+ * @param leaseId Specifies the current lease ID on the resource. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Single renewLeaseWithRestResponseAsync(Context context, @NonNull String leaseId, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (leaseId == null) { + throw new IllegalArgumentException("Parameter leaseId is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(modifiedAccessConditions); + final String comp = "lease"; + final String action = "renew"; + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + String ifMatch = null; + if (modifiedAccessConditions != null) { + ifMatch = modifiedAccessConditions.ifMatch(); + } + String ifNoneMatch = null; + if (modifiedAccessConditions != null) { + ifNoneMatch = modifiedAccessConditions.ifNoneMatch(); + 
} + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.renewLease(context, this.client.url(), timeout, leaseId, this.client.version(), requestId, comp, action, ifModifiedSinceConverted, ifUnmodifiedSinceConverted, ifMatch, ifNoneMatch); + } + + /** + * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations. + * + * @param context The context to associate with this operation. + * @param leaseId Specifies the current lease ID on the resource. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Completable renewLeaseAsync(Context context, @NonNull String leaseId, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + return renewLeaseWithRestResponseAsync(context, leaseId, timeout, requestId, modifiedAccessConditions) + .toCompletable(); + } + + /** + * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations. + * + * @param context The context to associate with this operation. 
+ * @param leaseId Specifies the current lease ID on the resource. + * @param proposedLeaseId Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID string formats. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + public void changeLease(Context context, @NonNull String leaseId, @NonNull String proposedLeaseId, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + changeLeaseAsync(context, leaseId, proposedLeaseId, timeout, requestId, modifiedAccessConditions).blockingAwait(); + } + + /** + * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations. + * + * @param context The context to associate with this operation. + * @param leaseId Specifies the current lease ID on the resource. + * @param proposedLeaseId Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID string formats. + * @param timeout The timeout parameter is expressed in seconds. 
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture changeLeaseAsync(Context context, @NonNull String leaseId, @NonNull String proposedLeaseId, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(changeLeaseAsync(context, leaseId, proposedLeaseId, timeout, requestId, modifiedAccessConditions), serviceCallback); + } + + /** + * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations. + * + * @param context The context to associate with this operation. + * @param leaseId Specifies the current lease ID on the resource. + * @param proposedLeaseId Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID string formats. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. 
+ * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Single changeLeaseWithRestResponseAsync(Context context, @NonNull String leaseId, @NonNull String proposedLeaseId, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (leaseId == null) { + throw new IllegalArgumentException("Parameter leaseId is required and cannot be null."); + } + if (proposedLeaseId == null) { + throw new IllegalArgumentException("Parameter proposedLeaseId is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(modifiedAccessConditions); + final String comp = "lease"; + final String action = "change"; + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + String ifMatch = null; + if (modifiedAccessConditions != null) { + ifMatch = modifiedAccessConditions.ifMatch(); + } + String ifNoneMatch = null; + if (modifiedAccessConditions != null) { + ifNoneMatch = modifiedAccessConditions.ifNoneMatch(); + } + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + 
DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.changeLease(context, this.client.url(), timeout, leaseId, proposedLeaseId, this.client.version(), requestId, comp, action, ifModifiedSinceConverted, ifUnmodifiedSinceConverted, ifMatch, ifNoneMatch); + } + + /** + * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations. + * + * @param context The context to associate with this operation. + * @param leaseId Specifies the current lease ID on the resource. + * @param proposedLeaseId Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID string formats. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Completable changeLeaseAsync(Context context, @NonNull String leaseId, @NonNull String proposedLeaseId, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + return changeLeaseWithRestResponseAsync(context, leaseId, proposedLeaseId, timeout, requestId, modifiedAccessConditions) + .toCompletable(); + } + + /** + * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param breakPeriod For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter than the time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has expired, but the lease may be held for longer than the break period. If this header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease breaks immediately. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
+ */ + public void breakLease(Context context, Integer timeout, Integer breakPeriod, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + breakLeaseAsync(context, timeout, breakPeriod, requestId, modifiedAccessConditions).blockingAwait(); + } + + /** + * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param breakPeriod For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter than the time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has expired, but the lease may be held for longer than the break period. If this header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease breaks immediately. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. 
+ */ + public ServiceFuture breakLeaseAsync(Context context, Integer timeout, Integer breakPeriod, String requestId, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(breakLeaseAsync(context, timeout, breakPeriod, requestId, modifiedAccessConditions), serviceCallback); + } + + /** + * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param breakPeriod For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter than the time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has expired, but the lease may be held for longer than the break period. If this header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease breaks immediately. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Single breakLeaseWithRestResponseAsync(Context context, Integer timeout, Integer breakPeriod, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(modifiedAccessConditions); + final String comp = "lease"; + final String action = "break"; + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + String ifMatch = null; + if (modifiedAccessConditions != null) { + ifMatch = modifiedAccessConditions.ifMatch(); + } + String ifNoneMatch = null; + if (modifiedAccessConditions != null) { + ifNoneMatch = modifiedAccessConditions.ifNoneMatch(); + } + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.breakLease(context, this.client.url(), timeout, breakPeriod, this.client.version(), requestId, comp, action, ifModifiedSinceConverted, ifUnmodifiedSinceConverted, ifMatch, ifNoneMatch); + } + + /** + * [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. 
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param breakPeriod For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter than the time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has expired, but the lease may be held for longer than the break period. If this header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease breaks immediately. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Completable breakLeaseAsync(Context context, Integer timeout, Integer breakPeriod, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + return breakLeaseWithRestResponseAsync(context, timeout, breakPeriod, requestId, modifiedAccessConditions) + .toCompletable(); + } + + /** + * The Create Snapshot operation creates a read-only snapshot of a blob. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. 
Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + public void createSnapshot(Context context, Integer timeout, Map metadata, String requestId, ModifiedAccessConditions modifiedAccessConditions, LeaseAccessConditions leaseAccessConditions) { + createSnapshotAsync(context, timeout, metadata, requestId, modifiedAccessConditions, leaseAccessConditions).blockingAwait(); + } + + /** + * The Create Snapshot operation creates a read-only snapshot of a blob. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. 
If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture createSnapshotAsync(Context context, Integer timeout, Map metadata, String requestId, ModifiedAccessConditions modifiedAccessConditions, LeaseAccessConditions leaseAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(createSnapshotAsync(context, timeout, metadata, requestId, modifiedAccessConditions, leaseAccessConditions), serviceCallback); + } + + /** + * The Create Snapshot operation creates a read-only snapshot of a blob. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. 
Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Single createSnapshotWithRestResponseAsync(Context context, Integer timeout, Map metadata, String requestId, ModifiedAccessConditions modifiedAccessConditions, LeaseAccessConditions leaseAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(metadata); + Validator.validate(modifiedAccessConditions); + Validator.validate(leaseAccessConditions); + final String comp = "snapshot"; + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + String ifMatch = null; + if (modifiedAccessConditions != null) { + ifMatch = modifiedAccessConditions.ifMatch(); + } + String ifNoneMatch = null; + if (modifiedAccessConditions != null) { + ifNoneMatch = modifiedAccessConditions.ifNoneMatch(); + } + String leaseId = null; + if (leaseAccessConditions != null) { + leaseId = leaseAccessConditions.leaseId(); + } + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.createSnapshot(context, this.client.url(), timeout, metadata, this.client.version(), requestId, comp, ifModifiedSinceConverted, ifUnmodifiedSinceConverted, ifMatch, ifNoneMatch, leaseId); + } + + /** + * The Create Snapshot operation creates a read-only snapshot of a blob. 
+ * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Completable createSnapshotAsync(Context context, Integer timeout, Map metadata, String requestId, ModifiedAccessConditions modifiedAccessConditions, LeaseAccessConditions leaseAccessConditions) { + return createSnapshotWithRestResponseAsync(context, timeout, metadata, requestId, modifiedAccessConditions, leaseAccessConditions) + .toCompletable(); + } + + /** + * The Start Copy From URL operation copies a blob or an internet resource to a new blob. + * + * @param context The context to associate with this operation. 
+ * @param copySource Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob must either be public or must be authenticated via a shared access signature. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param sourceModifiedAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
+ */ + public void startCopyFromURL(Context context, @NonNull URL copySource, Integer timeout, Map metadata, String requestId, SourceModifiedAccessConditions sourceModifiedAccessConditions, ModifiedAccessConditions modifiedAccessConditions, LeaseAccessConditions leaseAccessConditions) { + startCopyFromURLAsync(context, copySource, timeout, metadata, requestId, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions).blockingAwait(); + } + + /** + * The Start Copy From URL operation copies a blob or an internet resource to a new blob. + * + * @param context The context to associate with this operation. + * @param copySource Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob must either be public or must be authenticated via a shared access signature. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+ * @param sourceModifiedAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture startCopyFromURLAsync(Context context, @NonNull URL copySource, Integer timeout, Map metadata, String requestId, SourceModifiedAccessConditions sourceModifiedAccessConditions, ModifiedAccessConditions modifiedAccessConditions, LeaseAccessConditions leaseAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(startCopyFromURLAsync(context, copySource, timeout, metadata, requestId, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions), serviceCallback); + } + + /** + * The Start Copy From URL operation copies a blob or an internet resource to a new blob. + * + * @param context The context to associate with this operation. + * @param copySource Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob must either be public or must be authenticated via a shared access signature. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. 
If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param sourceModifiedAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Single startCopyFromURLWithRestResponseAsync(Context context, @NonNull URL copySource, Integer timeout, Map metadata, String requestId, SourceModifiedAccessConditions sourceModifiedAccessConditions, ModifiedAccessConditions modifiedAccessConditions, LeaseAccessConditions leaseAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (copySource == null) { + throw new IllegalArgumentException("Parameter copySource is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(metadata); + Validator.validate(copySource); + Validator.validate(sourceModifiedAccessConditions); + Validator.validate(modifiedAccessConditions); + Validator.validate(leaseAccessConditions); + OffsetDateTime sourceIfModifiedSince = null; + if (sourceModifiedAccessConditions != null) { + sourceIfModifiedSince = sourceModifiedAccessConditions.sourceIfModifiedSince(); + } + OffsetDateTime sourceIfUnmodifiedSince = null; + if (sourceModifiedAccessConditions != null) { + sourceIfUnmodifiedSince = sourceModifiedAccessConditions.sourceIfUnmodifiedSince(); + } + String sourceIfMatch = null; + if (sourceModifiedAccessConditions != null) { + sourceIfMatch = sourceModifiedAccessConditions.sourceIfMatch(); + } + String sourceIfNoneMatch = null; + if (sourceModifiedAccessConditions != null) { + sourceIfNoneMatch = sourceModifiedAccessConditions.sourceIfNoneMatch(); + } + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + String ifMatch = null; + if (modifiedAccessConditions != null) { + 
ifMatch = modifiedAccessConditions.ifMatch(); + } + String ifNoneMatch = null; + if (modifiedAccessConditions != null) { + ifNoneMatch = modifiedAccessConditions.ifNoneMatch(); + } + String leaseId = null; + if (leaseAccessConditions != null) { + leaseId = leaseAccessConditions.leaseId(); + } + DateTimeRfc1123 sourceIfModifiedSinceConverted = null; + if (sourceIfModifiedSince != null) { + sourceIfModifiedSinceConverted = new DateTimeRfc1123(sourceIfModifiedSince); + } + DateTimeRfc1123 sourceIfUnmodifiedSinceConverted = null; + if (sourceIfUnmodifiedSince != null) { + sourceIfUnmodifiedSinceConverted = new DateTimeRfc1123(sourceIfUnmodifiedSince); + } + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.startCopyFromURL(context, this.client.url(), timeout, metadata, copySource, this.client.version(), requestId, sourceIfModifiedSinceConverted, sourceIfUnmodifiedSinceConverted, sourceIfMatch, sourceIfNoneMatch, ifModifiedSinceConverted, ifUnmodifiedSinceConverted, ifMatch, ifNoneMatch, leaseId); + } + + /** + * The Start Copy From URL operation copies a blob or an internet resource to a new blob. + * + * @param context The context to associate with this operation. + * @param copySource Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob must either be public or must be authenticated via a shared access signature. + * @param timeout The timeout parameter is expressed in seconds. 
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param sourceModifiedAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Completable startCopyFromURLAsync(Context context, @NonNull URL copySource, Integer timeout, Map metadata, String requestId, SourceModifiedAccessConditions sourceModifiedAccessConditions, ModifiedAccessConditions modifiedAccessConditions, LeaseAccessConditions leaseAccessConditions) { + return startCopyFromURLWithRestResponseAsync(context, copySource, timeout, metadata, requestId, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) + .toCompletable(); + } + + /** + * The Copy From URL operation copies a blob or an internet resource to a new blob. 
It will not return a response until the copy is complete. + * + * @param context The context to associate with this operation. + * @param copySource Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob must either be public or must be authenticated via a shared access signature. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param sourceModifiedAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
+ */ + public void copyFromURL(Context context, @NonNull URL copySource, Integer timeout, Map metadata, String requestId, SourceModifiedAccessConditions sourceModifiedAccessConditions, ModifiedAccessConditions modifiedAccessConditions, LeaseAccessConditions leaseAccessConditions) { + copyFromURLAsync(context, copySource, timeout, metadata, requestId, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions).blockingAwait(); + } + + /** + * The Copy From URL operation copies a blob or an internet resource to a new blob. It will not return a response until the copy is complete. + * + * @param context The context to associate with this operation. + * @param copySource Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob must either be public or must be authenticated via a shared access signature. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. 
+ * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param sourceModifiedAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture copyFromURLAsync(Context context, @NonNull URL copySource, Integer timeout, Map metadata, String requestId, SourceModifiedAccessConditions sourceModifiedAccessConditions, ModifiedAccessConditions modifiedAccessConditions, LeaseAccessConditions leaseAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(copyFromURLAsync(context, copySource, timeout, metadata, requestId, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions), serviceCallback); + } + + /** + * The Copy From URL operation copies a blob or an internet resource to a new blob. It will not return a response until the copy is complete. + * + * @param context The context to associate with this operation. + * @param copySource Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob must either be public or must be authenticated via a shared access signature. + * @param timeout The timeout parameter is expressed in seconds. 
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param sourceModifiedAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Single copyFromURLWithRestResponseAsync(Context context, @NonNull URL copySource, Integer timeout, Map metadata, String requestId, SourceModifiedAccessConditions sourceModifiedAccessConditions, ModifiedAccessConditions modifiedAccessConditions, LeaseAccessConditions leaseAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (copySource == null) { + throw new IllegalArgumentException("Parameter copySource is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(metadata); + Validator.validate(copySource); + Validator.validate(sourceModifiedAccessConditions); + Validator.validate(modifiedAccessConditions); + Validator.validate(leaseAccessConditions); + final String xMsRequiresSync = "true"; + OffsetDateTime sourceIfModifiedSince = null; + if (sourceModifiedAccessConditions != null) { + sourceIfModifiedSince = sourceModifiedAccessConditions.sourceIfModifiedSince(); + } + OffsetDateTime sourceIfUnmodifiedSince = null; + if (sourceModifiedAccessConditions != null) { + sourceIfUnmodifiedSince = sourceModifiedAccessConditions.sourceIfUnmodifiedSince(); + } + String sourceIfMatch = null; + if (sourceModifiedAccessConditions != null) { + sourceIfMatch = sourceModifiedAccessConditions.sourceIfMatch(); + } + String sourceIfNoneMatch = null; + if (sourceModifiedAccessConditions != null) { + sourceIfNoneMatch = sourceModifiedAccessConditions.sourceIfNoneMatch(); + } + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + String ifMatch = null; + if 
(modifiedAccessConditions != null) { + ifMatch = modifiedAccessConditions.ifMatch(); + } + String ifNoneMatch = null; + if (modifiedAccessConditions != null) { + ifNoneMatch = modifiedAccessConditions.ifNoneMatch(); + } + String leaseId = null; + if (leaseAccessConditions != null) { + leaseId = leaseAccessConditions.leaseId(); + } + DateTimeRfc1123 sourceIfModifiedSinceConverted = null; + if (sourceIfModifiedSince != null) { + sourceIfModifiedSinceConverted = new DateTimeRfc1123(sourceIfModifiedSince); + } + DateTimeRfc1123 sourceIfUnmodifiedSinceConverted = null; + if (sourceIfUnmodifiedSince != null) { + sourceIfUnmodifiedSinceConverted = new DateTimeRfc1123(sourceIfUnmodifiedSince); + } + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.copyFromURL(context, this.client.url(), timeout, metadata, copySource, this.client.version(), requestId, xMsRequiresSync, sourceIfModifiedSinceConverted, sourceIfUnmodifiedSinceConverted, sourceIfMatch, sourceIfNoneMatch, ifModifiedSinceConverted, ifUnmodifiedSinceConverted, ifMatch, ifNoneMatch, leaseId); + } + + /** + * The Copy From URL operation copies a blob or an internet resource to a new blob. It will not return a response until the copy is complete. + * + * @param context The context to associate with this operation. + * @param copySource Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob must either be public or must be authenticated via a shared access signature. + * @param timeout The timeout parameter is expressed in seconds. 
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param sourceModifiedAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Completable copyFromURLAsync(Context context, @NonNull URL copySource, Integer timeout, Map metadata, String requestId, SourceModifiedAccessConditions sourceModifiedAccessConditions, ModifiedAccessConditions modifiedAccessConditions, LeaseAccessConditions leaseAccessConditions) { + return copyFromURLWithRestResponseAsync(context, copySource, timeout, metadata, requestId, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) + .toCompletable(); + } + + /** + * The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a destination blob with zero length and full metadata. 
+ * + * @param context The context to associate with this operation. + * @param copyId The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + public void abortCopyFromURL(Context context, @NonNull String copyId, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions) { + abortCopyFromURLAsync(context, copyId, timeout, requestId, leaseAccessConditions).blockingAwait(); + } + + /** + * The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a destination blob with zero length and full metadata. + * + * @param context The context to associate with this operation. + * @param copyId The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+ * @param leaseAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture abortCopyFromURLAsync(Context context, @NonNull String copyId, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(abortCopyFromURLAsync(context, copyId, timeout, requestId, leaseAccessConditions), serviceCallback); + } + + /** + * The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a destination blob with zero length and full metadata. + * + * @param context The context to associate with this operation. + * @param copyId The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Single abortCopyFromURLWithRestResponseAsync(Context context, @NonNull String copyId, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (copyId == null) { + throw new IllegalArgumentException("Parameter copyId is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(leaseAccessConditions); + final String comp = "copy"; + final String copyActionAbortConstant = "abort"; + String leaseId = null; + if (leaseAccessConditions != null) { + leaseId = leaseAccessConditions.leaseId(); + } + return service.abortCopyFromURL(context, this.client.url(), copyId, timeout, this.client.version(), requestId, comp, copyActionAbortConstant, leaseId); + } + + /** + * The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a destination blob with zero length and full metadata. + * + * @param context The context to associate with this operation. + * @param copyId The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Completable abortCopyFromURLAsync(Context context, @NonNull String copyId, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions) { + return abortCopyFromURLWithRestResponseAsync(context, copyId, timeout, requestId, leaseAccessConditions) + .toCompletable(); + } + + /** + * The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage account and on a block blob in a blob storage account (locally redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation does not update the blob's ETag. + * + * @param context The context to associate with this operation. + * @param tier Indicates the tier to be set on the blob. Possible values include: 'P4', 'P6', 'P10', 'P20', 'P30', 'P40', 'P50', 'Hot', 'Cool', 'Archive'. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
+ */ + public void setTier(Context context, @NonNull AccessTier tier, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions) { + setTierAsync(context, tier, timeout, requestId, leaseAccessConditions).blockingAwait(); + } + + /** + * The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage account and on a block blob in a blob storage account (locally redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation does not update the blob's ETag. + * + * @param context The context to associate with this operation. + * @param tier Indicates the tier to be set on the blob. Possible values include: 'P4', 'P6', 'P10', 'P20', 'P30', 'P40', 'P50', 'Hot', 'Cool', 'Archive'. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. 
+ */ + public ServiceFuture setTierAsync(Context context, @NonNull AccessTier tier, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(setTierAsync(context, tier, timeout, requestId, leaseAccessConditions), serviceCallback); + } + + /** + * The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage account and on a block blob in a blob storage account (locally redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation does not update the blob's ETag. + * + * @param context The context to associate with this operation. + * @param tier Indicates the tier to be set on the blob. Possible values include: 'P4', 'P6', 'P10', 'P20', 'P30', 'P40', 'P50', 'Hot', 'Cool', 'Archive'. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Single setTierWithRestResponseAsync(Context context, @NonNull AccessTier tier, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (tier == null) { + throw new IllegalArgumentException("Parameter tier is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(leaseAccessConditions); + final String comp = "tier"; + String leaseId = null; + if (leaseAccessConditions != null) { + leaseId = leaseAccessConditions.leaseId(); + } + return service.setTier(context, this.client.url(), timeout, tier, this.client.version(), requestId, comp, leaseId); + } + + /** + * The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage account and on a block blob in a blob storage account (locally redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation does not update the blob's ETag. + * + * @param context The context to associate with this operation. + * @param tier Indicates the tier to be set on the blob. Possible values include: 'P4', 'P6', 'P10', 'P20', 'P30', 'P40', 'P50', 'Hot', 'Cool', 'Archive'. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
 + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Completable which performs the network request upon subscription. + */ + public Completable setTierAsync(Context context, @NonNull AccessTier tier, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions) { + return setTierWithRestResponseAsync(context, tier, timeout, requestId, leaseAccessConditions) + .toCompletable(); + } + + /** + * Returns the sku name and account kind. + * + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + public void getAccountInfo(Context context) { + getAccountInfoAsync(context).blockingAwait(); + } + + /** + * Returns the sku name and account kind. + * + * @param context The context to associate with this operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture getAccountInfoAsync(Context context, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(getAccountInfoAsync(context), serviceCallback); + } + + /** + * Returns the sku name and account kind. + * + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
 + */ + public Single getAccountInfoWithRestResponseAsync(Context context) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + final String restype = "account"; + final String comp = "properties"; + return service.getAccountInfo(context, this.client.url(), this.client.version(), restype, comp); + } + + /** + * Returns the sku name and account kind. + * + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Completable which performs the network request upon subscription. + */ + public Completable getAccountInfoAsync(Context context) { + return getAccountInfoWithRestResponseAsync(context) + .toCompletable(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/GeneratedBlockBlobs.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/GeneratedBlockBlobs.java new file mode 100644 index 0000000000000..fb6eb986f7443 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/GeneratedBlockBlobs.java @@ -0,0 +1,673 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage; + +import com.microsoft.azure.storage.blob.models.BlobHTTPHeaders; +import com.microsoft.azure.storage.blob.models.BlockBlobCommitBlockListResponse; +import com.microsoft.azure.storage.blob.models.BlockBlobGetBlockListResponse; +import com.microsoft.azure.storage.blob.models.BlockBlobStageBlockFromURLResponse; +import com.microsoft.azure.storage.blob.models.BlockBlobStageBlockResponse; +import com.microsoft.azure.storage.blob.models.BlockBlobUploadResponse; +import com.microsoft.azure.storage.blob.models.BlockList; +import com.microsoft.azure.storage.blob.models.BlockListType; +import com.microsoft.azure.storage.blob.models.BlockLookupList; +import com.microsoft.azure.storage.blob.models.LeaseAccessConditions; +import com.microsoft.azure.storage.blob.models.ModifiedAccessConditions; +import com.microsoft.azure.storage.blob.models.StorageErrorException; +import com.microsoft.rest.v2.Context; +import com.microsoft.rest.v2.DateTimeRfc1123; +import com.microsoft.rest.v2.RestProxy; +import com.microsoft.rest.v2.ServiceCallback; +import com.microsoft.rest.v2.ServiceFuture; +import com.microsoft.rest.v2.Validator; +import com.microsoft.rest.v2.annotations.BodyParam; +import com.microsoft.rest.v2.annotations.ExpectedResponses; +import com.microsoft.rest.v2.annotations.GET; +import com.microsoft.rest.v2.annotations.HeaderParam; +import com.microsoft.rest.v2.annotations.Host; +import com.microsoft.rest.v2.annotations.HostParam; +import com.microsoft.rest.v2.annotations.PUT; +import com.microsoft.rest.v2.annotations.QueryParam; +import com.microsoft.rest.v2.annotations.UnexpectedResponseExceptionType; +import com.microsoft.rest.v2.util.Base64Util; +import io.reactivex.Completable; +import io.reactivex.Flowable; +import io.reactivex.Maybe; +import io.reactivex.Single; +import io.reactivex.annotations.NonNull; +import java.net.URL; +import java.nio.ByteBuffer; +import java.time.OffsetDateTime; +import java.util.HashMap; +import 
java.util.Map; + +/** + * An instance of this class provides access to all the operations defined in + * GeneratedBlockBlobs. + */ +public final class GeneratedBlockBlobs { + /** + * The proxy service used to perform REST calls. + */ + private BlockBlobsService service; + + /** + * The service client containing this operation class. + */ + private GeneratedStorageClient client; + + /** + * Initializes an instance of GeneratedBlockBlobs. + * + * @param client the instance of the service client containing this operation class. + */ + public GeneratedBlockBlobs(GeneratedStorageClient client) { + this.service = RestProxy.create(BlockBlobsService.class, client); + this.client = client; + } + + /** + * The interface defining all the services for GeneratedBlockBlobs to be + * used by the proxy service to perform REST calls. + */ + @Host("{url}") + private interface BlockBlobsService { + @PUT("{containerName}/{blob}") + @ExpectedResponses({201}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single upload(Context context, @HostParam("url") String url, @BodyParam("application/octet-stream") Flowable body, @QueryParam("timeout") Integer timeout, @HeaderParam("Content-Length") long contentLength, @HeaderParam("x-ms-meta-") Map metadata, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @HeaderParam("x-ms-blob-type") String blobType, @HeaderParam("x-ms-blob-content-type") String blobContentType, @HeaderParam("x-ms-blob-content-encoding") String blobContentEncoding, @HeaderParam("x-ms-blob-content-language") String blobContentLanguage, @HeaderParam("x-ms-blob-content-md5") String blobContentMD5, @HeaderParam("x-ms-blob-cache-control") String blobCacheControl, @HeaderParam("x-ms-blob-content-disposition") String blobContentDisposition, @HeaderParam("x-ms-lease-id") String leaseId, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 
ifUnmodifiedSince, @HeaderParam("If-Match") String ifMatch, @HeaderParam("If-None-Match") String ifNoneMatch); + + @PUT("{containerName}/{blob}") + @ExpectedResponses({201}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single stageBlock(Context context, @HostParam("url") String url, @QueryParam("blockid") String blockId, @HeaderParam("Content-Length") long contentLength, @HeaderParam("Content-MD5") String transactionalContentMD5, @BodyParam("application/octet-stream") Flowable body, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp, @HeaderParam("x-ms-lease-id") String leaseId); + + @PUT("{containerName}/{blob}") + @ExpectedResponses({201}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single stageBlockFromURL(Context context, @HostParam("url") String url, @QueryParam("blockid") String blockId, @HeaderParam("Content-Length") long contentLength, @HeaderParam("x-ms-copy-source") URL copySource, @HeaderParam("x-ms-source-range") String sourceRange, @HeaderParam("x-ms-source-content-md5") String sourceContentMD5, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp, @HeaderParam("x-ms-lease-id") String leaseId); + + @PUT("{containerName}/{blob}") + @ExpectedResponses({201}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single commitBlockList(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-meta-") Map metadata, @BodyParam("application/xml; charset=utf-8") BlockLookupList blocks, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp, @HeaderParam("x-ms-blob-cache-control") String blobCacheControl, @HeaderParam("x-ms-blob-content-type") 
String blobContentType, @HeaderParam("x-ms-blob-content-encoding") String blobContentEncoding, @HeaderParam("x-ms-blob-content-language") String blobContentLanguage, @HeaderParam("x-ms-blob-content-md5") String blobContentMD5, @HeaderParam("x-ms-blob-content-disposition") String blobContentDisposition, @HeaderParam("x-ms-lease-id") String leaseId, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince, @HeaderParam("If-Match") String ifMatch, @HeaderParam("If-None-Match") String ifNoneMatch); + + @GET("{containerName}/{blob}") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single getBlockList(Context context, @HostParam("url") String url, @QueryParam("snapshot") String snapshot, @QueryParam("blocklisttype") BlockListType listType, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp, @HeaderParam("x-ms-lease-id") String leaseId); + } + + /** + * The Upload Block Blob operation updates the content of an existing block blob. Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported with Put Blob; the content of the existing blob is overwritten with the content of the new blob. To perform a partial update of the content of a block blob, use the Put Block List operation. + * + * @param context The context to associate with this operation. + * @param body Initial data. + * @param contentLength The length of the request. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. 
Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param blobHTTPHeaders Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + public void upload(Context context, @NonNull Flowable body, @NonNull long contentLength, Integer timeout, Map metadata, String requestId, BlobHTTPHeaders blobHTTPHeaders, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + uploadAsync(context, body, contentLength, timeout, metadata, requestId, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions).blockingAwait(); + } + + /** + * The Upload Block Blob operation updates the content of an existing block blob. Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported with Put Blob; the content of the existing blob is overwritten with the content of the new blob. 
To perform a partial update of the content of a block blob, use the Put Block List operation. + * + * @param context The context to associate with this operation. + * @param body Initial data. + * @param contentLength The length of the request. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param blobHTTPHeaders Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. 
+ */ + public ServiceFuture uploadAsync(Context context, @NonNull Flowable body, @NonNull long contentLength, Integer timeout, Map metadata, String requestId, BlobHTTPHeaders blobHTTPHeaders, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(uploadAsync(context, body, contentLength, timeout, metadata, requestId, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions), serviceCallback); + } + + /** + * The Upload Block Blob operation updates the content of an existing block blob. Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported with Put Blob; the content of the existing blob is overwritten with the content of the new blob. To perform a partial update of the content of a block blob, use the Put Block List operation. + * + * @param context The context to associate with this operation. + * @param body Initial data. + * @param contentLength The length of the request. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. 
+ * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param blobHTTPHeaders Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Single uploadWithRestResponseAsync(Context context, @NonNull Flowable body, @NonNull long contentLength, Integer timeout, Map metadata, String requestId, BlobHTTPHeaders blobHTTPHeaders, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (body == null) { + throw new IllegalArgumentException("Parameter body is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(metadata); + Validator.validate(blobHTTPHeaders); + Validator.validate(leaseAccessConditions); + Validator.validate(modifiedAccessConditions); + final String blobType = "BlockBlob"; + String blobContentType = null; + if (blobHTTPHeaders != null) { + blobContentType = blobHTTPHeaders.blobContentType(); + } + String blobContentEncoding = null; + if (blobHTTPHeaders != null) { + blobContentEncoding = blobHTTPHeaders.blobContentEncoding(); + } + String blobContentLanguage = null; + if (blobHTTPHeaders != null) { + blobContentLanguage = blobHTTPHeaders.blobContentLanguage(); + } + byte[] blobContentMD5 = null; + if (blobHTTPHeaders != null) { + blobContentMD5 = blobHTTPHeaders.blobContentMD5(); + } + String blobCacheControl = 
null; + if (blobHTTPHeaders != null) { + blobCacheControl = blobHTTPHeaders.blobCacheControl(); + } + String blobContentDisposition = null; + if (blobHTTPHeaders != null) { + blobContentDisposition = blobHTTPHeaders.blobContentDisposition(); + } + String leaseId = null; + if (leaseAccessConditions != null) { + leaseId = leaseAccessConditions.leaseId(); + } + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + String ifMatch = null; + if (modifiedAccessConditions != null) { + ifMatch = modifiedAccessConditions.ifMatch(); + } + String ifNoneMatch = null; + if (modifiedAccessConditions != null) { + ifNoneMatch = modifiedAccessConditions.ifNoneMatch(); + } + String blobContentMD5Converted = Base64Util.encodeToString(blobContentMD5); + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.upload(context, this.client.url(), body, timeout, contentLength, metadata, this.client.version(), requestId, blobType, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5Converted, blobCacheControl, blobContentDisposition, leaseId, ifModifiedSinceConverted, ifUnmodifiedSinceConverted, ifMatch, ifNoneMatch); + } + + /** + * The Upload Block Blob operation updates the content of an existing block blob. Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported with Put Blob; the content of the existing blob is overwritten with the content of the new blob. 
To perform a partial update of the content of a block blob, use the Put Block List operation. + * + * @param context The context to associate with this operation. + * @param body Initial data. + * @param contentLength The length of the request. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param blobHTTPHeaders Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Completable uploadAsync(Context context, @NonNull Flowable body, @NonNull long contentLength, Integer timeout, Map metadata, String requestId, BlobHTTPHeaders blobHTTPHeaders, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + return uploadWithRestResponseAsync(context, body, contentLength, timeout, metadata, requestId, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions) + .toCompletable(); + } + + /** + * The Stage Block operation creates a new block to be committed as part of a blob. + * + * @param context The context to associate with this operation. + * @param blockId A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the same size for each block. + * @param contentLength The length of the request. + * @param body Initial data. + * @param transactionalContentMD5 Specify the transactional md5 for the body, to be validated by the service. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
+ */ + public void stageBlock(Context context, @NonNull String blockId, @NonNull long contentLength, @NonNull Flowable body, byte[] transactionalContentMD5, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions) { + stageBlockAsync(context, blockId, contentLength, body, transactionalContentMD5, timeout, requestId, leaseAccessConditions).blockingAwait(); + } + + /** + * The Stage Block operation creates a new block to be committed as part of a blob. + * + * @param context The context to associate with this operation. + * @param blockId A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the same size for each block. + * @param contentLength The length of the request. + * @param body Initial data. + * @param transactionalContentMD5 Specify the transactional md5 for the body, to be validated by the service. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. 
+ */ + public ServiceFuture stageBlockAsync(Context context, @NonNull String blockId, @NonNull long contentLength, @NonNull Flowable body, byte[] transactionalContentMD5, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(stageBlockAsync(context, blockId, contentLength, body, transactionalContentMD5, timeout, requestId, leaseAccessConditions), serviceCallback); + } + + /** + * The Stage Block operation creates a new block to be committed as part of a blob. + * + * @param context The context to associate with this operation. + * @param blockId A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the same size for each block. + * @param contentLength The length of the request. + * @param body Initial data. + * @param transactionalContentMD5 Specify the transactional md5 for the body, to be validated by the service. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Single stageBlockWithRestResponseAsync(Context context, @NonNull String blockId, @NonNull long contentLength, @NonNull Flowable body, byte[] transactionalContentMD5, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (blockId == null) { + throw new IllegalArgumentException("Parameter blockId is required and cannot be null."); + } + if (body == null) { + throw new IllegalArgumentException("Parameter body is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(leaseAccessConditions); + final String comp = "block"; + String leaseId = null; + if (leaseAccessConditions != null) { + leaseId = leaseAccessConditions.leaseId(); + } + String transactionalContentMD5Converted = Base64Util.encodeToString(transactionalContentMD5); + return service.stageBlock(context, this.client.url(), blockId, contentLength, transactionalContentMD5Converted, body, timeout, this.client.version(), requestId, comp, leaseId); + } + + /** + * The Stage Block operation creates a new block to be committed as part of a blob. + * + * @param context The context to associate with this operation. + * @param blockId A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the same size for each block. + * @param contentLength The length of the request. + * @param body Initial data. + * @param transactionalContentMD5 Specify the transactional md5 for the body, to be validated by the service. + * @param timeout The timeout parameter is expressed in seconds. 
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Completable stageBlockAsync(Context context, @NonNull String blockId, @NonNull long contentLength, @NonNull Flowable body, byte[] transactionalContentMD5, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions) { + return stageBlockWithRestResponseAsync(context, blockId, contentLength, body, transactionalContentMD5, timeout, requestId, leaseAccessConditions) + .toCompletable(); + } + + /** + * The Stage Block operation creates a new block to be committed as part of a blob where the contents are read from a URL. + * + * @param context The context to associate with this operation. + * @param blockId A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the same size for each block. + * @param contentLength The length of the request. + * @param sourceUrl Specify a URL to the copy source. + * @param sourceRange Bytes of source data in the specified range. + * @param sourceContentMD5 Specify the md5 calculated for the range of bytes that must be read from the copy source. + * @param timeout The timeout parameter is expressed in seconds. 
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + public void stageBlockFromURL(Context context, @NonNull String blockId, @NonNull long contentLength, @NonNull URL sourceUrl, String sourceRange, byte[] sourceContentMD5, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions) { + stageBlockFromURLAsync(context, blockId, contentLength, sourceUrl, sourceRange, sourceContentMD5, timeout, requestId, leaseAccessConditions).blockingAwait(); + } + + /** + * The Stage Block operation creates a new block to be committed as part of a blob where the contents are read from a URL. + * + * @param context The context to associate with this operation. + * @param blockId A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the same size for each block. + * @param contentLength The length of the request. + * @param sourceUrl Specify a URL to the copy source. + * @param sourceRange Bytes of source data in the specified range. + * @param sourceContentMD5 Specify the md5 calculated for the range of bytes that must be read from the copy source. + * @param timeout The timeout parameter is expressed in seconds. 
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture stageBlockFromURLAsync(Context context, @NonNull String blockId, @NonNull long contentLength, @NonNull URL sourceUrl, String sourceRange, byte[] sourceContentMD5, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(stageBlockFromURLAsync(context, blockId, contentLength, sourceUrl, sourceRange, sourceContentMD5, timeout, requestId, leaseAccessConditions), serviceCallback); + } + + /** + * The Stage Block operation creates a new block to be committed as part of a blob where the contents are read from a URL. + * + * @param context The context to associate with this operation. + * @param blockId A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the same size for each block. + * @param contentLength The length of the request. + * @param sourceUrl Specify a URL to the copy source. + * @param sourceRange Bytes of source data in the specified range. + * @param sourceContentMD5 Specify the md5 calculated for the range of bytes that must be read from the copy source. 
+ * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Single stageBlockFromURLWithRestResponseAsync(Context context, @NonNull String blockId, @NonNull long contentLength, @NonNull URL sourceUrl, String sourceRange, byte[] sourceContentMD5, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (blockId == null) { + throw new IllegalArgumentException("Parameter blockId is required and cannot be null."); + } + if (sourceUrl == null) { + throw new IllegalArgumentException("Parameter sourceUrl is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(sourceUrl); + Validator.validate(leaseAccessConditions); + final String comp = "block"; + String leaseId = null; + if (leaseAccessConditions != null) { + leaseId = leaseAccessConditions.leaseId(); + } + String sourceContentMD5Converted = Base64Util.encodeToString(sourceContentMD5); + return service.stageBlockFromURL(context, this.client.url(), blockId, contentLength, sourceUrl, sourceRange, sourceContentMD5Converted, timeout, this.client.version(), requestId, comp, leaseId); + } + + /** + 
* The Stage Block operation creates a new block to be committed as part of a blob where the contents are read from a URL. + * + * @param context The context to associate with this operation. + * @param blockId A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the same size for each block. + * @param contentLength The length of the request. + * @param sourceUrl Specify a URL to the copy source. + * @param sourceRange Bytes of source data in the specified range. + * @param sourceContentMD5 Specify the md5 calculated for the range of bytes that must be read from the copy source. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Completable stageBlockFromURLAsync(Context context, @NonNull String blockId, @NonNull long contentLength, @NonNull URL sourceUrl, String sourceRange, byte[] sourceContentMD5, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions) { + return stageBlockFromURLWithRestResponseAsync(context, blockId, contentLength, sourceUrl, sourceRange, sourceContentMD5, timeout, requestId, leaseAccessConditions) + .toCompletable(); + } + + /** + * The Commit Block List operation writes a blob by specifying the list of block IDs that make up the blob. 
In order to be written as part of a blob, a block must have been successfully written to the server in a prior Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that have changed, then committing the new and existing blocks together. You can do this by specifying whether to commit a block from the committed block list or from the uncommitted block list, or to commit the most recently uploaded version of the block, whichever list it may belong to. + * + * @param context The context to associate with this operation. + * @param blocks the BlockLookupList value. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param blobHTTPHeaders Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. 
+ * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + public void commitBlockList(Context context, @NonNull BlockLookupList blocks, Integer timeout, Map metadata, String requestId, BlobHTTPHeaders blobHTTPHeaders, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + commitBlockListAsync(context, blocks, timeout, metadata, requestId, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions).blockingAwait(); + } + + /** + * The Commit Block List operation writes a blob by specifying the list of block IDs that make up the blob. In order to be written as part of a blob, a block must have been successfully written to the server in a prior Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that have changed, then committing the new and existing blocks together. You can do this by specifying whether to commit a block from the committed block list or from the uncommitted block list, or to commit the most recently uploaded version of the block, whichever list it may belong to. + * + * @param context The context to associate with this operation. + * @param blocks the BlockLookupList value. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. 
Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param blobHTTPHeaders Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture commitBlockListAsync(Context context, @NonNull BlockLookupList blocks, Integer timeout, Map metadata, String requestId, BlobHTTPHeaders blobHTTPHeaders, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(commitBlockListAsync(context, blocks, timeout, metadata, requestId, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions), serviceCallback); + } + + /** + * The Commit Block List operation writes a blob by specifying the list of block IDs that make up the blob. In order to be written as part of a blob, a block must have been successfully written to the server in a prior Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that have changed, then committing the new and existing blocks together. You can do this by specifying whether to commit a block from the committed block list or from the uncommitted block list, or to commit the most recently uploaded version of the block, whichever list it may belong to. 
+ * + * @param context The context to associate with this operation. + * @param blocks the BlockLookupList value. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param blobHTTPHeaders Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Single commitBlockListWithRestResponseAsync(Context context, @NonNull BlockLookupList blocks, Integer timeout, Map metadata, String requestId, BlobHTTPHeaders blobHTTPHeaders, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (blocks == null) { + throw new IllegalArgumentException("Parameter blocks is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(metadata); + Validator.validate(blocks); + Validator.validate(blobHTTPHeaders); + Validator.validate(leaseAccessConditions); + Validator.validate(modifiedAccessConditions); + final String comp = "blocklist"; + String blobCacheControl = null; + if (blobHTTPHeaders != null) { + blobCacheControl = blobHTTPHeaders.blobCacheControl(); + } + String blobContentType = null; + if (blobHTTPHeaders != null) { + blobContentType = blobHTTPHeaders.blobContentType(); + } + String blobContentEncoding = null; + if (blobHTTPHeaders != null) { + blobContentEncoding = blobHTTPHeaders.blobContentEncoding(); + } + String blobContentLanguage = null; + if (blobHTTPHeaders != null) { + blobContentLanguage = blobHTTPHeaders.blobContentLanguage(); + } + byte[] blobContentMD5 = null; + if (blobHTTPHeaders != null) { + blobContentMD5 = blobHTTPHeaders.blobContentMD5(); + } + String blobContentDisposition = null; + if (blobHTTPHeaders != null) { + blobContentDisposition = blobHTTPHeaders.blobContentDisposition(); + } + String leaseId = null; + if (leaseAccessConditions != null) { + leaseId = leaseAccessConditions.leaseId(); + } + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime 
ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + String ifMatch = null; + if (modifiedAccessConditions != null) { + ifMatch = modifiedAccessConditions.ifMatch(); + } + String ifNoneMatch = null; + if (modifiedAccessConditions != null) { + ifNoneMatch = modifiedAccessConditions.ifNoneMatch(); + } + String blobContentMD5Converted = Base64Util.encodeToString(blobContentMD5); + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.commitBlockList(context, this.client.url(), timeout, metadata, blocks, this.client.version(), requestId, comp, blobCacheControl, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5Converted, blobContentDisposition, leaseId, ifModifiedSinceConverted, ifUnmodifiedSinceConverted, ifMatch, ifNoneMatch); + } + + /** + * The Commit Block List operation writes a blob by specifying the list of block IDs that make up the blob. In order to be written as part of a blob, a block must have been successfully written to the server in a prior Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that have changed, then committing the new and existing blocks together. You can do this by specifying whether to commit a block from the committed block list or from the uncommitted block list, or to commit the most recently uploaded version of the block, whichever list it may belong to. + * + * @param context The context to associate with this operation. + * @param blocks the BlockLookupList value. + * @param timeout The timeout parameter is expressed in seconds. 
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param blobHTTPHeaders Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Completable commitBlockListAsync(Context context, @NonNull BlockLookupList blocks, Integer timeout, Map metadata, String requestId, BlobHTTPHeaders blobHTTPHeaders, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + return commitBlockListWithRestResponseAsync(context, blocks, timeout, metadata, requestId, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions) + .toCompletable(); + } + + /** + * The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob. + * + * @param context The context to associate with this operation. 
+ * @param listType Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together. Possible values include: 'committed', 'uncommitted', 'all'. + * @param snapshot The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the BlockList object if successful. + */ + public BlockList getBlockList(Context context, @NonNull BlockListType listType, String snapshot, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions) { + return getBlockListAsync(context, listType, snapshot, timeout, requestId, leaseAccessConditions).blockingGet(); + } + + /** + * The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob. + * + * @param context The context to associate with this operation. + * @param listType Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together. 
Possible values include: 'committed', 'uncommitted', 'all'. + * @param snapshot The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture getBlockListAsync(Context context, @NonNull BlockListType listType, String snapshot, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(getBlockListAsync(context, listType, snapshot, timeout, requestId, leaseAccessConditions), serviceCallback); + } + + /** + * The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob. + * + * @param context The context to associate with this operation. + * @param listType Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together. Possible values include: 'committed', 'uncommitted', 'all'. 
+ * @param snapshot The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Single getBlockListWithRestResponseAsync(Context context, @NonNull BlockListType listType, String snapshot, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (listType == null) { + throw new IllegalArgumentException("Parameter listType is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(leaseAccessConditions); + final String comp = "blocklist"; + String leaseId = null; + if (leaseAccessConditions != null) { + leaseId = leaseAccessConditions.leaseId(); + } + return service.getBlockList(context, this.client.url(), snapshot, listType, timeout, this.client.version(), requestId, comp, leaseId); + } + + /** + * The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob. + * + * @param context The context to associate with this operation. + * @param listType Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together. Possible values include: 'committed', 'uncommitted', 'all'. + * @param snapshot The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. 
+ * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Maybe getBlockListAsync(Context context, @NonNull BlockListType listType, String snapshot, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions) { + return getBlockListWithRestResponseAsync(context, listType, snapshot, timeout, requestId, leaseAccessConditions) + .flatMapMaybe((BlockBlobGetBlockListResponse res) -> res.body() == null ? Maybe.empty() : Maybe.just(res.body())); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/GeneratedContainers.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/GeneratedContainers.java new file mode 100644 index 0000000000000..132cee3f080f2 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/GeneratedContainers.java @@ -0,0 +1,1355 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage; + +import com.microsoft.azure.storage.blob.implementation.SignedIdentifiersWrapper; +import com.microsoft.azure.storage.blob.models.ContainerAcquireLeaseResponse; +import com.microsoft.azure.storage.blob.models.ContainerBreakLeaseResponse; +import com.microsoft.azure.storage.blob.models.ContainerChangeLeaseResponse; +import com.microsoft.azure.storage.blob.models.ContainerCreateResponse; +import com.microsoft.azure.storage.blob.models.ContainerDeleteResponse; +import com.microsoft.azure.storage.blob.models.ContainerGetAccessPolicyResponse; +import com.microsoft.azure.storage.blob.models.ContainerGetAccountInfoResponse; +import com.microsoft.azure.storage.blob.models.ContainerGetPropertiesResponse; +import com.microsoft.azure.storage.blob.models.ContainerListBlobFlatSegmentResponse; +import com.microsoft.azure.storage.blob.models.ContainerListBlobHierarchySegmentResponse; +import com.microsoft.azure.storage.blob.models.ContainerReleaseLeaseResponse; +import com.microsoft.azure.storage.blob.models.ContainerRenewLeaseResponse; +import com.microsoft.azure.storage.blob.models.ContainerSetAccessPolicyResponse; +import com.microsoft.azure.storage.blob.models.ContainerSetMetadataResponse; +import com.microsoft.azure.storage.blob.models.LeaseAccessConditions; +import com.microsoft.azure.storage.blob.models.ListBlobsFlatSegmentResponse; +import com.microsoft.azure.storage.blob.models.ListBlobsHierarchySegmentResponse; +import com.microsoft.azure.storage.blob.models.ListBlobsIncludeItem; +import com.microsoft.azure.storage.blob.models.ModifiedAccessConditions; +import com.microsoft.azure.storage.blob.models.PublicAccessType; +import com.microsoft.azure.storage.blob.models.SignedIdentifier; +import com.microsoft.azure.storage.blob.models.StorageErrorException; +import com.microsoft.rest.v2.CollectionFormat; +import com.microsoft.rest.v2.Context; +import com.microsoft.rest.v2.DateTimeRfc1123; +import com.microsoft.rest.v2.RestProxy; 
+import com.microsoft.rest.v2.ServiceCallback; +import com.microsoft.rest.v2.ServiceFuture; +import com.microsoft.rest.v2.Validator; +import com.microsoft.rest.v2.annotations.BodyParam; +import com.microsoft.rest.v2.annotations.DELETE; +import com.microsoft.rest.v2.annotations.ExpectedResponses; +import com.microsoft.rest.v2.annotations.GET; +import com.microsoft.rest.v2.annotations.HeaderParam; +import com.microsoft.rest.v2.annotations.Host; +import com.microsoft.rest.v2.annotations.HostParam; +import com.microsoft.rest.v2.annotations.PUT; +import com.microsoft.rest.v2.annotations.QueryParam; +import com.microsoft.rest.v2.annotations.UnexpectedResponseExceptionType; +import io.reactivex.Completable; +import io.reactivex.Maybe; +import io.reactivex.Single; +import io.reactivex.annotations.NonNull; + +import java.time.OffsetDateTime; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * An instance of this class provides access to all the operations defined in + * GeneratedContainers. + */ +public final class GeneratedContainers { + /** + * The proxy service used to perform REST calls. + */ + private ContainersService service; + + /** + * The service client containing this operation class. + */ + private GeneratedStorageClient client; + + /** + * Initializes an instance of GeneratedContainers. + * + * @param client the instance of the service client containing this operation class. + */ + public GeneratedContainers(GeneratedStorageClient client) { + this.service = RestProxy.create(ContainersService.class, client); + this.client = client; + } + + /** + * The interface defining all the services for GeneratedContainers to be + * used by the proxy service to perform REST calls. 
+ */ + @Host("{url}") + private interface ContainersService { + @PUT("{containerName}") + @ExpectedResponses({201}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single create(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-meta-") Map metadata, @HeaderParam("x-ms-blob-public-access") PublicAccessType access, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("restype") String restype); + + @GET("{containerName}") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single getProperties(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("restype") String restype, @HeaderParam("x-ms-lease-id") String leaseId); + + @DELETE("{containerName}") + @ExpectedResponses({202}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single delete(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("restype") String restype, @HeaderParam("x-ms-lease-id") String leaseId, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince); + + @PUT("{containerName}") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single setMetadata(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-meta-") Map metadata, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("restype") String restype, @QueryParam("comp") String comp, @HeaderParam("x-ms-lease-id") String leaseId, 
@HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince); + + @GET("{containerName}") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single getAccessPolicy(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("restype") String restype, @QueryParam("comp") String comp, @HeaderParam("x-ms-lease-id") String leaseId); + + @PUT("{containerName}") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single setAccessPolicy(Context context, @HostParam("url") String url, @BodyParam("application/xml; charset=utf-8") SignedIdentifiersWrapper containerAcl, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-blob-public-access") PublicAccessType access, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("restype") String restype, @QueryParam("comp") String comp, @HeaderParam("x-ms-lease-id") String leaseId, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince); + + @PUT("{containerName}") + @ExpectedResponses({201}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single acquireLease(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-lease-duration") Integer duration, @HeaderParam("x-ms-proposed-lease-id") String proposedLeaseId, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp, @QueryParam("restype") String restype, @HeaderParam("x-ms-lease-action") String action, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince); + + @PUT("{containerName}") + 
@ExpectedResponses({200}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single releaseLease(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-lease-id") String leaseId, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp, @QueryParam("restype") String restype, @HeaderParam("x-ms-lease-action") String action, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince); + + @PUT("{containerName}") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single renewLease(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-lease-id") String leaseId, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp, @QueryParam("restype") String restype, @HeaderParam("x-ms-lease-action") String action, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince); + + @PUT("{containerName}") + @ExpectedResponses({202}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single breakLease(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-lease-break-period") Integer breakPeriod, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp, @QueryParam("restype") String restype, @HeaderParam("x-ms-lease-action") String action, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince); + + @PUT("{containerName}") + @ExpectedResponses({200}) + 
@UnexpectedResponseExceptionType(StorageErrorException.class) + Single changeLease(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-lease-id") String leaseId, @HeaderParam("x-ms-proposed-lease-id") String proposedLeaseId, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp, @QueryParam("restype") String restype, @HeaderParam("x-ms-lease-action") String action, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince); + + @GET("{containerName}") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single listBlobFlatSegment(Context context, @HostParam("url") String url, @QueryParam("prefix") String prefix, @QueryParam("marker") String marker, @QueryParam("maxresults") Integer maxresults, @QueryParam("include") String include, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("restype") String restype, @QueryParam("comp") String comp); + + @GET("{containerName}") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single listBlobHierarchySegment(Context context, @HostParam("url") String url, @QueryParam("prefix") String prefix, @QueryParam("delimiter") String delimiter, @QueryParam("marker") String marker, @QueryParam("maxresults") Integer maxresults, @QueryParam("include") String include, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("restype") String restype, @QueryParam("comp") String comp); + + @GET("{containerName}") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single getAccountInfo(Context context, 
@HostParam("url") String url, @HeaderParam("x-ms-version") String version, @QueryParam("restype") String restype, @QueryParam("comp") String comp); + } + + /** + * creates a new container under the specified account. If the container with the same name already exists, the operation fails. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param access Specifies whether data in the container may be accessed publicly and the level of access. Possible values include: 'container', 'blob'. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
+ */ + public void create(Context context, Integer timeout, Map metadata, PublicAccessType access, String requestId) { + createAsync(context, timeout, metadata, access, requestId).blockingAwait(); + } + + /** + * creates a new container under the specified account. If the container with the same name already exists, the operation fails. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param access Specifies whether data in the container may be accessed publicly and the level of access. Possible values include: 'container', 'blob'. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. 
+ */ + public ServiceFuture createAsync(Context context, Integer timeout, Map metadata, PublicAccessType access, String requestId, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(createAsync(context, timeout, metadata, access, requestId), serviceCallback); + } + + /** + * creates a new container under the specified account. If the container with the same name already exists, the operation fails. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param access Specifies whether data in the container may be accessed publicly and the level of access. Possible values include: 'container', 'blob'. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Single createWithRestResponseAsync(Context context, Integer timeout, Map metadata, PublicAccessType access, String requestId) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(metadata); + final String restype = "container"; + return service.create(context, this.client.url(), timeout, metadata, access, this.client.version(), requestId, restype); + } + + /** + * creates a new container under the specified account. If the container with the same name already exists, the operation fails. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param access Specifies whether data in the container may be accessed publicly and the level of access. Possible values include: 'container', 'blob'. 
+ * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Completable createAsync(Context context, Integer timeout, Map metadata, PublicAccessType access, String requestId) { + return createWithRestResponseAsync(context, timeout, metadata, access, requestId) + .toCompletable(); + } + + /** + * returns all user-defined metadata and system properties for the specified container. The data returned does not include the container's list of blobs. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + public void getProperties(Context context, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions) { + getPropertiesAsync(context, timeout, requestId, leaseAccessConditions).blockingAwait(); + } + + /** + * returns all user-defined metadata and system properties for the specified container. The data returned does not include the container's list of blobs. 
+ * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture getPropertiesAsync(Context context, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(getPropertiesAsync(context, timeout, requestId, leaseAccessConditions), serviceCallback); + } + + /** + * returns all user-defined metadata and system properties for the specified container. The data returned does not include the container's list of blobs. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. 
+ * @return a Single which performs the network request upon subscription. + */ + public Single getPropertiesWithRestResponseAsync(Context context, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(leaseAccessConditions); + final String restype = "container"; + String leaseId = null; + if (leaseAccessConditions != null) { + leaseId = leaseAccessConditions.leaseId(); + } + return service.getProperties(context, this.client.url(), timeout, this.client.version(), requestId, restype, leaseId); + } + + /** + * returns all user-defined metadata and system properties for the specified container. The data returned does not include the container's list of blobs. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Completable getPropertiesAsync(Context context, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions) { + return getPropertiesWithRestResponseAsync(context, timeout, requestId, leaseAccessConditions) + .toCompletable(); + } + + /** + * operation marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + public void delete(Context context, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + deleteAsync(context, timeout, requestId, leaseAccessConditions, modifiedAccessConditions).blockingAwait(); + } + + /** + * operation marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. 
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture deleteAsync(Context context, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(deleteAsync(context, timeout, requestId, leaseAccessConditions, modifiedAccessConditions), serviceCallback); + } + + /** + * operation marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Single deleteWithRestResponseAsync(Context context, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(leaseAccessConditions); + Validator.validate(modifiedAccessConditions); + final String restype = "container"; + String leaseId = null; + if (leaseAccessConditions != null) { + leaseId = leaseAccessConditions.leaseId(); + } + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.delete(context, this.client.url(), timeout, this.client.version(), requestId, restype, leaseId, ifModifiedSinceConverted, ifUnmodifiedSinceConverted); + } + + /** + * operation marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. 
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Completable deleteAsync(Context context, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + return deleteWithRestResponseAsync(context, timeout, requestId, leaseAccessConditions, modifiedAccessConditions) + .toCompletable(); + } + + /** + * operation sets one or more user-defined name-value pairs for the specified container. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. 
See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + public void setMetadata(Context context, Integer timeout, Map metadata, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + setMetadataAsync(context, timeout, metadata, requestId, leaseAccessConditions, modifiedAccessConditions).blockingAwait(); + } + + /** + * operation sets one or more user-defined name-value pairs for the specified container. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. 
See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture setMetadataAsync(Context context, Integer timeout, Map metadata, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(setMetadataAsync(context, timeout, metadata, requestId, leaseAccessConditions, modifiedAccessConditions), serviceCallback); + } + + /** + * operation sets one or more user-defined name-value pairs for the specified container. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. 
Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Single setMetadataWithRestResponseAsync(Context context, Integer timeout, Map metadata, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(metadata); + Validator.validate(leaseAccessConditions); + Validator.validate(modifiedAccessConditions); + final String restype = "container"; + final String comp = "metadata"; + String leaseId = null; + if (leaseAccessConditions != null) { + leaseId = leaseAccessConditions.leaseId(); + } + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + return service.setMetadata(context, this.client.url(), timeout, metadata, this.client.version(), requestId, restype, comp, leaseId, ifModifiedSinceConverted); + } + + /** + * operation sets one or more 
user-defined name-value pairs for the specified container. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Completable setMetadataAsync(Context context, Integer timeout, Map metadata, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + return setMetadataWithRestResponseAsync(context, timeout, metadata, requestId, leaseAccessConditions, modifiedAccessConditions) + .toCompletable(); + } + + /** + * gets the permissions for the specified container. The permissions indicate whether container data may be accessed publicly. 
+ * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the List<SignedIdentifier> object if successful. + */ + public List getAccessPolicy(Context context, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions) { + return getAccessPolicyAsync(context, timeout, requestId, leaseAccessConditions).blockingGet(); + } + + /** + * gets the permissions for the specified container. The permissions indicate whether container data may be accessed publicly. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture> getAccessPolicyAsync(Context context, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions, ServiceCallback> serviceCallback) { + return ServiceFuture.fromBody(getAccessPolicyAsync(context, timeout, requestId, leaseAccessConditions), serviceCallback); + } + + /** + * gets the permissions for the specified container. The permissions indicate whether container data may be accessed publicly. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Single getAccessPolicyWithRestResponseAsync(Context context, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(leaseAccessConditions); + final String restype = "container"; + final String comp = "acl"; + String leaseId = null; + if (leaseAccessConditions != null) { + leaseId = leaseAccessConditions.leaseId(); + } + return service.getAccessPolicy(context, this.client.url(), timeout, this.client.version(), requestId, restype, comp, leaseId); + } + + /** + * gets the permissions for the specified container. The permissions indicate whether container data may be accessed publicly. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Maybe> getAccessPolicyAsync(Context context, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions) { + return getAccessPolicyWithRestResponseAsync(context, timeout, requestId, leaseAccessConditions) + .flatMapMaybe((ContainerGetAccessPolicyResponse res) -> res.body() == null ? 
Maybe.empty() : Maybe.just(res.body())); + } + + /** + * sets the permissions for the specified container. The permissions indicate whether blobs in a container may be accessed publicly. + * + * @param context The context to associate with this operation. + * @param containerAcl the acls for the container. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param access Specifies whether data in the container may be accessed publicly and the level of access. Possible values include: 'container', 'blob'. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + public void setAccessPolicy(Context context, List containerAcl, Integer timeout, PublicAccessType access, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + setAccessPolicyAsync(context, containerAcl, timeout, access, requestId, leaseAccessConditions, modifiedAccessConditions).blockingAwait(); + } + + /** + * sets the permissions for the specified container. The permissions indicate whether blobs in a container may be accessed publicly. + * + * @param context The context to associate with this operation. + * @param containerAcl the acls for the container. 
+ * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param access Specifies whether data in the container may be accessed publicly and the level of access. Possible values include: 'container', 'blob'. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture setAccessPolicyAsync(Context context, List containerAcl, Integer timeout, PublicAccessType access, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(setAccessPolicyAsync(context, containerAcl, timeout, access, requestId, leaseAccessConditions, modifiedAccessConditions), serviceCallback); + } + + /** + * sets the permissions for the specified container. The permissions indicate whether blobs in a container may be accessed publicly. + * + * @param context The context to associate with this operation. + * @param containerAcl the acls for the container. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. 
+ * @param access Specifies whether data in the container may be accessed publicly and the level of access. Possible values include: 'container', 'blob'. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Single setAccessPolicyWithRestResponseAsync(Context context, List containerAcl, Integer timeout, PublicAccessType access, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(containerAcl); + Validator.validate(leaseAccessConditions); + Validator.validate(modifiedAccessConditions); + final String restype = "container"; + final String comp = "acl"; + String leaseId = null; + if (leaseAccessConditions != null) { + leaseId = leaseAccessConditions.leaseId(); + } + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + 
if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.setAccessPolicy(context, this.client.url(), new SignedIdentifiersWrapper(containerAcl), timeout, access, this.client.version(), requestId, restype, comp, leaseId, ifModifiedSinceConverted, ifUnmodifiedSinceConverted); + } + + /** + * sets the permissions for the specified container. The permissions indicate whether blobs in a container may be accessed publicly. + * + * @param context The context to associate with this operation. + * @param containerAcl the acls for the container. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param access Specifies whether data in the container may be accessed publicly and the level of access. Possible values include: 'container', 'blob'. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Completable setAccessPolicyAsync(Context context, List containerAcl, Integer timeout, PublicAccessType access, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + return setAccessPolicyWithRestResponseAsync(context, containerAcl, timeout, access, requestId, leaseAccessConditions, modifiedAccessConditions) + .toCompletable(); + } + + /** + * [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param duration Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration cannot be changed using renew or change. + * @param proposedLeaseId Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID string formats. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
+ */ + public void acquireLease(Context context, Integer timeout, Integer duration, String proposedLeaseId, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + acquireLeaseAsync(context, timeout, duration, proposedLeaseId, requestId, modifiedAccessConditions).blockingAwait(); + } + + /** + * [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param duration Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration cannot be changed using renew or change. + * @param proposedLeaseId Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID string formats. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. 
+ */ + public ServiceFuture acquireLeaseAsync(Context context, Integer timeout, Integer duration, String proposedLeaseId, String requestId, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(acquireLeaseAsync(context, timeout, duration, proposedLeaseId, requestId, modifiedAccessConditions), serviceCallback); + } + + /** + * [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param duration Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration cannot be changed using renew or change. + * @param proposedLeaseId Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID string formats. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Single acquireLeaseWithRestResponseAsync(Context context, Integer timeout, Integer duration, String proposedLeaseId, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(modifiedAccessConditions); + final String comp = "lease"; + final String restype = "container"; + final String action = "acquire"; + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.acquireLease(context, this.client.url(), timeout, duration, proposedLeaseId, this.client.version(), requestId, comp, restype, action, ifModifiedSinceConverted, ifUnmodifiedSinceConverted); + } + + /** + * [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. 
+ * @param duration Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration cannot be changed using renew or change. + * @param proposedLeaseId Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID string formats. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Completable which performs the network request upon subscription. + */ + public Completable acquireLeaseAsync(Context context, Integer timeout, Integer duration, String proposedLeaseId, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + return acquireLeaseWithRestResponseAsync(context, timeout, duration, proposedLeaseId, requestId, modifiedAccessConditions) + .toCompletable(); + } + + /** + * [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite. + * + * @param context The context to associate with this operation. + * @param leaseId Specifies the current lease ID on the resource. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+ * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + public void releaseLease(Context context, @NonNull String leaseId, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + releaseLeaseAsync(context, leaseId, timeout, requestId, modifiedAccessConditions).blockingAwait(); + } + + /** + * [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite. + * + * @param context The context to associate with this operation. + * @param leaseId Specifies the current lease ID on the resource. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. 
+ */ + public ServiceFuture releaseLeaseAsync(Context context, @NonNull String leaseId, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(releaseLeaseAsync(context, leaseId, timeout, requestId, modifiedAccessConditions), serviceCallback); + } + + /** + * [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite. + * + * @param context The context to associate with this operation. + * @param leaseId Specifies the current lease ID on the resource. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Single releaseLeaseWithRestResponseAsync(Context context, @NonNull String leaseId, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (leaseId == null) { + throw new IllegalArgumentException("Parameter leaseId is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(modifiedAccessConditions); + final String comp = "lease"; + final String restype = "container"; + final String action = "release"; + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.releaseLease(context, this.client.url(), timeout, leaseId, this.client.version(), requestId, comp, restype, action, ifModifiedSinceConverted, ifUnmodifiedSinceConverted); + } + + /** + * [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite. + * + * @param context The context to associate with this operation. + * @param leaseId Specifies the current lease ID on the resource. + * @param timeout The timeout parameter is expressed in seconds. 
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Completable which performs the network request upon subscription. + */ + public Completable releaseLeaseAsync(Context context, @NonNull String leaseId, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + return releaseLeaseWithRestResponseAsync(context, leaseId, timeout, requestId, modifiedAccessConditions) + .toCompletable(); + } + + /** + * [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite. + * + * @param context The context to associate with this operation. + * @param leaseId Specifies the current lease ID on the resource. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
+ */ + public void renewLease(Context context, @NonNull String leaseId, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + renewLeaseAsync(context, leaseId, timeout, requestId, modifiedAccessConditions).blockingAwait(); + } + + /** + * [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite. + * + * @param context The context to associate with this operation. + * @param leaseId Specifies the current lease ID on the resource. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture renewLeaseAsync(Context context, @NonNull String leaseId, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(renewLeaseAsync(context, leaseId, timeout, requestId, modifiedAccessConditions), serviceCallback); + } + + /** + * [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite. + * + * @param context The context to associate with this operation. + * @param leaseId Specifies the current lease ID on the resource. 
+ * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Single renewLeaseWithRestResponseAsync(Context context, @NonNull String leaseId, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (leaseId == null) { + throw new IllegalArgumentException("Parameter leaseId is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(modifiedAccessConditions); + final String comp = "lease"; + final String restype = "container"; + final String action = "renew"; + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new 
DateTimeRfc1123(ifUnmodifiedSince); + } + return service.renewLease(context, this.client.url(), timeout, leaseId, this.client.version(), requestId, comp, restype, action, ifModifiedSinceConverted, ifUnmodifiedSinceConverted); + } + + /** + * [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite. + * + * @param context The context to associate with this operation. + * @param leaseId Specifies the current lease ID on the resource. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Completable which performs the network request upon subscription. + */ + public Completable renewLeaseAsync(Context context, @NonNull String leaseId, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + return renewLeaseWithRestResponseAsync(context, leaseId, timeout, requestId, modifiedAccessConditions) + .toCompletable(); + } + + /** + * [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. 
+ * @param breakPeriod For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter than the time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has expired, but the lease may be held for longer than the break period. If this header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease breaks immediately. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + public void breakLease(Context context, Integer timeout, Integer breakPeriod, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + breakLeaseAsync(context, timeout, breakPeriod, requestId, modifiedAccessConditions).blockingAwait(); + } + + /** + * [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param breakPeriod For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. 
This break period is only used if it is shorter than the time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has expired, but the lease may be held for longer than the break period. If this header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease breaks immediately. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture breakLeaseAsync(Context context, Integer timeout, Integer breakPeriod, String requestId, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(breakLeaseAsync(context, timeout, breakPeriod, requestId, modifiedAccessConditions), serviceCallback); + } + + /** + * [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param breakPeriod For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. 
This break period is only used if it is shorter than the time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has expired, but the lease may be held for longer than the break period. If this header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease breaks immediately. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Single breakLeaseWithRestResponseAsync(Context context, Integer timeout, Integer breakPeriod, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(modifiedAccessConditions); + final String comp = "lease"; + final String restype = "container"; + final String action = "break"; + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + if (ifUnmodifiedSince 
!= null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.breakLease(context, this.client.url(), timeout, breakPeriod, this.client.version(), requestId, comp, restype, action, ifModifiedSinceConverted, ifUnmodifiedSinceConverted); + } + + /** + * [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param breakPeriod For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter than the time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has expired, but the lease may be held for longer than the break period. If this header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease breaks immediately. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Completable which performs the network request upon subscription. 
+ */ + public Completable breakLeaseAsync(Context context, Integer timeout, Integer breakPeriod, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + return breakLeaseWithRestResponseAsync(context, timeout, breakPeriod, requestId, modifiedAccessConditions) + .toCompletable(); + } + + /** + * [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite. + * + * @param context The context to associate with this operation. + * @param leaseId Specifies the current lease ID on the resource. + * @param proposedLeaseId Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID string formats. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
+ */ + public void changeLease(Context context, @NonNull String leaseId, @NonNull String proposedLeaseId, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + changeLeaseAsync(context, leaseId, proposedLeaseId, timeout, requestId, modifiedAccessConditions).blockingAwait(); + } + + /** + * [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite. + * + * @param context The context to associate with this operation. + * @param leaseId Specifies the current lease ID on the resource. + * @param proposedLeaseId Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID string formats. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. 
+ */ + public ServiceFuture changeLeaseAsync(Context context, @NonNull String leaseId, @NonNull String proposedLeaseId, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(changeLeaseAsync(context, leaseId, proposedLeaseId, timeout, requestId, modifiedAccessConditions), serviceCallback); + } + + /** + * [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite. + * + * @param context The context to associate with this operation. + * @param leaseId Specifies the current lease ID on the resource. + * @param proposedLeaseId Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID string formats. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Single changeLeaseWithRestResponseAsync(Context context, @NonNull String leaseId, @NonNull String proposedLeaseId, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (leaseId == null) { + throw new IllegalArgumentException("Parameter leaseId is required and cannot be null."); + } + if (proposedLeaseId == null) { + throw new IllegalArgumentException("Parameter proposedLeaseId is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(modifiedAccessConditions); + final String comp = "lease"; + final String restype = "container"; + final String action = "change"; + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.changeLease(context, this.client.url(), timeout, leaseId, proposedLeaseId, this.client.version(), requestId, comp, restype, action, ifModifiedSinceConverted, ifUnmodifiedSinceConverted); + } + + /** + * [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite. + * + * @param context The context to associate with this operation. 
+ * @param leaseId Specifies the current lease ID on the resource. + * @param proposedLeaseId Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID string formats. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Completable changeLeaseAsync(Context context, @NonNull String leaseId, @NonNull String proposedLeaseId, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + return changeLeaseWithRestResponseAsync(context, leaseId, proposedLeaseId, timeout, requestId, modifiedAccessConditions) + .toCompletable(); + } + + /** + * [Update] The List Blobs operation returns a list of the blobs under the specified container. + * + * @param context The context to associate with this operation. + * @param prefix Filters the results to return only containers whose name begins with the specified prefix. + * @param marker A string value that identifies the portion of the list of containers to be returned with the next listing operation. The operation returns the NextMarker value within the response body if the listing operation did not return all containers remaining to be listed with the current page. 
The NextMarker value can be used as the value for the marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the client. + * @param maxresults Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or than the default of 5000. + * @param include Include this parameter to specify one or more datasets to include in the response. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the ListBlobsFlatSegmentResponse object if successful. + */ + public ListBlobsFlatSegmentResponse listBlobFlatSegment(Context context, String prefix, String marker, Integer maxresults, List include, Integer timeout, String requestId) { + return listBlobFlatSegmentAsync(context, prefix, marker, maxresults, include, timeout, requestId).blockingGet(); + } + + /** + * [Update] The List Blobs operation returns a list of the blobs under the specified container. 
+ * + * @param context The context to associate with this operation. + * @param prefix Filters the results to return only blobs whose name begins with the specified prefix. + * @param marker A string value that identifies the portion of the list of blobs to be returned with the next listing operation. The operation returns the NextMarker value within the response body if the listing operation did not return all blobs remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the client. + * @param maxresults Specifies the maximum number of blobs to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or than the default of 5000. + * @param include Include this parameter to specify one or more datasets to include in the response. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. 
+ */ + public ServiceFuture listBlobFlatSegmentAsync(Context context, String prefix, String marker, Integer maxresults, List include, Integer timeout, String requestId, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(listBlobFlatSegmentAsync(context, prefix, marker, maxresults, include, timeout, requestId), serviceCallback); + } + + /** + * [Update] The List Blobs operation returns a list of the blobs under the specified container. + * + * @param context The context to associate with this operation. + * @param prefix Filters the results to return only blobs whose name begins with the specified prefix. + * @param marker A string value that identifies the portion of the list of blobs to be returned with the next listing operation. The operation returns the NextMarker value within the response body if the listing operation did not return all blobs remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the client. + * @param maxresults Specifies the maximum number of blobs to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or than the default of 5000. + * @param include Include this parameter to specify one or more datasets to include in the response. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. 
+ * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Single listBlobFlatSegmentWithRestResponseAsync(Context context, String prefix, String marker, Integer maxresults, List include, Integer timeout, String requestId) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(include); + final String restype = "container"; + final String comp = "list"; + String includeConverted = this.client.serializerAdapter().serializeList(include, CollectionFormat.CSV); + return service.listBlobFlatSegment(context, this.client.url(), prefix, marker, maxresults, includeConverted, timeout, this.client.version(), requestId, restype, comp); + } + + /** + * [Update] The List Blobs operation returns a list of the blobs under the specified container. + * + * @param context The context to associate with this operation. + * @param prefix Filters the results to return only containers whose name begins with the specified prefix. + * @param marker A string value that identifies the portion of the list of containers to be returned with the next listing operation. The operation returns the NextMarker value within the response body if the listing operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the client. 
+ * @param maxresults Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or than the default of 5000. + * @param include Include this parameter to specify one or more datasets to include in the response. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Maybe listBlobFlatSegmentAsync(Context context, String prefix, String marker, Integer maxresults, List include, Integer timeout, String requestId) { + return listBlobFlatSegmentWithRestResponseAsync(context, prefix, marker, maxresults, include, timeout, requestId) + .flatMapMaybe((ContainerListBlobFlatSegmentResponse res) -> res.body() == null ? Maybe.empty() : Maybe.just(res.body())); + } + + /** + * [Update] The List Blobs operation returns a list of the blobs under the specified container. + * + * @param context The context to associate with this operation. 
+ * @param delimiter When the request includes this parameter, the operation returns a BlobPrefix element in the response body that acts as a placeholder for all blobs whose names begin with the same substring up to the appearance of the delimiter character. The delimiter may be a single character or a string. + * @param prefix Filters the results to return only containers whose name begins with the specified prefix. + * @param marker A string value that identifies the portion of the list of containers to be returned with the next listing operation. The operation returns the NextMarker value within the response body if the listing operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the client. + * @param maxresults Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or than the default of 5000. + * @param include Include this parameter to specify one or more datasets to include in the response. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the ListBlobsHierarchySegmentResponse object if successful. + */ + public ListBlobsHierarchySegmentResponse listBlobHierarchySegment(Context context, @NonNull String delimiter, String prefix, String marker, Integer maxresults, List include, Integer timeout, String requestId) { + return listBlobHierarchySegmentAsync(context, delimiter, prefix, marker, maxresults, include, timeout, requestId).blockingGet(); + } + + /** + * [Update] The List Blobs operation returns a list of the blobs under the specified container. + * + * @param context The context to associate with this operation. + * @param delimiter When the request includes this parameter, the operation returns a BlobPrefix element in the response body that acts as a placeholder for all blobs whose names begin with the same substring up to the appearance of the delimiter character. The delimiter may be a single character or a string. + * @param prefix Filters the results to return only containers whose name begins with the specified prefix. + * @param marker A string value that identifies the portion of the list of containers to be returned with the next listing operation. The operation returns the NextMarker value within the response body if the listing operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the client. + * @param maxresults Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. 
Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or than the default of 5000. + * @param include Include this parameter to specify one or more datasets to include in the response. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture listBlobHierarchySegmentAsync(Context context, @NonNull String delimiter, String prefix, String marker, Integer maxresults, List include, Integer timeout, String requestId, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(listBlobHierarchySegmentAsync(context, delimiter, prefix, marker, maxresults, include, timeout, requestId), serviceCallback); + } + + /** + * [Update] The List Blobs operation returns a list of the blobs under the specified container. + * + * @param context The context to associate with this operation. + * @param delimiter When the request includes this parameter, the operation returns a BlobPrefix element in the response body that acts as a placeholder for all blobs whose names begin with the same substring up to the appearance of the delimiter character. The delimiter may be a single character or a string. 
+ * @param prefix Filters the results to return only blobs whose name begins with the specified prefix. + * @param marker A string value that identifies the portion of the list of blobs to be returned with the next listing operation. The operation returns the NextMarker value within the response body if the listing operation did not return all blobs remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the client. + * @param maxresults Specifies the maximum number of blobs to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or than the default of 5000. + * @param include Include this parameter to specify one or more datasets to include in the response. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Single listBlobHierarchySegmentWithRestResponseAsync(Context context, @NonNull String delimiter, String prefix, String marker, Integer maxresults, List include, Integer timeout, String requestId) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (delimiter == null) { + throw new IllegalArgumentException("Parameter delimiter is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(include); + final String restype = "container"; + final String comp = "list"; + String includeConverted = this.client.serializerAdapter().serializeList(include, CollectionFormat.CSV); + return service.listBlobHierarchySegment(context, this.client.url(), prefix, delimiter, marker, maxresults, includeConverted, timeout, this.client.version(), requestId, restype, comp); + } + + /** + * [Update] The List Blobs operation returns a list of the blobs under the specified container. + * + * @param context The context to associate with this operation. + * @param delimiter When the request includes this parameter, the operation returns a BlobPrefix element in the response body that acts as a placeholder for all blobs whose names begin with the same substring up to the appearance of the delimiter character. The delimiter may be a single character or a string. + * @param prefix Filters the results to return only containers whose name begins with the specified prefix. + * @param marker A string value that identifies the portion of the list of containers to be returned with the next listing operation. The operation returns the NextMarker value within the response body if the listing operation did not return all containers remaining to be listed with the current page. 
The NextMarker value can be used as the value for the marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the client. + * @param maxresults Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or than the default of 5000. + * @param include Include this parameter to specify one or more datasets to include in the response. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Maybe listBlobHierarchySegmentAsync(Context context, @NonNull String delimiter, String prefix, String marker, Integer maxresults, List include, Integer timeout, String requestId) { + return listBlobHierarchySegmentWithRestResponseAsync(context, delimiter, prefix, marker, maxresults, include, timeout, requestId) + .flatMapMaybe((ContainerListBlobHierarchySegmentResponse res) -> res.body() == null ? Maybe.empty() : Maybe.just(res.body())); + } + + /** + * Returns the sku name and account kind. + * + * @param context The context to associate with this operation. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + public void getAccountInfo(Context context) { + getAccountInfoAsync(context).blockingAwait(); + } + + /** + * Returns the sku name and account kind. + * + * @param context The context to associate with this operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture getAccountInfoAsync(Context context, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(getAccountInfoAsync(context), serviceCallback); + } + + /** + * Returns the sku name and account kind. + * + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Single getAccountInfoWithRestResponseAsync(Context context) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + final String restype = "account"; + final String comp = "properties"; + return service.getAccountInfo(context, this.client.url(), this.client.version(), restype, comp); + } + + /** + * Returns the sku name and account kind. + * + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. 
+ * @return a Single which performs the network request upon subscription. + */ + public Completable getAccountInfoAsync(Context context) { + return getAccountInfoWithRestResponseAsync(context) + .toCompletable(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/GeneratedPageBlobs.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/GeneratedPageBlobs.java new file mode 100644 index 0000000000000..9e7a7594d5364 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/GeneratedPageBlobs.java @@ -0,0 +1,1073 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage; + +import com.microsoft.azure.storage.blob.models.BlobHTTPHeaders; +import com.microsoft.azure.storage.blob.models.LeaseAccessConditions; +import com.microsoft.azure.storage.blob.models.ModifiedAccessConditions; +import com.microsoft.azure.storage.blob.models.PageBlobClearPagesResponse; +import com.microsoft.azure.storage.blob.models.PageBlobCopyIncrementalResponse; +import com.microsoft.azure.storage.blob.models.PageBlobCreateResponse; +import com.microsoft.azure.storage.blob.models.PageBlobGetPageRangesDiffResponse; +import com.microsoft.azure.storage.blob.models.PageBlobGetPageRangesResponse; +import com.microsoft.azure.storage.blob.models.PageBlobResizeResponse; +import com.microsoft.azure.storage.blob.models.PageBlobUpdateSequenceNumberResponse; +import com.microsoft.azure.storage.blob.models.PageBlobUploadPagesResponse; +import com.microsoft.azure.storage.blob.models.PageList; +import com.microsoft.azure.storage.blob.models.SequenceNumberAccessConditions; +import 
com.microsoft.azure.storage.blob.models.SequenceNumberActionType; +import com.microsoft.azure.storage.blob.models.StorageErrorException; +import com.microsoft.rest.v2.Context; +import com.microsoft.rest.v2.DateTimeRfc1123; +import com.microsoft.rest.v2.RestProxy; +import com.microsoft.rest.v2.ServiceCallback; +import com.microsoft.rest.v2.ServiceFuture; +import com.microsoft.rest.v2.Validator; +import com.microsoft.rest.v2.annotations.BodyParam; +import com.microsoft.rest.v2.annotations.ExpectedResponses; +import com.microsoft.rest.v2.annotations.GET; +import com.microsoft.rest.v2.annotations.HeaderParam; +import com.microsoft.rest.v2.annotations.Host; +import com.microsoft.rest.v2.annotations.HostParam; +import com.microsoft.rest.v2.annotations.PUT; +import com.microsoft.rest.v2.annotations.QueryParam; +import com.microsoft.rest.v2.annotations.UnexpectedResponseExceptionType; +import com.microsoft.rest.v2.util.Base64Util; +import io.reactivex.Completable; +import io.reactivex.Flowable; +import io.reactivex.Maybe; +import io.reactivex.Single; +import io.reactivex.annotations.NonNull; +import java.net.URL; +import java.nio.ByteBuffer; +import java.time.OffsetDateTime; +import java.util.HashMap; +import java.util.Map; + +/** + * An instance of this class provides access to all the operations defined in + * GeneratedPageBlobs. + */ +public final class GeneratedPageBlobs { + /** + * The proxy service used to perform REST calls. + */ + private PageBlobsService service; + + /** + * The service client containing this operation class. + */ + private GeneratedStorageClient client; + + /** + * Initializes an instance of GeneratedPageBlobs. + * + * @param client the instance of the service client containing this operation class. 
+ */ + public GeneratedPageBlobs(GeneratedStorageClient client) { + this.service = RestProxy.create(PageBlobsService.class, client); + this.client = client; + } + + /** + * The interface defining all the services for GeneratedPageBlobs to be + * used by the proxy service to perform REST calls. + */ + @Host("{url}") + private interface PageBlobsService { + @PUT("{containerName}/{blob}") + @ExpectedResponses({201}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single create(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("Content-Length") long contentLength, @HeaderParam("x-ms-meta-") Map metadata, @HeaderParam("x-ms-blob-content-length") long blobContentLength, @HeaderParam("x-ms-blob-sequence-number") Long blobSequenceNumber, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @HeaderParam("x-ms-blob-type") String blobType, @HeaderParam("x-ms-blob-content-type") String blobContentType, @HeaderParam("x-ms-blob-content-encoding") String blobContentEncoding, @HeaderParam("x-ms-blob-content-language") String blobContentLanguage, @HeaderParam("x-ms-blob-content-md5") String blobContentMD5, @HeaderParam("x-ms-blob-cache-control") String blobCacheControl, @HeaderParam("x-ms-blob-content-disposition") String blobContentDisposition, @HeaderParam("x-ms-lease-id") String leaseId, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince, @HeaderParam("If-Match") String ifMatch, @HeaderParam("If-None-Match") String ifNoneMatch); + + @PUT("{containerName}/{blob}") + @ExpectedResponses({201}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single uploadPages(Context context, @HostParam("url") String url, @BodyParam("application/octet-stream") Flowable body, @HeaderParam("Content-Length") long contentLength, @HeaderParam("Content-MD5") String 
transactionalContentMD5, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-range") String range, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp, @HeaderParam("x-ms-page-write") String pageWrite, @HeaderParam("x-ms-lease-id") String leaseId, @HeaderParam("x-ms-if-sequence-number-le") Long ifSequenceNumberLessThanOrEqualTo, @HeaderParam("x-ms-if-sequence-number-lt") Long ifSequenceNumberLessThan, @HeaderParam("x-ms-if-sequence-number-eq") Long ifSequenceNumberEqualTo, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince, @HeaderParam("If-Match") String ifMatch, @HeaderParam("If-None-Match") String ifNoneMatch); + + @PUT("{containerName}/{blob}") + @ExpectedResponses({201}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single clearPages(Context context, @HostParam("url") String url, @HeaderParam("Content-Length") long contentLength, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-range") String range, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp, @HeaderParam("x-ms-page-write") String pageWrite, @HeaderParam("x-ms-lease-id") String leaseId, @HeaderParam("x-ms-if-sequence-number-le") Long ifSequenceNumberLessThanOrEqualTo, @HeaderParam("x-ms-if-sequence-number-lt") Long ifSequenceNumberLessThan, @HeaderParam("x-ms-if-sequence-number-eq") Long ifSequenceNumberEqualTo, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince, @HeaderParam("If-Match") String ifMatch, @HeaderParam("If-None-Match") String ifNoneMatch); + + @GET("{containerName}/{blob}") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single getPageRanges(Context context, @HostParam("url") 
String url, @QueryParam("snapshot") String snapshot, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-range") String range, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp, @HeaderParam("x-ms-lease-id") String leaseId, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince, @HeaderParam("If-Match") String ifMatch, @HeaderParam("If-None-Match") String ifNoneMatch); + + @GET("{containerName}/{blob}") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single getPageRangesDiff(Context context, @HostParam("url") String url, @QueryParam("snapshot") String snapshot, @QueryParam("timeout") Integer timeout, @QueryParam("prevsnapshot") String prevsnapshot, @HeaderParam("x-ms-range") String range, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp, @HeaderParam("x-ms-lease-id") String leaseId, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince, @HeaderParam("If-Match") String ifMatch, @HeaderParam("If-None-Match") String ifNoneMatch); + + @PUT("{containerName}/{blob}") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single resize(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-blob-content-length") long blobContentLength, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp, @HeaderParam("x-ms-lease-id") String leaseId, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince, @HeaderParam("If-Match") String ifMatch, 
@HeaderParam("If-None-Match") String ifNoneMatch); + + @PUT("{containerName}/{blob}") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single updateSequenceNumber(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-sequence-number-action") SequenceNumberActionType sequenceNumberAction, @HeaderParam("x-ms-blob-sequence-number") Long blobSequenceNumber, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp, @HeaderParam("x-ms-lease-id") String leaseId, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince, @HeaderParam("If-Match") String ifMatch, @HeaderParam("If-None-Match") String ifNoneMatch); + + @PUT("{containerName}/{blob}") + @ExpectedResponses({202}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single copyIncremental(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-copy-source") URL copySource, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp, @HeaderParam("If-Modified-Since") DateTimeRfc1123 ifModifiedSince, @HeaderParam("If-Unmodified-Since") DateTimeRfc1123 ifUnmodifiedSince, @HeaderParam("If-Match") String ifMatch, @HeaderParam("If-None-Match") String ifNoneMatch); + } + + /** + * The Create operation creates a new page blob. + * + * @param context The context to associate with this operation. + * @param contentLength The length of the request. + * @param blobContentLength This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary. + * @param timeout The timeout parameter is expressed in seconds. 
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param blobSequenceNumber Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of the sequence number must be between 0 and 2^63 - 1. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param blobHTTPHeaders Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
+ */ + public void create(Context context, @NonNull long contentLength, @NonNull long blobContentLength, Integer timeout, Map metadata, Long blobSequenceNumber, String requestId, BlobHTTPHeaders blobHTTPHeaders, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + createAsync(context, contentLength, blobContentLength, timeout, metadata, blobSequenceNumber, requestId, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions).blockingAwait(); + } + + /** + * The Create operation creates a new page blob. + * + * @param context The context to associate with this operation. + * @param contentLength The length of the request. + * @param blobContentLength This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param blobSequenceNumber Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of the sequence number must be between 0 and 2^63 - 1. 
+ * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param blobHTTPHeaders Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture createAsync(Context context, @NonNull long contentLength, @NonNull long blobContentLength, Integer timeout, Map metadata, Long blobSequenceNumber, String requestId, BlobHTTPHeaders blobHTTPHeaders, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(createAsync(context, contentLength, blobContentLength, timeout, metadata, blobSequenceNumber, requestId, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions), serviceCallback); + } + + /** + * The Create operation creates a new page blob. + * + * @param context The context to associate with this operation. + * @param contentLength The length of the request. + * @param blobContentLength This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. 
If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param blobSequenceNumber Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of the sequence number must be between 0 and 2^63 - 1. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param blobHTTPHeaders Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Single createWithRestResponseAsync(Context context, @NonNull long contentLength, @NonNull long blobContentLength, Integer timeout, Map metadata, Long blobSequenceNumber, String requestId, BlobHTTPHeaders blobHTTPHeaders, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(metadata); + Validator.validate(blobHTTPHeaders); + Validator.validate(leaseAccessConditions); + Validator.validate(modifiedAccessConditions); + final String blobType = "PageBlob"; + String blobContentType = null; + if (blobHTTPHeaders != null) { + blobContentType = blobHTTPHeaders.blobContentType(); + } + String blobContentEncoding = null; + if (blobHTTPHeaders != null) { + blobContentEncoding = blobHTTPHeaders.blobContentEncoding(); + } + String blobContentLanguage = null; + if (blobHTTPHeaders != null) { + blobContentLanguage = blobHTTPHeaders.blobContentLanguage(); + } + byte[] blobContentMD5 = null; + if (blobHTTPHeaders != null) { + blobContentMD5 = blobHTTPHeaders.blobContentMD5(); + } + String blobCacheControl = null; + if (blobHTTPHeaders != null) { + blobCacheControl = blobHTTPHeaders.blobCacheControl(); + } + String blobContentDisposition = null; + if (blobHTTPHeaders != null) { + blobContentDisposition = blobHTTPHeaders.blobContentDisposition(); + } + String leaseId = null; + if (leaseAccessConditions != null) { + leaseId = leaseAccessConditions.leaseId(); + } + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = 
modifiedAccessConditions.ifUnmodifiedSince(); + } + String ifMatch = null; + if (modifiedAccessConditions != null) { + ifMatch = modifiedAccessConditions.ifMatch(); + } + String ifNoneMatch = null; + if (modifiedAccessConditions != null) { + ifNoneMatch = modifiedAccessConditions.ifNoneMatch(); + } + String blobContentMD5Converted = Base64Util.encodeToString(blobContentMD5); + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.create(context, this.client.url(), timeout, contentLength, metadata, blobContentLength, blobSequenceNumber, this.client.version(), requestId, blobType, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5Converted, blobCacheControl, blobContentDisposition, leaseId, ifModifiedSinceConverted, ifUnmodifiedSinceConverted, ifMatch, ifNoneMatch); + } + + /** + * The Create operation creates a new page blob. + * + * @param context The context to associate with this operation. + * @param contentLength The length of the request. + * @param blobContentLength This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param metadata Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. 
If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. + * @param blobSequenceNumber Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of the sequence number must be between 0 and 2^63 - 1. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param blobHTTPHeaders Additional parameters for the operation. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Completable createAsync(Context context, @NonNull long contentLength, @NonNull long blobContentLength, Integer timeout, Map metadata, Long blobSequenceNumber, String requestId, BlobHTTPHeaders blobHTTPHeaders, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + return createWithRestResponseAsync(context, contentLength, blobContentLength, timeout, metadata, blobSequenceNumber, requestId, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions) + .toCompletable(); + } + + /** + * The Upload Pages operation writes a range of pages to a page blob. + * + * @param context The context to associate with this operation. + * @param body Initial data. + * @param contentLength The length of the request. + * @param transactionalContentMD5 Specify the transactional md5 for the body, to be validated by the service. 
+ * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param range Return only the bytes of the blob in the specified range. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param sequenceNumberAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + public void uploadPages(Context context, @NonNull Flowable body, @NonNull long contentLength, byte[] transactionalContentMD5, Integer timeout, String range, String requestId, LeaseAccessConditions leaseAccessConditions, SequenceNumberAccessConditions sequenceNumberAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + uploadPagesAsync(context, body, contentLength, transactionalContentMD5, timeout, range, requestId, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions).blockingAwait(); + } + + /** + * The Upload Pages operation writes a range of pages to a page blob. + * + * @param context The context to associate with this operation. + * @param body Initial data. + * @param contentLength The length of the request. + * @param transactionalContentMD5 Specify the transactional md5 for the body, to be validated by the service. + * @param timeout The timeout parameter is expressed in seconds. 
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param range Return only the bytes of the blob in the specified range. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param sequenceNumberAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture uploadPagesAsync(Context context, @NonNull Flowable body, @NonNull long contentLength, byte[] transactionalContentMD5, Integer timeout, String range, String requestId, LeaseAccessConditions leaseAccessConditions, SequenceNumberAccessConditions sequenceNumberAccessConditions, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(uploadPagesAsync(context, body, contentLength, transactionalContentMD5, timeout, range, requestId, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions), serviceCallback); + } + + /** + * The Upload Pages operation writes a range of pages to a page blob. + * + * @param context The context to associate with this operation. + * @param body Initial data. + * @param contentLength The length of the request. + * @param transactionalContentMD5 Specify the transactional md5 for the body, to be validated by the service. + * @param timeout The timeout parameter is expressed in seconds. 
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param range Return only the bytes of the blob in the specified range. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param sequenceNumberAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Single uploadPagesWithRestResponseAsync(Context context, @NonNull Flowable body, @NonNull long contentLength, byte[] transactionalContentMD5, Integer timeout, String range, String requestId, LeaseAccessConditions leaseAccessConditions, SequenceNumberAccessConditions sequenceNumberAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (body == null) { + throw new IllegalArgumentException("Parameter body is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(leaseAccessConditions); + Validator.validate(sequenceNumberAccessConditions); + Validator.validate(modifiedAccessConditions); + final String comp = "page"; + final String pageWrite = "update"; + String leaseId = null; + if (leaseAccessConditions != null) { + leaseId = leaseAccessConditions.leaseId(); + } + Long ifSequenceNumberLessThanOrEqualTo = null; + if 
(sequenceNumberAccessConditions != null) { + ifSequenceNumberLessThanOrEqualTo = sequenceNumberAccessConditions.ifSequenceNumberLessThanOrEqualTo(); + } + Long ifSequenceNumberLessThan = null; + if (sequenceNumberAccessConditions != null) { + ifSequenceNumberLessThan = sequenceNumberAccessConditions.ifSequenceNumberLessThan(); + } + Long ifSequenceNumberEqualTo = null; + if (sequenceNumberAccessConditions != null) { + ifSequenceNumberEqualTo = sequenceNumberAccessConditions.ifSequenceNumberEqualTo(); + } + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + String ifMatch = null; + if (modifiedAccessConditions != null) { + ifMatch = modifiedAccessConditions.ifMatch(); + } + String ifNoneMatch = null; + if (modifiedAccessConditions != null) { + ifNoneMatch = modifiedAccessConditions.ifNoneMatch(); + } + String transactionalContentMD5Converted = Base64Util.encodeToString(transactionalContentMD5); + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.uploadPages(context, this.client.url(), body, contentLength, transactionalContentMD5Converted, timeout, range, this.client.version(), requestId, comp, pageWrite, leaseId, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSinceConverted, ifUnmodifiedSinceConverted, ifMatch, ifNoneMatch); + } + + /** + * The Upload Pages operation writes a range of pages to a page blob. + * + * @param context The context to associate with this operation. 
+ * @param body Initial data. + * @param contentLength The length of the request. + * @param transactionalContentMD5 Specify the transactional md5 for the body, to be validated by the service. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param range Return only the bytes of the blob in the specified range. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param sequenceNumberAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Completable uploadPagesAsync(Context context, @NonNull Flowable body, @NonNull long contentLength, byte[] transactionalContentMD5, Integer timeout, String range, String requestId, LeaseAccessConditions leaseAccessConditions, SequenceNumberAccessConditions sequenceNumberAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + return uploadPagesWithRestResponseAsync(context, body, contentLength, transactionalContentMD5, timeout, range, requestId, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions) + .toCompletable(); + } + + /** + * The Clear Pages operation clears a set of pages from a page blob. + * + * @param context The context to associate with this operation. + * @param contentLength The length of the request. + * @param timeout The timeout parameter is expressed in seconds. 
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param range Return only the bytes of the blob in the specified range. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param sequenceNumberAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + public void clearPages(Context context, @NonNull long contentLength, Integer timeout, String range, String requestId, LeaseAccessConditions leaseAccessConditions, SequenceNumberAccessConditions sequenceNumberAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + clearPagesAsync(context, contentLength, timeout, range, requestId, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions).blockingAwait(); + } + + /** + * The Clear Pages operation clears a set of pages from a page blob. + * + * @param context The context to associate with this operation. + * @param contentLength The length of the request. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param range Return only the bytes of the blob in the specified range. 
+ * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param sequenceNumberAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture clearPagesAsync(Context context, @NonNull long contentLength, Integer timeout, String range, String requestId, LeaseAccessConditions leaseAccessConditions, SequenceNumberAccessConditions sequenceNumberAccessConditions, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(clearPagesAsync(context, contentLength, timeout, range, requestId, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions), serviceCallback); + } + + /** + * The Clear Pages operation clears a set of pages from a page blob. + * + * @param context The context to associate with this operation. + * @param contentLength The length of the request. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param range Return only the bytes of the blob in the specified range. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. 
     * @param sequenceNumberAccessConditions Additional parameters for the operation.
     * @param modifiedAccessConditions Additional parameters for the operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @return a Single which performs the network request upon subscription.
     */
    // NOTE(review): the return type appears to have lost its generic parameter in this copy
    // (generated clients normally return Single<...Response> here) — confirm against the generated original.
    public Single clearPagesWithRestResponseAsync(Context context, @NonNull long contentLength, Integer timeout, String range, String requestId, LeaseAccessConditions leaseAccessConditions, SequenceNumberAccessConditions sequenceNumberAccessConditions, ModifiedAccessConditions modifiedAccessConditions) {
        // Required client-level state must be present before the request can be built.
        if (this.client.url() == null) {
            throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null.");
        }
        if (this.client.version() == null) {
            throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null.");
        }
        Validator.validate(leaseAccessConditions);
        Validator.validate(sequenceNumberAccessConditions);
        Validator.validate(modifiedAccessConditions);
        // Fixed query-parameter values identifying this REST operation.
        final String comp = "page";
        final String pageWrite = "clear";
        // Flatten the optional access-condition holders into the individual header values
        // expected by the service proxy; each stays null when its holder is absent.
        String leaseId = null;
        if (leaseAccessConditions != null) {
            leaseId = leaseAccessConditions.leaseId();
        }
        Long ifSequenceNumberLessThanOrEqualTo = null;
        if (sequenceNumberAccessConditions != null) {
            ifSequenceNumberLessThanOrEqualTo = sequenceNumberAccessConditions.ifSequenceNumberLessThanOrEqualTo();
        }
        Long ifSequenceNumberLessThan = null;
        if (sequenceNumberAccessConditions != null) {
            ifSequenceNumberLessThan = sequenceNumberAccessConditions.ifSequenceNumberLessThan();
        }
        Long ifSequenceNumberEqualTo = null;
        if (sequenceNumberAccessConditions != null) {
            ifSequenceNumberEqualTo = sequenceNumberAccessConditions.ifSequenceNumberEqualTo();
        }
        OffsetDateTime ifModifiedSince = null;
        if (modifiedAccessConditions != null) {
            ifModifiedSince = modifiedAccessConditions.ifModifiedSince();
        }
        OffsetDateTime ifUnmodifiedSince = null;
        if (modifiedAccessConditions != null) {
            ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince();
        }
        String ifMatch = null;
        if (modifiedAccessConditions != null) {
            ifMatch = modifiedAccessConditions.ifMatch();
        }
        String ifNoneMatch = null;
        if (modifiedAccessConditions != null) {
            ifNoneMatch = modifiedAccessConditions.ifNoneMatch();
        }
        // Date conditions are transmitted in RFC 1123 format.
        DateTimeRfc1123 ifModifiedSinceConverted = null;
        if (ifModifiedSince != null) {
            ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince);
        }
        DateTimeRfc1123 ifUnmodifiedSinceConverted = null;
        if (ifUnmodifiedSince != null) {
            ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince);
        }
        return service.clearPages(context, this.client.url(), contentLength, timeout, range, this.client.version(), requestId, comp, pageWrite, leaseId, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSinceConverted, ifUnmodifiedSinceConverted, ifMatch, ifNoneMatch);
    }

    /**
     * The Clear Pages operation clears a set of pages from a page blob.
     *
     * @param context The context to associate with this operation.
     * @param contentLength The length of the request.
     * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>.
     * @param range Return only the bytes of the blob in the specified range.
     * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
     * @param leaseAccessConditions Additional parameters for the operation.
     * @param sequenceNumberAccessConditions Additional parameters for the operation.
     * @param modifiedAccessConditions Additional parameters for the operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @return a Completable which performs the network request upon subscription.
     */
    public Completable clearPagesAsync(Context context, @NonNull long contentLength, Integer timeout, String range, String requestId, LeaseAccessConditions leaseAccessConditions, SequenceNumberAccessConditions sequenceNumberAccessConditions, ModifiedAccessConditions modifiedAccessConditions) {
        // Convenience overload: callers that only care about completion drop the REST response details.
        return clearPagesWithRestResponseAsync(context, contentLength, timeout, range, requestId, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions)
            .toCompletable();
    }

    /**
     * The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot of a page blob.
     *
     * @param context The context to associate with this operation.
     * @param snapshot The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>.
     * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>.
     * @param range Return only the bytes of the blob in the specified range.
     * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
     * @param leaseAccessConditions Additional parameters for the operation.
     * @param modifiedAccessConditions Additional parameters for the operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
+ * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the PageList object if successful. + */ + public PageList getPageRanges(Context context, String snapshot, Integer timeout, String range, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + return getPageRangesAsync(context, snapshot, timeout, range, requestId, leaseAccessConditions, modifiedAccessConditions).blockingGet(); + } + + /** + * The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot of a page blob. + * + * @param context The context to associate with this operation. + * @param snapshot The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param range Return only the bytes of the blob in the specified range. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. 
+ * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture getPageRangesAsync(Context context, String snapshot, Integer timeout, String range, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(getPageRangesAsync(context, snapshot, timeout, range, requestId, leaseAccessConditions, modifiedAccessConditions), serviceCallback); + } + + /** + * The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot of a page blob. + * + * @param context The context to associate with this operation. + * @param snapshot The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param range Return only the bytes of the blob in the specified range. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Single getPageRangesWithRestResponseAsync(Context context, String snapshot, Integer timeout, String range, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(leaseAccessConditions); + Validator.validate(modifiedAccessConditions); + final String comp = "pagelist"; + String leaseId = null; + if (leaseAccessConditions != null) { + leaseId = leaseAccessConditions.leaseId(); + } + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + String ifMatch = null; + if (modifiedAccessConditions != null) { + ifMatch = modifiedAccessConditions.ifMatch(); + } + String ifNoneMatch = null; + if (modifiedAccessConditions != null) { + ifNoneMatch = modifiedAccessConditions.ifNoneMatch(); + } + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.getPageRanges(context, this.client.url(), snapshot, timeout, range, this.client.version(), requestId, comp, leaseId, ifModifiedSinceConverted, ifUnmodifiedSinceConverted, ifMatch, ifNoneMatch); + } + + /** + * The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot of a page blob. 
+ * + * @param context The context to associate with this operation. + * @param snapshot The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param range Return only the bytes of the blob in the specified range. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Maybe getPageRangesAsync(Context context, String snapshot, Integer timeout, String range, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + return getPageRangesWithRestResponseAsync(context, snapshot, timeout, range, requestId, leaseAccessConditions, modifiedAccessConditions) + .flatMapMaybe((PageBlobGetPageRangesResponse res) -> res.body() == null ? Maybe.empty() : Maybe.just(res.body())); + } + + /** + * [Update] The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that were changed between target blob and previous snapshot. + * + * @param context The context to associate with this operation. 
+ * @param snapshot The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param prevsnapshot Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response will contain only pages that were changed between target blob and previous snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. Note that incremental snapshots are currently supported only for blobs created on or after January 1, 2016. + * @param range Return only the bytes of the blob in the specified range. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the PageList object if successful. 
+ */ + public PageList getPageRangesDiff(Context context, String snapshot, Integer timeout, String prevsnapshot, String range, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + return getPageRangesDiffAsync(context, snapshot, timeout, prevsnapshot, range, requestId, leaseAccessConditions, modifiedAccessConditions).blockingGet(); + } + + /** + * [Update] The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that were changed between target blob and previous snapshot. + * + * @param context The context to associate with this operation. + * @param snapshot The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param prevsnapshot Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response will contain only pages that were changed between target blob and previous snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. Note that incremental snapshots are currently supported only for blobs created on or after January 1, 2016. + * @param range Return only the bytes of the blob in the specified range. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+ * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture getPageRangesDiffAsync(Context context, String snapshot, Integer timeout, String prevsnapshot, String range, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(getPageRangesDiffAsync(context, snapshot, timeout, prevsnapshot, range, requestId, leaseAccessConditions, modifiedAccessConditions), serviceCallback); + } + + /** + * [Update] The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that were changed between target blob and previous snapshot. + * + * @param context The context to associate with this operation. + * @param snapshot The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param prevsnapshot Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response will contain only pages that were changed between target blob and previous snapshot. 
Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. Note that incremental snapshots are currently supported only for blobs created on or after January 1, 2016. + * @param range Return only the bytes of the blob in the specified range. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Single getPageRangesDiffWithRestResponseAsync(Context context, String snapshot, Integer timeout, String prevsnapshot, String range, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(leaseAccessConditions); + Validator.validate(modifiedAccessConditions); + final String comp = "pagelist"; + String leaseId = null; + if (leaseAccessConditions != null) { + leaseId = leaseAccessConditions.leaseId(); + } + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + String ifMatch = null; + if (modifiedAccessConditions != null) { + ifMatch 
= modifiedAccessConditions.ifMatch(); + } + String ifNoneMatch = null; + if (modifiedAccessConditions != null) { + ifNoneMatch = modifiedAccessConditions.ifNoneMatch(); + } + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.getPageRangesDiff(context, this.client.url(), snapshot, timeout, prevsnapshot, range, this.client.version(), requestId, comp, leaseId, ifModifiedSinceConverted, ifUnmodifiedSinceConverted, ifMatch, ifNoneMatch); + } + + /** + * [Update] The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that were changed between target blob and previous snapshot. + * + * @param context The context to associate with this operation. + * @param snapshot The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param prevsnapshot Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response will contain only pages that were changed between target blob and previous snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. 
Note that incremental snapshots are currently supported only for blobs created on or after January 1, 2016. + * @param range Return only the bytes of the blob in the specified range. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Maybe getPageRangesDiffAsync(Context context, String snapshot, Integer timeout, String prevsnapshot, String range, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + return getPageRangesDiffWithRestResponseAsync(context, snapshot, timeout, prevsnapshot, range, requestId, leaseAccessConditions, modifiedAccessConditions) + .flatMapMaybe((PageBlobGetPageRangesDiffResponse res) -> res.body() == null ? Maybe.empty() : Maybe.just(res.body())); + } + + /** + * Resize the Blob. + * + * @param context The context to associate with this operation. + * @param blobContentLength This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param leaseAccessConditions Additional parameters for the operation. 
     * @param modifiedAccessConditions Additional parameters for the operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws StorageErrorException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    public void resize(Context context, @NonNull long blobContentLength, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) {
        // Synchronous convenience wrapper: blocks the calling thread until the async call completes.
        resizeAsync(context, blobContentLength, timeout, requestId, leaseAccessConditions, modifiedAccessConditions).blockingAwait();
    }

    /**
     * Resize the Blob.
     *
     * @param context The context to associate with this operation.
     * @param blobContentLength This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary.
     * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>.
     * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
     * @param leaseAccessConditions Additional parameters for the operation.
     * @param modifiedAccessConditions Additional parameters for the operation.
     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @return a ServiceFuture which will be completed with the result of the network request.
     */
    // NOTE(review): ServiceFuture/ServiceCallback appear to have lost their generic parameters in this
    // copy (likely ServiceFuture<Void>) — confirm against the generated original.
    public ServiceFuture resizeAsync(Context context, @NonNull long blobContentLength, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) {
        return ServiceFuture.fromBody(resizeAsync(context, blobContentLength, timeout, requestId, leaseAccessConditions, modifiedAccessConditions), serviceCallback);
    }

    /**
     * Resize the Blob.
     *
     * @param context The context to associate with this operation.
     * @param blobContentLength This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary.
     * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>.
     * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
     * @param leaseAccessConditions Additional parameters for the operation.
     * @param modifiedAccessConditions Additional parameters for the operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @return a Single which performs the network request upon subscription.
     */
    public Single resizeWithRestResponseAsync(Context context, @NonNull long blobContentLength, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) {
        // Required client-level state must be present before the request can be built.
        if (this.client.url() == null) {
            throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null.");
        }
        if (this.client.version() == null) {
            throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null.");
        }
        Validator.validate(leaseAccessConditions);
        Validator.validate(modifiedAccessConditions);
        // Fixed query-parameter value identifying this REST operation.
        final String comp = "properties";
        // Flatten the optional access-condition holders into individual header values.
        String leaseId = null;
        if (leaseAccessConditions != null) {
            leaseId = leaseAccessConditions.leaseId();
        }
        OffsetDateTime ifModifiedSince = null;
        if (modifiedAccessConditions != null) {
            ifModifiedSince = modifiedAccessConditions.ifModifiedSince();
        }
        OffsetDateTime ifUnmodifiedSince = null;
        if (modifiedAccessConditions != null) {
            ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince();
        }
        String ifMatch = null;
        if (modifiedAccessConditions != null) {
            ifMatch = modifiedAccessConditions.ifMatch();
        }
        String ifNoneMatch = null;
        if (modifiedAccessConditions != null) {
            ifNoneMatch = modifiedAccessConditions.ifNoneMatch();
        }
        // Date conditions are transmitted in RFC 1123 format.
        DateTimeRfc1123 ifModifiedSinceConverted = null;
        if (ifModifiedSince != null) {
            ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince);
        }
        DateTimeRfc1123 ifUnmodifiedSinceConverted = null;
        if (ifUnmodifiedSince != null) {
            ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince);
        }
        return service.resize(context, this.client.url(), timeout, blobContentLength, this.client.version(), requestId, comp, leaseId, ifModifiedSinceConverted, ifUnmodifiedSinceConverted, ifMatch, ifNoneMatch);
    }

    /**
     * Resize the Blob.
     *
     * @param context The context to associate with this operation.
     * @param blobContentLength This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary.
     * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>.
     * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
     * @param leaseAccessConditions Additional parameters for the operation.
     * @param modifiedAccessConditions Additional parameters for the operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @return a Completable which performs the network request upon subscription.
     */
    public Completable resizeAsync(Context context, @NonNull long blobContentLength, Integer timeout, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) {
        // Convenience overload: callers that only care about completion drop the REST response details.
        return resizeWithRestResponseAsync(context, blobContentLength, timeout, requestId, leaseAccessConditions, modifiedAccessConditions)
            .toCompletable();
    }

    /**
     * Update the sequence number of the blob.
     *
     * @param context The context to associate with this operation.
     * @param sequenceNumberAction Required if the x-ms-blob-sequence-number header is set for the request. This property applies to page blobs only. This property indicates how the service should modify the blob's sequence number. Possible values include: 'max', 'update', 'increment'.
     * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>.
     * @param blobSequenceNumber Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of the sequence number must be between 0 and 2^63 - 1.
     * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
     * @param leaseAccessConditions Additional parameters for the operation.
     * @param modifiedAccessConditions Additional parameters for the operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws StorageErrorException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    public void updateSequenceNumber(Context context, @NonNull SequenceNumberActionType sequenceNumberAction, Integer timeout, Long blobSequenceNumber, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) {
        // Synchronous convenience wrapper: blocks the calling thread until the async call completes.
        updateSequenceNumberAsync(context, sequenceNumberAction, timeout, blobSequenceNumber, requestId, leaseAccessConditions, modifiedAccessConditions).blockingAwait();
    }

    /**
     * Update the sequence number of the blob.
     *
     * @param context The context to associate with this operation.
     * @param sequenceNumberAction Required if the x-ms-blob-sequence-number header is set for the request. This property applies to page blobs only. This property indicates how the service should modify the blob's sequence number. Possible values include: 'max', 'update', 'increment'.
     * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>.
     * @param blobSequenceNumber Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of the sequence number must be between 0 and 2^63 - 1.
     * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
     * @param leaseAccessConditions Additional parameters for the operation.
     * @param modifiedAccessConditions Additional parameters for the operation.
     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @return a ServiceFuture which will be completed with the result of the network request.
     */
    // NOTE(review): ServiceFuture/ServiceCallback appear to have lost their generic parameters in this
    // copy (likely ServiceFuture<Void>) — confirm against the generated original.
    public ServiceFuture updateSequenceNumberAsync(Context context, @NonNull SequenceNumberActionType sequenceNumberAction, Integer timeout, Long blobSequenceNumber, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) {
        return ServiceFuture.fromBody(updateSequenceNumberAsync(context, sequenceNumberAction, timeout, blobSequenceNumber, requestId, leaseAccessConditions, modifiedAccessConditions), serviceCallback);
    }

    /**
     * Update the sequence number of the blob.
     *
     * @param context The context to associate with this operation.
     * @param sequenceNumberAction Required if the x-ms-blob-sequence-number header is set for the request. This property applies to page blobs only. This property indicates how the service should modify the blob's sequence number. Possible values include: 'max', 'update', 'increment'.
     * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>.
     * @param blobSequenceNumber Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of the sequence number must be between 0 and 2^63 - 1.
     * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
     * @param leaseAccessConditions Additional parameters for the operation.
     * @param modifiedAccessConditions Additional parameters for the operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @return a Single which performs the network request upon subscription.
     */
    public Single updateSequenceNumberWithRestResponseAsync(Context context, @NonNull SequenceNumberActionType sequenceNumberAction, Integer timeout, Long blobSequenceNumber, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) {
        // Required client-level state and mandatory parameters must be present before the request can be built.
        if (this.client.url() == null) {
            throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null.");
        }
        if (sequenceNumberAction == null) {
            throw new IllegalArgumentException("Parameter sequenceNumberAction is required and cannot be null.");
        }
        if (this.client.version() == null) {
            throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null.");
        }
        Validator.validate(leaseAccessConditions);
        Validator.validate(modifiedAccessConditions);
        // Fixed query-parameter value identifying this REST operation.
        final String comp = "properties";
        // Flatten the optional access-condition holders into individual header values.
        String leaseId = null;
        if (leaseAccessConditions != null) {
            leaseId = leaseAccessConditions.leaseId();
        }
        OffsetDateTime ifModifiedSince = null;
        if (modifiedAccessConditions != null) {
            ifModifiedSince = modifiedAccessConditions.ifModifiedSince();
        }
        OffsetDateTime ifUnmodifiedSince = null;
        if (modifiedAccessConditions != null) {
            ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince();
        }
        String ifMatch = null;
        if (modifiedAccessConditions != null) {
            ifMatch = modifiedAccessConditions.ifMatch();
        }
        String ifNoneMatch = null;
        if (modifiedAccessConditions != null) {
            ifNoneMatch = modifiedAccessConditions.ifNoneMatch();
        }
        // Date conditions are transmitted in RFC 1123 format.
        DateTimeRfc1123 ifModifiedSinceConverted = null;
        if (ifModifiedSince != null) {
            ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince);
        }
        DateTimeRfc1123 ifUnmodifiedSinceConverted = null;
        if (ifUnmodifiedSince != null) {
            ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince);
        }
        return service.updateSequenceNumber(context, this.client.url(), timeout, sequenceNumberAction, blobSequenceNumber, this.client.version(), requestId, comp, leaseId, ifModifiedSinceConverted, ifUnmodifiedSinceConverted, ifMatch, ifNoneMatch);
    }

    /**
     * Update the sequence number of the blob.
     *
     * @param context The context to associate with this operation.
     * @param sequenceNumberAction Required if the x-ms-blob-sequence-number header is set for the request. This property applies to page blobs only. This property indicates how the service should modify the blob's sequence number. Possible values include: 'max', 'update', 'increment'.
     * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>.
     * @param blobSequenceNumber Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of the sequence number must be between 0 and 2^63 - 1.
     * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
     * @param leaseAccessConditions Additional parameters for the operation.
     * @param modifiedAccessConditions Additional parameters for the operation.
+ * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Completable updateSequenceNumberAsync(Context context, @NonNull SequenceNumberActionType sequenceNumberAction, Integer timeout, Long blobSequenceNumber, String requestId, LeaseAccessConditions leaseAccessConditions, ModifiedAccessConditions modifiedAccessConditions) { + return updateSequenceNumberWithRestResponseAsync(context, sequenceNumberAction, timeout, blobSequenceNumber, requestId, leaseAccessConditions, modifiedAccessConditions) + .toCompletable(); + } + + /** + * The Copy Incremental operation copies a snapshot of the source page blob to a destination page blob. The snapshot is copied such that only the differential changes between the previously copied snapshot are transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual. This API is supported since REST version 2016-05-31. + * + * @param context The context to associate with this operation. + * @param copySource Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob must either be public or must be authenticated via a shared access signature. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + public void copyIncremental(Context context, @NonNull URL copySource, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + copyIncrementalAsync(context, copySource, timeout, requestId, modifiedAccessConditions).blockingAwait(); + } + + /** + * The Copy Incremental operation copies a snapshot of the source page blob to a destination page blob. The snapshot is copied such that only the differential changes between the previously copied snapshot are transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual. This API is supported since REST version 2016-05-31. + * + * @param context The context to associate with this operation. + * @param copySource Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob must either be public or must be authenticated via a shared access signature. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture copyIncrementalAsync(Context context, @NonNull URL copySource, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(copyIncrementalAsync(context, copySource, timeout, requestId, modifiedAccessConditions), serviceCallback); + } + + /** + * The Copy Incremental operation copies a snapshot of the source page blob to a destination page blob. The snapshot is copied such that only the differential changes between the previously copied snapshot are transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual. This API is supported since REST version 2016-05-31. + * + * @param context The context to associate with this operation. + * @param copySource Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob must either be public or must be authenticated via a shared access signature. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Single copyIncrementalWithRestResponseAsync(Context context, @NonNull URL copySource, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (copySource == null) { + throw new IllegalArgumentException("Parameter copySource is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(copySource); + Validator.validate(modifiedAccessConditions); + final String comp = "incrementalcopy"; + OffsetDateTime ifModifiedSince = null; + if (modifiedAccessConditions != null) { + ifModifiedSince = modifiedAccessConditions.ifModifiedSince(); + } + OffsetDateTime ifUnmodifiedSince = null; + if (modifiedAccessConditions != null) { + ifUnmodifiedSince = modifiedAccessConditions.ifUnmodifiedSince(); + } + String ifMatch = null; + if (modifiedAccessConditions != null) { + ifMatch = modifiedAccessConditions.ifMatch(); + } + String ifNoneMatch = null; + if (modifiedAccessConditions != null) { + ifNoneMatch = modifiedAccessConditions.ifNoneMatch(); + } + DateTimeRfc1123 ifModifiedSinceConverted = null; + if (ifModifiedSince != null) { + ifModifiedSinceConverted = new DateTimeRfc1123(ifModifiedSince); + } + DateTimeRfc1123 ifUnmodifiedSinceConverted = null; + if (ifUnmodifiedSince != null) { + ifUnmodifiedSinceConverted = new DateTimeRfc1123(ifUnmodifiedSince); + } + return service.copyIncremental(context, this.client.url(), timeout, copySource, this.client.version(), requestId, comp, ifModifiedSinceConverted, ifUnmodifiedSinceConverted, ifMatch, ifNoneMatch); + } + + /** + * The Copy Incremental operation copies a snapshot of the source page blob to a destination page blob. 
The snapshot is copied such that only the differential changes between the previously copied snapshot are transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual. This API is supported since REST version 2016-05-31. + * + * @param context The context to associate with this operation. + * @param copySource Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob must either be public or must be authenticated via a shared access signature. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param modifiedAccessConditions Additional parameters for the operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Completable copyIncrementalAsync(Context context, @NonNull URL copySource, Integer timeout, String requestId, ModifiedAccessConditions modifiedAccessConditions) { + return copyIncrementalWithRestResponseAsync(context, copySource, timeout, requestId, modifiedAccessConditions) + .toCompletable(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/GeneratedServices.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/GeneratedServices.java new file mode 100644 index 0000000000000..9399927fc07c6 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/GeneratedServices.java @@ -0,0 +1,431 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage; + +import com.microsoft.azure.storage.blob.models.ListContainersIncludeType; +import com.microsoft.azure.storage.blob.models.ListContainersSegmentResponse; +import com.microsoft.azure.storage.blob.models.ServiceGetAccountInfoResponse; +import com.microsoft.azure.storage.blob.models.ServiceGetPropertiesResponse; +import com.microsoft.azure.storage.blob.models.ServiceGetStatisticsResponse; +import com.microsoft.azure.storage.blob.models.ServiceListContainersSegmentResponse; +import com.microsoft.azure.storage.blob.models.ServiceSetPropertiesResponse; +import com.microsoft.azure.storage.blob.models.StorageErrorException; +import com.microsoft.azure.storage.blob.models.StorageServiceProperties; +import com.microsoft.azure.storage.blob.models.StorageServiceStats; +import com.microsoft.rest.v2.Context; +import com.microsoft.rest.v2.RestProxy; +import com.microsoft.rest.v2.ServiceCallback; +import com.microsoft.rest.v2.ServiceFuture; +import 
com.microsoft.rest.v2.Validator; +import com.microsoft.rest.v2.annotations.BodyParam; +import com.microsoft.rest.v2.annotations.ExpectedResponses; +import com.microsoft.rest.v2.annotations.GET; +import com.microsoft.rest.v2.annotations.HeaderParam; +import com.microsoft.rest.v2.annotations.Host; +import com.microsoft.rest.v2.annotations.HostParam; +import com.microsoft.rest.v2.annotations.PUT; +import com.microsoft.rest.v2.annotations.QueryParam; +import com.microsoft.rest.v2.annotations.UnexpectedResponseExceptionType; +import io.reactivex.Completable; +import io.reactivex.Maybe; +import io.reactivex.Single; +import io.reactivex.annotations.NonNull; + +/** + * An instance of this class provides access to all the operations defined in + * GeneratedServices. + */ +public final class GeneratedServices { + /** + * The proxy service used to perform REST calls. + */ + private ServicesService service; + + /** + * The service client containing this operation class. + */ + private GeneratedStorageClient client; + + /** + * Initializes an instance of GeneratedServices. + * + * @param client the instance of the service client containing this operation class. + */ + public GeneratedServices(GeneratedStorageClient client) { + this.service = RestProxy.create(ServicesService.class, client); + this.client = client; + } + + /** + * The interface defining all the services for GeneratedServices to be used + * by the proxy service to perform REST calls. 
+ */ + @Host("{url}") + private interface ServicesService { + @PUT("") + @ExpectedResponses({202}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single setProperties(Context context, @HostParam("url") String url, @BodyParam("application/xml; charset=utf-8") StorageServiceProperties storageServiceProperties, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("restype") String restype, @QueryParam("comp") String comp); + + @GET("") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single getProperties(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("restype") String restype, @QueryParam("comp") String comp); + + @GET("") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single getStatistics(Context context, @HostParam("url") String url, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("restype") String restype, @QueryParam("comp") String comp); + + @GET("") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single listContainersSegment(Context context, @HostParam("url") String url, @QueryParam("prefix") String prefix, @QueryParam("marker") String marker, @QueryParam("maxresults") Integer maxresults, @QueryParam("include") ListContainersIncludeType include, @QueryParam("timeout") Integer timeout, @HeaderParam("x-ms-version") String version, @HeaderParam("x-ms-client-request-id") String requestId, @QueryParam("comp") String comp); + + @GET("") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType(StorageErrorException.class) + Single getAccountInfo(Context 
context, @HostParam("url") String url, @HeaderParam("x-ms-version") String version, @QueryParam("restype") String restype, @QueryParam("comp") String comp); + } + + /** + * Sets properties for a storage account's Blob service endpoint, including properties for Storage Analytics and CORS (Cross-Origin Resource Sharing) rules. + * + * @param context The context to associate with this operation. + * @param storageServiceProperties The StorageService properties. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + public void setProperties(Context context, @NonNull StorageServiceProperties storageServiceProperties, Integer timeout, String requestId) { + setPropertiesAsync(context, storageServiceProperties, timeout, requestId).blockingAwait(); + } + + /** + * Sets properties for a storage account's Blob service endpoint, including properties for Storage Analytics and CORS (Cross-Origin Resource Sharing) rules. + * + * @param context The context to associate with this operation. + * @param storageServiceProperties The StorageService properties. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. 
+ * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture setPropertiesAsync(Context context, @NonNull StorageServiceProperties storageServiceProperties, Integer timeout, String requestId, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(setPropertiesAsync(context, storageServiceProperties, timeout, requestId), serviceCallback); + } + + /** + * Sets properties for a storage account's Blob service endpoint, including properties for Storage Analytics and CORS (Cross-Origin Resource Sharing) rules. + * + * @param context The context to associate with this operation. + * @param storageServiceProperties The StorageService properties. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Single setPropertiesWithRestResponseAsync(Context context, @NonNull StorageServiceProperties storageServiceProperties, Integer timeout, String requestId) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (storageServiceProperties == null) { + throw new IllegalArgumentException("Parameter storageServiceProperties is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + Validator.validate(storageServiceProperties); + final String restype = "service"; + final String comp = "properties"; + return service.setProperties(context, this.client.url(), storageServiceProperties, timeout, this.client.version(), requestId, restype, comp); + } + + /** + * Sets properties for a storage account's Blob service endpoint, including properties for Storage Analytics and CORS (Cross-Origin Resource Sharing) rules. + * + * @param context The context to associate with this operation. + * @param storageServiceProperties The StorageService properties. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Completable setPropertiesAsync(Context context, @NonNull StorageServiceProperties storageServiceProperties, Integer timeout, String requestId) { + return setPropertiesWithRestResponseAsync(context, storageServiceProperties, timeout, requestId) + .toCompletable(); + } + + /** + * gets the properties of a storage account's Blob service, including properties for Storage Analytics and CORS (Cross-Origin Resource Sharing) rules. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the StorageServiceProperties object if successful. + */ + public StorageServiceProperties getProperties(Context context, Integer timeout, String requestId) { + return getPropertiesAsync(context, timeout, requestId).blockingGet(); + } + + /** + * gets the properties of a storage account's Blob service, including properties for Storage Analytics and CORS (Cross-Origin Resource Sharing) rules. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. 
+ * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. + */ + public ServiceFuture getPropertiesAsync(Context context, Integer timeout, String requestId, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(getPropertiesAsync(context, timeout, requestId), serviceCallback); + } + + /** + * gets the properties of a storage account's Blob service, including properties for Storage Analytics and CORS (Cross-Origin Resource Sharing) rules. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. 
+ */ + public Single getPropertiesWithRestResponseAsync(Context context, Integer timeout, String requestId) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + final String restype = "service"; + final String comp = "properties"; + return service.getProperties(context, this.client.url(), timeout, this.client.version(), requestId, restype, comp); + } + + /** + * gets the properties of a storage account's Blob service, including properties for Storage Analytics and CORS (Cross-Origin Resource Sharing) rules. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Maybe getPropertiesAsync(Context context, Integer timeout, String requestId) { + return getPropertiesWithRestResponseAsync(context, timeout, requestId) + .flatMapMaybe((ServiceGetPropertiesResponse res) -> res.body() == null ? Maybe.empty() : Maybe.just(res.body())); + } + + /** + * Retrieves statistics related to replication for the Blob service. It is only available on the secondary location endpoint when read-access geo-redundant replication is enabled for the storage account. + * + * @param context The context to associate with this operation. 
+ * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the StorageServiceStats object if successful. + */ + public StorageServiceStats getStatistics(Context context, Integer timeout, String requestId) { + return getStatisticsAsync(context, timeout, requestId).blockingGet(); + } + + /** + * Retrieves statistics related to replication for the Blob service. It is only available on the secondary location endpoint when read-access geo-redundant replication is enabled for the storage account. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. 
+ */ + public ServiceFuture getStatisticsAsync(Context context, Integer timeout, String requestId, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(getStatisticsAsync(context, timeout, requestId), serviceCallback); + } + + /** + * Retrieves statistics related to replication for the Blob service. It is only available on the secondary location endpoint when read-access geo-redundant replication is enabled for the storage account. + * + * @param context The context to associate with this operation. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Single getStatisticsWithRestResponseAsync(Context context, Integer timeout, String requestId) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + final String restype = "service"; + final String comp = "stats"; + return service.getStatistics(context, this.client.url(), timeout, this.client.version(), requestId, restype, comp); + } + + /** + * Retrieves statistics related to replication for the Blob service. It is only available on the secondary location endpoint when read-access geo-redundant replication is enabled for the storage account. + * + * @param context The context to associate with this operation. 
+ * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>.
+ * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ * @throws IllegalArgumentException thrown if parameters fail the validation.
+ * @return a Maybe which performs the network request upon subscription.
+ */
+ public Maybe<StorageServiceStats> getStatisticsAsync(Context context, Integer timeout, String requestId) {
+ return getStatisticsWithRestResponseAsync(context, timeout, requestId)
+ .flatMapMaybe((ServiceGetStatisticsResponse res) -> res.body() == null ? Maybe.empty() : Maybe.just(res.body()));
+ }
+
+ /**
+ * The List Containers Segment operation returns a list of the containers under the specified account.
+ *
+ * @param context The context to associate with this operation.
+ * @param prefix Filters the results to return only containers whose name begins with the specified prefix.
+ * @param marker A string value that identifies the portion of the list of containers to be returned with the next listing operation. The operation returns the NextMarker value within the response body if the listing operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the client.
+ * @param maxresults Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. 
Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or than the default of 5000. + * @param include Include this parameter to specify that the container's metadata be returned as part of the response body. Possible values include: 'metadata'. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws StorageErrorException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the ListContainersSegmentResponse object if successful. + */ + public ListContainersSegmentResponse listContainersSegment(Context context, String prefix, String marker, Integer maxresults, ListContainersIncludeType include, Integer timeout, String requestId) { + return listContainersSegmentAsync(context, prefix, marker, maxresults, include, timeout, requestId).blockingGet(); + } + + /** + * The List Containers Segment operation returns a list of the containers under the specified account. + * + * @param context The context to associate with this operation. + * @param prefix Filters the results to return only containers whose name begins with the specified prefix. + * @param marker A string value that identifies the portion of the list of containers to be returned with the next listing operation. 
The operation returns the NextMarker value within the response body if the listing operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the client. + * @param maxresults Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or than the default of 5000. + * @param include Include this parameter to specify that the container's metadata be returned as part of the response body. Possible values include: 'metadata'. + * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a ServiceFuture which will be completed with the result of the network request. 
+ */ + public ServiceFuture listContainersSegmentAsync(Context context, String prefix, String marker, Integer maxresults, ListContainersIncludeType include, Integer timeout, String requestId, ServiceCallback serviceCallback) { + return ServiceFuture.fromBody(listContainersSegmentAsync(context, prefix, marker, maxresults, include, timeout, requestId), serviceCallback); + } + + /** + * The List Containers Segment operation returns a list of the containers under the specified account. + * + * @param context The context to associate with this operation. + * @param prefix Filters the results to return only containers whose name begins with the specified prefix. + * @param marker A string value that identifies the portion of the list of containers to be returned with the next listing operation. The operation returns the NextMarker value within the response body if the listing operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the client. + * @param maxresults Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or than the default of 5000. + * @param include Include this parameter to specify that the container's metadata be returned as part of the response body. Possible values include: 'metadata'. + * @param timeout The timeout parameter is expressed in seconds. 
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @return a Single which performs the network request upon subscription. + */ + public Single listContainersSegmentWithRestResponseAsync(Context context, String prefix, String marker, Integer maxresults, ListContainersIncludeType include, Integer timeout, String requestId) { + if (this.client.url() == null) { + throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null."); + } + if (this.client.version() == null) { + throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null."); + } + final String comp = "list"; + return service.listContainersSegment(context, this.client.url(), prefix, marker, maxresults, include, timeout, this.client.version(), requestId, comp); + } + + /** + * The List Containers Segment operation returns a list of the containers under the specified account. + * + * @param context The context to associate with this operation. + * @param prefix Filters the results to return only containers whose name begins with the specified prefix. + * @param marker A string value that identifies the portion of the list of containers to be returned with the next listing operation. The operation returns the NextMarker value within the response body if the listing operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the client. 
+ * @param maxresults Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or than the default of 5000.
+ * @param include Include this parameter to specify that the container's metadata be returned as part of the response body. Possible values include: 'metadata'.
+ * @param timeout The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>.
+ * @param requestId Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ * @throws IllegalArgumentException thrown if parameters fail the validation.
+ * @return a Maybe which performs the network request upon subscription.
+ */
+ public Maybe<ListContainersSegmentResponse> listContainersSegmentAsync(Context context, String prefix, String marker, Integer maxresults, ListContainersIncludeType include, Integer timeout, String requestId) {
+ return listContainersSegmentWithRestResponseAsync(context, prefix, marker, maxresults, include, timeout, requestId)
+ .flatMapMaybe((ServiceListContainersSegmentResponse res) -> res.body() == null ? Maybe.empty() : Maybe.just(res.body()));
+ }
+
+ /**
+ * Returns the sku name and account kind.
+ *
+ * @param context The context to associate with this operation.
+ * @throws IllegalArgumentException thrown if parameters fail the validation.
+ * @throws StorageErrorException thrown if the request is rejected by server. 
+ * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
+ */
+ public void getAccountInfo(Context context) {
+ getAccountInfoAsync(context).blockingAwait();
+ }
+
+ /**
+ * Returns the sku name and account kind.
+ *
+ * @param context The context to associate with this operation.
+ * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
+ * @throws IllegalArgumentException thrown if parameters fail the validation.
+ * @return a ServiceFuture which will be completed with the result of the network request.
+ */
+ public ServiceFuture getAccountInfoAsync(Context context, ServiceCallback serviceCallback) {
+ return ServiceFuture.fromBody(getAccountInfoAsync(context), serviceCallback);
+ }
+
+ /**
+ * Returns the sku name and account kind.
+ *
+ * @param context The context to associate with this operation.
+ * @throws IllegalArgumentException thrown if parameters fail the validation.
+ * @return a Single which performs the network request upon subscription.
+ */
+ public Single getAccountInfoWithRestResponseAsync(Context context) {
+ if (this.client.url() == null) {
+ throw new IllegalArgumentException("Parameter this.client.url() is required and cannot be null.");
+ }
+ if (this.client.version() == null) {
+ throw new IllegalArgumentException("Parameter this.client.version() is required and cannot be null.");
+ }
+ final String restype = "account";
+ final String comp = "properties";
+ return service.getAccountInfo(context, this.client.url(), this.client.version(), restype, comp);
+ }
+
+ /**
+ * Returns the sku name and account kind.
+ *
+ * @param context The context to associate with this operation.
+ * @throws IllegalArgumentException thrown if parameters fail the validation.
+ * @return a Completable which performs the network request upon subscription. 
+ */
+ public Completable getAccountInfoAsync(Context context) {
+ return getAccountInfoWithRestResponseAsync(context)
+ .toCompletable();
+ }
+}
diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/GeneratedStorageClient.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/GeneratedStorageClient.java
new file mode 100644
index 0000000000000..c969b3c7a3c84
--- /dev/null
+++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/GeneratedStorageClient.java
@@ -0,0 +1,177 @@
+/**
+ * Copyright (c) Microsoft Corporation. All rights reserved.
+ * Licensed under the MIT License. See License.txt in the project root for
+ * license information.
+ *
+ * Code generated by Microsoft (R) AutoRest Code Generator.
+ * Changes may cause incorrect behavior and will be lost if the code is
+ * regenerated.
+ */
+
+package com.microsoft.azure.storage;
+
+import com.microsoft.rest.v2.RestProxy;
+import com.microsoft.rest.v2.ServiceClient;
+import com.microsoft.rest.v2.http.HttpPipeline;
+import io.reactivex.annotations.NonNull;
+
+/**
+ * Initializes a new instance of the GeneratedStorageClient type.
+ */
+public final class GeneratedStorageClient extends ServiceClient {
+ /**
+ * The URL of the service account, container, or blob that is the target of the desired operation.
+ */
+ private String url;
+
+ /**
+ * Gets The URL of the service account, container, or blob that is the target of the desired operation.
+ *
+ * @return the url value.
+ */
+ public String url() {
+ return this.url;
+ }
+
+ /**
+ * Sets The URL of the service account, container, or blob that is the target of the desired operation.
+ *
+ * @param url the url value.
+ * @return the service client itself.
+ */
+ public GeneratedStorageClient withUrl(String url) {
+ this.url = url;
+ return this;
+ }
+
+ /**
+ * Specifies the version of the operation to use for this request. 
+ */ + private String version; + + /** + * Gets Specifies the version of the operation to use for this request. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Sets Specifies the version of the operation to use for this request. + * + * @param version the version value. + * @return the service client itself. + */ + public GeneratedStorageClient withVersion(String version) { + this.version = version; + return this; + } + + /** + * The GeneratedServices object to access its operations. + */ + private GeneratedServices generatedServices; + + /** + * Gets the GeneratedServices object to access its operations. + * + * @return the GeneratedServices object. + */ + public GeneratedServices generatedServices() { + return this.generatedServices; + } + + /** + * The GeneratedContainers object to access its operations. + */ + private GeneratedContainers generatedContainers; + + /** + * Gets the GeneratedContainers object to access its operations. + * + * @return the GeneratedContainers object. + */ + public GeneratedContainers generatedContainers() { + return this.generatedContainers; + } + + /** + * The GeneratedBlobs object to access its operations. + */ + private GeneratedBlobs generatedBlobs; + + /** + * Gets the GeneratedBlobs object to access its operations. + * + * @return the GeneratedBlobs object. + */ + public GeneratedBlobs generatedBlobs() { + return this.generatedBlobs; + } + + /** + * The GeneratedPageBlobs object to access its operations. + */ + private GeneratedPageBlobs generatedPageBlobs; + + /** + * Gets the GeneratedPageBlobs object to access its operations. + * + * @return the GeneratedPageBlobs object. + */ + public GeneratedPageBlobs generatedPageBlobs() { + return this.generatedPageBlobs; + } + + /** + * The GeneratedAppendBlobs object to access its operations. + */ + private GeneratedAppendBlobs generatedAppendBlobs; + + /** + * Gets the GeneratedAppendBlobs object to access its operations. 
+ * + * @return the GeneratedAppendBlobs object. + */ + public GeneratedAppendBlobs generatedAppendBlobs() { + return this.generatedAppendBlobs; + } + + /** + * The GeneratedBlockBlobs object to access its operations. + */ + private GeneratedBlockBlobs generatedBlockBlobs; + + /** + * Gets the GeneratedBlockBlobs object to access its operations. + * + * @return the GeneratedBlockBlobs object. + */ + public GeneratedBlockBlobs generatedBlockBlobs() { + return this.generatedBlockBlobs; + } + + /** + * Initializes an instance of GeneratedStorageClient client. + */ + public GeneratedStorageClient() { + this(RestProxy.createDefaultPipeline()); + } + + /** + * Initializes an instance of GeneratedStorageClient client. + * + * @param httpPipeline The HTTP pipeline to send requests through. + */ + public GeneratedStorageClient(@NonNull HttpPipeline httpPipeline) { + super(httpPipeline); + this.generatedServices = new GeneratedServices(this); + this.generatedContainers = new GeneratedContainers(this); + this.generatedBlobs = new GeneratedBlobs(this); + this.generatedPageBlobs = new GeneratedPageBlobs(this); + this.generatedAppendBlobs = new GeneratedAppendBlobs(this); + this.generatedBlockBlobs = new GeneratedBlockBlobs(this); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/AccountSASPermission.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/AccountSASPermission.java new file mode 100644 index 0000000000000..b6c833e1ce519 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/AccountSASPermission.java @@ -0,0 +1,263 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import java.util.Locale; + +/** + * This is a helper class to construct a string representing the permissions granted by an AccountSAS. Setting a value + * to true means that any SAS which uses these permissions will grant permissions for that operation. Once all the + * values are set, this should be serialized with toString and set as the permissions field on an + * {@link AccountSASSignatureValues} object. It is possible to construct the permissions string without this class, but + * the order of the permissions is particular and this class guarantees correctness. + */ +public final class AccountSASPermission { + + private boolean read; + + private boolean add; + + private boolean create; + + private boolean write; + + private boolean delete; + + private boolean list; + + private boolean update; + + private boolean processMessages; + + /** + * Initializes an {@code AccountSASPermission} object with all fields set to false. + */ + public AccountSASPermission() { + } + + /** + * Creates an {@code AccountSASPermission} from the specified permissions string. This method will throw an + * {@code IllegalArgumentException} if it encounters a character that does not correspond to a valid permission. + * + * @param permString + * A {@code String} which represents the {@code SharedAccessAccountPermissions}. + * + * @return An {@code AccountSASPermission} object generated from the given {@code String}. 
+ */ + public static AccountSASPermission parse(String permString) { + AccountSASPermission permissions = new AccountSASPermission(); + + for (int i = 0; i < permString.length(); i++) { + char c = permString.charAt(i); + switch (c) { + case 'r': + permissions.read = true; + break; + case 'w': + permissions.write = true; + break; + case 'd': + permissions.delete = true; + break; + case 'l': + permissions.list = true; + break; + case 'a': + permissions.add = true; + break; + case 'c': + permissions.create = true; + break; + case 'u': + permissions.update = true; + break; + case 'p': + permissions.processMessages = true; + break; + default: + throw new IllegalArgumentException( + String.format(Locale.ROOT, SR.ENUM_COULD_NOT_BE_PARSED_INVALID_VALUE, "Permissions", permString, c)); + } + } + return permissions; + } + + /** + * Permission to read resources and list queues and tables granted. + */ + public boolean read() { + return read; + } + + /** + * Permission to read resources and list queues and tables granted. + */ + public AccountSASPermission withRead(boolean read) { + this.read = read; + return this; + } + + /** + * Permission to add messages, table entities, and append to blobs granted. + */ + public boolean add() { + return add; + } + + /** + * Permission to add messages, table entities, and append to blobs granted. + */ + public AccountSASPermission withAdd(boolean add) { + this.add = add; + return this; + } + + /** + * Permission to create blobs and files granted. + */ + public boolean create() { + return create; + } + + /** + * Permission to create blobs and files granted. + */ + public AccountSASPermission withCreate(boolean create) { + this.create = create; + return this; + } + + /** + * Permission to write resources granted. + */ + public boolean write() { + return write; + } + + /** + * Permission to write resources granted. 
+ */ + public AccountSASPermission withWrite(boolean write) { + this.write = write; + return this; + } + + /** + * Permission to delete resources granted. + */ + public boolean delete() { + return delete; + } + + /** + * Permission to delete resources granted. + */ + public AccountSASPermission withDelete(boolean delete) { + this.delete = delete; + return this; + } + + /** + * Permission to list blob containers, blobs, shares, directories, and files granted. + */ + public boolean list() { + return list; + } + + /** + * Permission to list blob containers, blobs, shares, directories, and files granted. + */ + public AccountSASPermission withList(boolean list) { + this.list = list; + return this; + } + + /** + * Permissions to update messages and table entities granted. + */ + public boolean update() { + return update; + } + + /** + * Permissions to update messages and table entities granted. + */ + public AccountSASPermission withUpdate(boolean update) { + this.update = update; + return this; + } + + /** + * Permission to get and delete messages granted. + */ + public boolean processMessages() { + return processMessages; + } + + /** + * Permission to get and delete messages granted. + */ + public AccountSASPermission withProcessMessages(boolean processMessages) { + this.processMessages = processMessages; + return this; + } + + /** + * Converts the given permissions to a {@code String}. Using this method will guarantee the permissions are in an + * order accepted by the service. + * + * @return A {@code String} which represents the {@code AccountSASPermissions}. 
+ */ + @Override + public String toString() { + // The order of the characters should be as specified here to ensure correctness: + // https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas + final StringBuilder builder = new StringBuilder(); + + if (this.read) { + builder.append('r'); + } + + if (this.write) { + builder.append('w'); + } + + if (this.delete) { + builder.append('d'); + } + + if (this.list) { + builder.append('l'); + } + + if (this.add) { + builder.append('a'); + } + + if (this.create) { + builder.append('c'); + } + + if (this.update) { + builder.append('u'); + } + + if (this.processMessages) { + builder.append('p'); + } + + return builder.toString(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/AccountSASResourceType.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/AccountSASResourceType.java new file mode 100644 index 0000000000000..094f647cb66dc --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/AccountSASResourceType.java @@ -0,0 +1,144 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import java.util.Locale; + +/** + * This is a helper class to construct a string representing the resources accessible by an AccountSAS. 
Setting a value + * to true means that any SAS which uses these permissions will grant access to that resource type. Once all the + * values are set, this should be serialized with toString and set as the resources field on an + * {@link AccountSASSignatureValues} object. It is possible to construct the resources string without this class, but + * the order of the resources is particular and this class guarantees correctness. + */ +public final class AccountSASResourceType { + + private boolean service; + + private boolean container; + + private boolean object; + + /** + * Initializes an {@code AccountSASResourceType} object with all fields set to false. + */ + public AccountSASResourceType() { + } + + /** + * Creates an {@code AccountSASResourceType} from the specified resource types string. This method will throw an + * {@code IllegalArgumentException} if it encounters a character that does not correspond to a valid resource type. + * + * @param resourceTypesString + * A {@code String} which represents the {@code AccountSASResourceTypes}. + * + * @return A {@code AccountSASResourceType} generated from the given {@code String}. + */ + public static AccountSASResourceType parse(String resourceTypesString) { + AccountSASResourceType resourceType = new AccountSASResourceType(); + + for (int i = 0; i < resourceTypesString.length(); i++) { + char c = resourceTypesString.charAt(i); + switch (c) { + case 's': + resourceType.service = true; + break; + case 'c': + resourceType.container = true; + break; + case 'o': + resourceType.object = true; + break; + default: + throw new IllegalArgumentException( + String.format(Locale.ROOT, SR.ENUM_COULD_NOT_BE_PARSED_INVALID_VALUE, + "Resource Types", resourceTypesString, c)); + } + } + return resourceType; + } + + /** + * Permission to access service level APIs granted. + */ + public boolean service() { + return service; + } + + /** + * Permission to access service level APIs granted. 
+ */ + public AccountSASResourceType withService(boolean service) { + this.service = service; + return this; + } + + /** + * Permission to access container level APIs (Blob Containers, Tables, Queues, File Shares) granted. + */ + public boolean container() { + return container; + } + + /** + * Permission to access container level APIs (Blob Containers, Tables, Queues, File Shares) granted. + */ + public AccountSASResourceType withContainer(boolean container) { + this.container = container; + return this; + } + + /** + * Permission to access object level APIs (Blobs, Table Entities, Queue Messages, Files) granted. + */ + public boolean object() { + return object; + } + + /** + * Permission to access object level APIs (Blobs, Table Entities, Queue Messages, Files) granted. + */ + public AccountSASResourceType withObject(boolean object) { + this.object = object; + return this; + } + + /** + * Converts the given resource types to a {@code String}. Using this method will guarantee the resource types are in + * an order accepted by the service. + * + * @return A {@code String} which represents the {@code AccountSASResourceTypes}. 
+ */ + @Override + public String toString() { + // The order of the characters should be as specified here to ensure correctness: + // https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas + StringBuilder builder = new StringBuilder(); + + if (this.service) { + builder.append('s'); + } + + if (this.container) { + builder.append('c'); + } + + if (this.object) { + builder.append('o'); + } + + return builder.toString(); + } +} \ No newline at end of file diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/AccountSASService.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/AccountSASService.java new file mode 100644 index 0000000000000..26a2e337a90e9 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/AccountSASService.java @@ -0,0 +1,165 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import java.util.Locale; + +/** + * This is a helper class to construct a string representing the services accessible by an AccountSAS. Setting a value + * to true means that any SAS which uses these permissions will grant access to that service. Once all the + * values are set, this should be serialized with toString and set as the services field on an + * {@link AccountSASSignatureValues} object. 
It is possible to construct the services string without this class, but + * the order of the services is particular and this class guarantees correctness. + */ +public final class AccountSASService { + + private boolean blob; + + private boolean file; + + private boolean queue; + + private boolean table; + + /** + * Initializes an {@code AccountSASService} object with all fields set to false. + */ + public AccountSASService() { + } + + /** + * Creates an {@code AccountSASService} from the specified services string. This method will throw an + * {@code IllegalArgumentException} if it encounters a character that does not correspond to a valid service. + * + * @param servicesString + * A {@code String} which represents the {@code SharedAccessAccountServices}. + * + * @return A {@code AccountSASService} generated from the given {@code String}. + */ + public static AccountSASService parse(String servicesString) { + AccountSASService services = new AccountSASService(); + + for (int i = 0; i < servicesString.length(); i++) { + char c = servicesString.charAt(i); + switch (c) { + case 'b': + services.blob = true; + break; + case 'f': + services.file = true; + break; + case 'q': + services.queue = true; + break; + case 't': + services.table = true; + break; + default: + throw new IllegalArgumentException( + String.format(Locale.ROOT, SR.ENUM_COULD_NOT_BE_PARSED_INVALID_VALUE, "Services", + servicesString, c)); + } + } + return services; + } + + /** + * Permission to access blob resources granted. + */ + public boolean blob() { + return blob; + } + + /** + * Permission to access blob resources granted. + */ + public AccountSASService withBlob(boolean blob) { + this.blob = blob; + return this; + } + + /** + * Permission to access file resources granted. + */ + public boolean file() { + return file; + } + + /** + * Permission to access file resources granted. 
+ */ + public AccountSASService withFile(boolean file) { + this.file = file; + return this; + } + + /** + * Permission to access queue resources granted. + */ + public boolean queue() { + return queue; + } + + /** + * Permission to access queue resources granted. + */ + public AccountSASService withQueue(boolean queue) { + this.queue = queue; + return this; + } + + /** + * Permission to access table resources granted. + */ + public boolean table() { + return table; + } + + /** + * Permission to access table resources granted. + */ + public AccountSASService withTable(boolean table) { + this.table = table; + return this; + } + + /** + * Converts the given services to a {@code String}. Using this method will guarantee the services are in an order + * accepted by the service. + * + * @return A {@code String} which represents the {@code AccountSASServices}. + */ + @Override + public String toString() { + // The order of the characters should be as specified here to ensure correctness: + // https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas + StringBuilder value = new StringBuilder(); + + if (this.blob) { + value.append('b'); + } + if (this.queue) { + value.append('q'); + } + if (this.table) { + value.append('t'); + } + if (this.file) { + value.append('f'); + } + + return value.toString(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/AccountSASSignatureValues.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/AccountSASSignatureValues.java new file mode 100644 index 0000000000000..79105d548cd7e --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/AccountSASSignatureValues.java @@ -0,0 +1,237 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import java.security.InvalidKeyException; +import java.time.OffsetDateTime; + +/** + * AccountSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account. Once + * all the values here are set appropriately, call generateSASQueryParameters to obtain a representation of the SAS + * which can actually be applied to blob urls. Note: that both this class and {@link SASQueryParameters} exist because + * the former is mutable and a logical representation while the latter is immutable and used to generate actual REST + * requests. + *

    + * Please see + * here + * for more conceptual information on SAS: + *

    + *

    + * Please see + * here for further + * descriptions of the parameters, including which are required: + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=account_sas "Sample code for AccountSASSignatureValues")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ +public final class AccountSASSignatureValues { + + private String version = Constants.HeaderConstants.TARGET_STORAGE_VERSION; + + private SASProtocol protocol; + + private OffsetDateTime startTime; + + private OffsetDateTime expiryTime; + + private String permissions; + + private IPRange ipRange; + + private String services; + + private String resourceTypes; + + /** + * Initializes an {@code AccountSASSignatureValues} object with the version number set to the default and all + * other values empty. + */ + public AccountSASSignatureValues() { + } + + /** + * If null or empty, this defaults to the service version targeted by this version of the library. + */ + public String version() { + return version; + } + + /** + * If null or empty, this defaults to the service version targeted by this version of the library. + */ + public AccountSASSignatureValues withVersion(String version) { + this.version = version; + return this; + } + + /** + * {@link SASProtocol} + */ + public SASProtocol protocol() { + return protocol; + } + + /** + * {@link SASProtocol} + */ + public AccountSASSignatureValues withProtocol(SASProtocol protocol) { + this.protocol = protocol; + return this; + } + + /** + * When the SAS will take effect. + */ + public OffsetDateTime startTime() { + return startTime; + } + + /** + * When the SAS will take effect. 
+ */ + public AccountSASSignatureValues withStartTime(OffsetDateTime startTime) { + this.startTime = startTime; + return this; + } + + /** + * The time after which the SAS will no longer work. + */ + public OffsetDateTime expiryTime() { + return expiryTime; + } + + /** + * The time after which the SAS will no longer work. + */ + public AccountSASSignatureValues withExpiryTime(OffsetDateTime expiryTime) { + this.expiryTime = expiryTime; + return this; + } + + /** + * Specifies which operations the SAS user may perform. Please refer to {@link AccountSASPermission} for help + * constructing the permissions string. + */ + public String permissions() { + return permissions; + } + + /** + * Specifies which operations the SAS user may perform. Please refer to {@link AccountSASPermission} for help + * constructing the permissions string. + */ + public AccountSASSignatureValues withPermissions(String permissions) { + this.permissions = permissions; + return this; + } + + /** + * {@link IPRange} + */ + public IPRange ipRange() { + return ipRange; + } + + /** + * {@link IPRange} + */ + public AccountSASSignatureValues withIpRange(IPRange ipRange) { + this.ipRange = ipRange; + return this; + } + + /** + * The values that indicate the services accessible with this SAS. Please refer to {@link AccountSASService} to + * construct this value. + */ + public String services() { + return services; + } + + /** + * The values that indicate the services accessible with this SAS. Please refer to {@link AccountSASService} to + * construct this value. + */ + public AccountSASSignatureValues withServices(String services) { + this.services = services; + return this; + } + + /** + * The values that indicate the resource types accessible with this SAS. Please refer + * to {@link AccountSASResourceType} to construct this value. + */ + public String resourceTypes() { + return resourceTypes; + } + + /** + * The values that indicate the resource types accessible with this SAS. 
Please refer + * to {@link AccountSASResourceType} to construct this value. + */ + public AccountSASSignatureValues withResourceTypes(String resourceTypes) { + this.resourceTypes = resourceTypes; + return this; + } + + /** + * Generates a {@link SASQueryParameters} object which contains all SAS query parameters needed to make an actual + * REST request. + * + * @param sharedKeyCredentials + * Credentials for the storage account and corresponding primary or secondary key. + * + * @return {@link SASQueryParameters} + */ + public SASQueryParameters generateSASQueryParameters(SharedKeyCredentials sharedKeyCredentials) { + Utility.assertNotNull("SharedKeyCredentials", sharedKeyCredentials); + Utility.assertNotNull("services", this.services); + Utility.assertNotNull("resourceTypes", this.resourceTypes); + Utility.assertNotNull("expiryTime", this.expiryTime); + Utility.assertNotNull("permissions", this.permissions); + Utility.assertNotNull("version", this.version); + + // Signature is generated on the un-url-encoded values. + final String stringToSign = stringToSign(sharedKeyCredentials); + + String signature; + try { + signature = sharedKeyCredentials.computeHmac256(stringToSign); + } catch (InvalidKeyException e) { + throw new Error(e); // The key should have been validated by now. If it is no longer valid here, we fail. + } + + return new SASQueryParameters(this.version, this.services, resourceTypes, + this.protocol, this.startTime, this.expiryTime, this.ipRange, null, + null, this.permissions, signature, null, null, null, null, null); + } + + private String stringToSign(final SharedKeyCredentials sharedKeyCredentials) { + return String.join("\n", + sharedKeyCredentials.getAccountName(), + AccountSASPermission.parse(this.permissions).toString(), // guarantees ordering + this.services, + resourceTypes, + this.startTime == null ? 
"" : Utility.ISO8601UTCDateFormatter.format(this.startTime), + Utility.ISO8601UTCDateFormatter.format(this.expiryTime), + this.ipRange == null ? IPRange.DEFAULT.toString() : this.ipRange.toString(), + this.protocol == null ? "" : this.protocol.toString(), + this.version, + Constants.EMPTY_STRING // Account SAS requires an additional newline character + ); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/AnonymousCredentials.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/AnonymousCredentials.java new file mode 100644 index 0000000000000..0d26b29ceda0c --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/AnonymousCredentials.java @@ -0,0 +1,80 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import com.microsoft.rest.v2.http.HttpPipeline; +import com.microsoft.rest.v2.http.HttpRequest; +import com.microsoft.rest.v2.http.HttpResponse; +import com.microsoft.rest.v2.policy.RequestPolicy; +import com.microsoft.rest.v2.policy.RequestPolicyOptions; +import io.reactivex.Single; + +/** + * Anonymous credentials are to be used with with HTTP(S) requests that read blobs from public containers or requests + * that use a Shared Access Signature (SAS). This is because Anonymous credentials will not set an Authorization header. 
+ * Pass an instance of this class as the credentials parameter when creating a new pipeline (typically with + * {@link StorageURL}). + */ +public final class AnonymousCredentials implements ICredentials { + + /** + * Returns an empty instance of {@code AnonymousCredentials}. + */ + public AnonymousCredentials() { + } + + /** + * Creates a new {@code AnonymousCredentialsPolicy}. + * + * @param nextRequestPolicy + * The next {@code RequestPolicy} in the pipeline which will be called after this policy completes. + * @param options + * Unused. + * + * @return A {@code RequestPolicy} object to be inserted into the {@link HttpPipeline}. + */ + @Override + public RequestPolicy create(RequestPolicy nextRequestPolicy, RequestPolicyOptions options) { + return new AnonymousCredentialsPolicy(nextRequestPolicy); + } + + /** + * This policy will perform an a no-op on the Authorization header. Typically constructing a pipeline will even + * ignore constructing this policy if is recognized. Please refer to either {@link AccountSASSignatureValues}, + * {@link ServiceSASSignatureValues} for more information on SAS requests. Please refer to the following for more + * information on anonymous requests: + * Manage Access to Storage Resources + * Set Container Permissions + */ + private final class AnonymousCredentialsPolicy implements RequestPolicy { + final RequestPolicy nextPolicy; + + AnonymousCredentialsPolicy(RequestPolicy nextPolicy) { + this.nextPolicy = nextPolicy; + } + + /** + * For anonymous credentials, this is effectively a no-op. + * + * @param request + * An {@link HttpRequest} object representing the storage request. + * + * @return A Single containing the {@link HttpResponse} if successful. 
+ */ + public Single sendAsync(HttpRequest request) { + return nextPolicy.sendAsync(request); + } + } +} \ No newline at end of file diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/AppendBlobAccessConditions.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/AppendBlobAccessConditions.java new file mode 100644 index 0000000000000..b4fc31d240b31 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/AppendBlobAccessConditions.java @@ -0,0 +1,105 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import com.microsoft.azure.storage.blob.models.AppendPositionAccessConditions; +import com.microsoft.azure.storage.blob.models.LeaseAccessConditions; +import com.microsoft.azure.storage.blob.models.ModifiedAccessConditions; + +/** + * This class contains values that restrict the successful completion of AppendBlock operations to certain conditions. + * Any field may be set to null if no access conditions are desired. + *

    + * Please refer to the request header section + * here for more conceptual + * information. + */ +public final class AppendBlobAccessConditions { + + /** + * An object representing no access conditions. + */ + public static final AppendBlobAccessConditions NONE = + new AppendBlobAccessConditions(); + + private AppendPositionAccessConditions appendPositionAccessConditions; + + private ModifiedAccessConditions modifiedAccessConditions; + + private LeaseAccessConditions leaseAccessConditions; + + /** + * Creates an instance which has fields set to non-null, empty values. + */ + public AppendBlobAccessConditions() { + appendPositionAccessConditions = new AppendPositionAccessConditions(); + modifiedAccessConditions = new ModifiedAccessConditions(); + leaseAccessConditions = new LeaseAccessConditions(); + } + + /** + * Access conditions used for appending data only if the operation meets the provided conditions related to the + * size of the append blob. + */ + public AppendPositionAccessConditions appendPositionAccessConditions() { + return appendPositionAccessConditions; + } + + /** + * Access conditions used for appending data only if the operation meets the provided conditions related to the + * size of the append blob. + */ + public AppendBlobAccessConditions withAppendPositionAccessConditions( + AppendPositionAccessConditions appendPositionAccessConditions) { + this.appendPositionAccessConditions = appendPositionAccessConditions; + return this; + } + + /** + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used to + * construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + */ + public ModifiedAccessConditions modifiedAccessConditions() { + return modifiedAccessConditions; + } + + /** + * Standard HTTP Access conditions related to the modification of data. 
ETag and LastModifiedTime are used to + * construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + */ + public AppendBlobAccessConditions withModifiedAccessConditions(ModifiedAccessConditions modifiedAccessConditions) { + this.modifiedAccessConditions = modifiedAccessConditions; + return this; + } + + /** + * By setting lease access conditions, requests will fail if the provided lease does not match the active lease on + * the blob. + */ + public LeaseAccessConditions leaseAccessConditions() { + return leaseAccessConditions; + } + + /** + * By setting lease access conditions, requests will fail if the provided lease does not match the active lease on + * the blob. + */ + public AppendBlobAccessConditions withLeaseAccessConditions(LeaseAccessConditions leaseAccessConditions) { + this.leaseAccessConditions = leaseAccessConditions; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/AppendBlobURL.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/AppendBlobURL.java new file mode 100644 index 0000000000000..bad92a443aea9 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/AppendBlobURL.java @@ -0,0 +1,209 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.microsoft.azure.storage.blob; + +import com.microsoft.azure.storage.blob.models.AppendBlobAppendBlockResponse; +import com.microsoft.azure.storage.blob.models.AppendBlobCreateResponse; +import com.microsoft.azure.storage.blob.models.BlobHTTPHeaders; +import com.microsoft.rest.v2.Context; +import com.microsoft.rest.v2.http.HttpPipeline; +import io.reactivex.Flowable; +import io.reactivex.Single; + +import java.net.MalformedURLException; +import java.net.URL; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; + +import static com.microsoft.azure.storage.blob.Utility.addErrorWrappingToSingle; + + +/** + * Represents a URL to an append blob. It may be obtained by direct construction or via the create method on a + * {@link ContainerURL} object. This class does not hold any state about a particular append blob but is instead a + * convenient way of sending off appropriate requests to the resource on the service. Please refer to the + * Azure Docs + */ +public final class AppendBlobURL extends BlobURL { + + /** + * Indicates the maximum number of bytes that can be sent in a call to appendBlock. + */ + public static final int MAX_APPEND_BLOCK_BYTES = 4 * Constants.MB; + + /** + * Indicates the maximum number of blocks allowed in an append blob. + */ + public static final int MAX_BLOCKS = 50000; + + /** + * Creates a {@code AppendBlobURL} object pointing to the account specified by the URL and using the provided + * pipeline to make HTTP requests. + * + * @param url + * A {@code URL} to an Azure Storage append blob. + * @param pipeline + * A {@code HttpPipeline} which configures the behavior of HTTP exchanges. Please refer to + * {@link StorageURL#createPipeline(ICredentials, PipelineOptions)} for more information. + */ + public AppendBlobURL(URL url, HttpPipeline pipeline) { + super(url, pipeline); + } + + /** + * Creates a new {@link AppendBlobURL} with the given pipeline. 
+ * + * @param pipeline + * An {@code HttpPipeline} object to process HTTP transactions. + * + * @return An {@code AppendBlobURL} object with the given pipeline. + */ + public AppendBlobURL withPipeline(HttpPipeline pipeline) { + try { + return new AppendBlobURL(new URL(this.storageClient.url()), pipeline); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + + } + + /** + * Creates a new {@code AppendBlobURL} with the given snapshot. + * + * @param snapshot + * A {@code String} of the snapshot identifier. + * + * @return An {@code AppendBlobURL} object with the given pipeline. + */ + public AppendBlobURL withSnapshot(String snapshot) throws MalformedURLException, UnknownHostException { + BlobURLParts blobURLParts = URLParser.parse(new URL(this.storageClient.url())); + blobURLParts.withSnapshot(snapshot); + return new AppendBlobURL(blobURLParts.toURL(), super.storageClient.httpPipeline()); + } + + /** + * Creates a 0-length append blob. Call AppendBlock to append data to an append blob. For more information, see + * the Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=append_blob "Sample code for AppendBlobURL.create")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single create() { + return this.create(null, null, null, null); + } + + /** + * Creates a 0-length append blob. Call AppendBlock to append data to an append blob. For more information, see + * the Azure Docs. 
+ * + * @param headers + * {@link BlobHTTPHeaders} + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=append_blob "Sample code for AppendBlobURL.create")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single create(BlobHTTPHeaders headers, Metadata metadata, + BlobAccessConditions accessConditions, Context context) { + metadata = metadata == null ? Metadata.NONE : metadata; + accessConditions = accessConditions == null ? BlobAccessConditions.NONE : accessConditions; + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedAppendBlobs().createWithRestResponseAsync(context, + 0, null, metadata, null, headers, accessConditions.leaseAccessConditions(), + accessConditions.modifiedAccessConditions())); + } + + /** + * Commits a new block of data to the end of the existing append blob. For more information, see the + * Azure Docs. + *

    + * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flowable} must produce the same data each time it is subscribed to. + * + * @param data + * The data to write to the blob. Note that this {@code Flowable} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * emitted by the {@code Flowable}. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=append_blob "Sample code for AppendBlobURL.appendBlock")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single appendBlock(Flowable data, long length) { + return this.appendBlock(data, length, null, null); + } + + /** + * Commits a new block of data to the end of the existing append blob. For more information, see the + * Azure Docs. + *

    + * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flowable} must produce the same data each time it is subscribed to. + * + * @param data + * The data to write to the blob. Note that this {@code Flowable} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * emitted by the {@code Flowable}. + * @param appendBlobAccessConditions + * {@link AppendBlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=append_blob "Sample code for AppendBlobURL.appendBlock")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single appendBlock(Flowable data, long length, + AppendBlobAccessConditions appendBlobAccessConditions, Context context) { + appendBlobAccessConditions = appendBlobAccessConditions == null ? AppendBlobAccessConditions.NONE : + appendBlobAccessConditions; + appendBlobAccessConditions = appendBlobAccessConditions == null + ? AppendBlobAccessConditions.NONE : appendBlobAccessConditions; + context = context == null ? 
Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedAppendBlobs().appendBlockWithRestResponseAsync( + context, data, length, null, null, null, appendBlobAccessConditions.leaseAccessConditions(), + appendBlobAccessConditions.appendPositionAccessConditions(), + appendBlobAccessConditions.modifiedAccessConditions())); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/BlobAccessConditions.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/BlobAccessConditions.java new file mode 100644 index 0000000000000..54e925764a37e --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/BlobAccessConditions.java @@ -0,0 +1,78 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import com.microsoft.azure.storage.blob.models.LeaseAccessConditions; +import com.microsoft.azure.storage.blob.models.ModifiedAccessConditions; + +/** + * This class contains values which will restrict the successful operation of a variety of requests to the conditions + * present. These conditions are entirely optional. The entire object or any of its properties may be set to null when + * passed to a method to indicate that those conditions are not desired. Please refer to the type of each field for more + * information on those particular access conditions. 
+ */ +public final class BlobAccessConditions { + + public static final BlobAccessConditions NONE = + new BlobAccessConditions(); + + private ModifiedAccessConditions modifiedAccessConditions; + + private LeaseAccessConditions leaseAccessConditions; + + /** + * Creates an instance which has fields set to non-null, empty values. + */ + public BlobAccessConditions() { + modifiedAccessConditions = new ModifiedAccessConditions(); + leaseAccessConditions = new LeaseAccessConditions(); + } + + /** + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used to + * construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + */ + public ModifiedAccessConditions modifiedAccessConditions() { + return modifiedAccessConditions; + } + + /** + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used to + * construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + */ + public BlobAccessConditions withModifiedAccessConditions(ModifiedAccessConditions modifiedAccessConditions) { + this.modifiedAccessConditions = modifiedAccessConditions; + return this; + } + + /** + * By setting lease access conditions, requests will fail if the provided lease does not match the active lease on + * the blob. + */ + public LeaseAccessConditions leaseAccessConditions() { + return leaseAccessConditions; + } + + /** + * By setting lease access conditions, requests will fail if the provided lease does not match the active lease on + * the blob. 
+ */ + public BlobAccessConditions withLeaseAccessConditions(LeaseAccessConditions leaseAccessConditions) { + this.leaseAccessConditions = leaseAccessConditions; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/BlobListingDetails.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/BlobListingDetails.java new file mode 100644 index 0000000000000..6d24b0174e1c2 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/BlobListingDetails.java @@ -0,0 +1,149 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import com.microsoft.azure.storage.blob.models.ListBlobsIncludeItem; + +import java.util.ArrayList; + +/** + * This type allows users to specify additional information the service should return with each blob when listing blobs + * in a container (via a {@link ContainerURL} object). This type is immutable to ensure thread-safety of requests, so + * changing the details for a different listing operation requires construction of a new object. Null may be passed if + * none of the options are desirable. + */ +public final class BlobListingDetails { + + /** + * An object representing no listing details. 
+ */ + public static final BlobListingDetails NONE = new BlobListingDetails(); + + private boolean copy; + + private boolean metadata; + + private boolean snapshots; + + private boolean uncommittedBlobs; + + private boolean deletedBlobs; + + public BlobListingDetails() { + } + + /** + * Whether blob metadata related to any current or previous Copy Blob operation should be included in the + * response. + */ + public boolean copy() { + return copy; + } + + /** + * Whether blob metadata related to any current or previous Copy Blob operation should be included in the + * response. + */ + public BlobListingDetails withCopy(boolean copy) { + this.copy = copy; + return this; + } + + /** + * Whether blob metadata should be returned. + */ + public boolean metadata() { + return metadata; + } + + /** + * Whether blob metadata should be returned. + */ + public BlobListingDetails withMetadata(boolean metadata) { + this.metadata = metadata; + return this; + } + + /** + * Whether snapshots should be returned. Snapshots are listed from oldest to newest. + */ + public boolean snapshots() { + return snapshots; + } + + /** + * Whether snapshots should be returned. Snapshots are listed from oldest to newest. + */ + public BlobListingDetails withSnapshots(boolean snapshots) { + this.snapshots = snapshots; + return this; + } + + /** + * Whether blobs for which blocks have been uploaded, but which have not been committed using Put Block List, + * should be included in the response. + */ + public boolean uncommittedBlobs() { + return uncommittedBlobs; + } + + /** + * Whether blobs for which blocks have been uploaded, but which have not been committed using Put Block List, + * should be included in the response. + */ + public BlobListingDetails withUncommittedBlobs(boolean uncommittedBlobs) { + this.uncommittedBlobs = uncommittedBlobs; + return this; + } + + /** + * Whether blobs which have been soft deleted should be returned. 
+ */ + public boolean deletedBlobs() { + return deletedBlobs; + } + + /** + * Whether blobs which have been soft deleted should be returned. + */ + public BlobListingDetails withDeletedBlobs(boolean deletedBlobs) { + this.deletedBlobs = deletedBlobs; + return this; + } + + /* + This is used internally to convert the details structure into a list to pass to the protocol layer. The customer + should never have need for this. + */ + ArrayList toList() { + ArrayList details = new ArrayList(); + if (this.copy) { + details.add(ListBlobsIncludeItem.COPY); + } + if (this.deletedBlobs) { + details.add(ListBlobsIncludeItem.DELETED); + } + if (this.metadata) { + details.add(ListBlobsIncludeItem.METADATA); + } + if (this.snapshots) { + details.add(ListBlobsIncludeItem.SNAPSHOTS); + } + if (this.uncommittedBlobs) { + details.add(ListBlobsIncludeItem.UNCOMMITTEDBLOBS); + } + return details; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/BlobRange.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/BlobRange.java new file mode 100644 index 0000000000000..a66ff8ff923b9 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/BlobRange.java @@ -0,0 +1,89 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.microsoft.azure.storage.blob; + +import java.util.Locale; + +/** + * This is a representation of a range of bytes on a blob, typically used during a download operation. This type is + * immutable to ensure thread-safety of requests, so changing the values for a different operation requires construction + * of a new object. Passing null as a BlobRange value will default to the entire range of the blob. + */ +public final class BlobRange { + + /** + * An object which reflects the service's default range, which is the whole blob. + */ + public static final BlobRange DEFAULT = new BlobRange(); + + private long offset; + + private Long count; + + public BlobRange() { + } + + /** + * The start of the range. Must be greater than or equal to 0. + */ + public long offset() { + return offset; + } + + /** + * The start of the range. Must be greater than or equal to 0. + */ + public BlobRange withOffset(long offset) { + if (offset < 0) { + throw new IllegalArgumentException("BlobRange offset must be greater than or equal to 0."); + } + this.offset = offset; + return this; + } + + /** + * How many bytes to include in the range. Must be greater than or equal to 0 if specified. + */ + public Long count() { + return count; + } + + /** + * How many bytes to include in the range. Must be greater than or equal to 0 if specified. + */ + public BlobRange withCount(Long count) { + if (count != null && count < 0) { + throw new IllegalArgumentException( + "BlobRange count must be greater than or equal to 0 if specified."); + } + this.count = count; + return this; + } + + /** + * @return A {@code String} compliant with the format of the Azure Storage x-ms-range and Range headers. 
+ */ + @Override + public String toString() { + if (this.count != null) { + long rangeEnd = this.offset + this.count - 1; + return String.format( + Locale.ROOT, Constants.HeaderConstants.RANGE_HEADER_FORMAT, this.offset, rangeEnd); + } + + return String.format( + Locale.ROOT, Constants.HeaderConstants.BEGIN_RANGE_HEADER_FORMAT, this.offset); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/BlobSASPermission.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/BlobSASPermission.java new file mode 100644 index 0000000000000..2dd70c798b55c --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/BlobSASPermission.java @@ -0,0 +1,192 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import java.util.Locale; + +/** + * This is a helper class to construct a string representing the permissions granted by a ServiceSAS to a blob. Setting + * a value to true means that any SAS which uses these permissions will grant permissions for that operation. Once all + * the values are set, this should be serialized with toString and set as the permissions field on a + * {@link ServiceSASSignatureValues} object. It is possible to construct the permissions string without this class, but + * the order of the permissions is particular and this class guarantees correctness. 
+ */ +public final class BlobSASPermission { + + private boolean read; + + private boolean add; + + private boolean create; + + private boolean write; + + private boolean delete; + + /** + * Initializes a {@code BlobSASPermission} object with all fields set to false. + */ + public BlobSASPermission() { + } + + /** + * Creates a {@code BlobSASPermission} from the specified permissions string. This method will throw an + * {@code IllegalArgumentException} if it encounters a character that does not correspond to a valid permission. + * + * @param permString + * A {@code String} which represents the {@code BlobSASPermission}. + * + * @return A {@code BlobSASPermission} generated from the given {@code String}. + */ + public static BlobSASPermission parse(String permString) { + BlobSASPermission permissions = new BlobSASPermission(); + + for (int i = 0; i < permString.length(); i++) { + char c = permString.charAt(i); + switch (c) { + case 'r': + permissions.read = true; + break; + case 'a': + permissions.add = true; + break; + case 'c': + permissions.create = true; + break; + case 'w': + permissions.write = true; + break; + case 'd': + permissions.delete = true; + break; + default: + throw new IllegalArgumentException( + String.format(Locale.ROOT, SR.ENUM_COULD_NOT_BE_PARSED_INVALID_VALUE, "Permissions", permString, c)); + } + } + return permissions; + } + + /** + * Specifies Read access granted. + */ + public boolean read() { + return read; + } + + /** + * Specifies Read access granted. + */ + public BlobSASPermission withRead(boolean read) { + this.read = read; + return this; + } + + /** + * Specifies Add access granted. + */ + public boolean add() { + return add; + } + + /** + * Specifies Add access granted. + */ + public BlobSASPermission withAdd(boolean add) { + this.add = add; + return this; + } + + /** + * Specifies Create access granted. + */ + public boolean create() { + return create; + } + + /** + * Specifies Create access granted. 
+ */ + public BlobSASPermission withCreate(boolean create) { + this.create = create; + return this; + } + + /** + * Specifies Write access granted. + */ + public boolean write() { + return write; + } + + /** + * Specifies Write access granted. + */ + public BlobSASPermission withWrite(boolean write) { + this.write = write; + return this; + } + + /** + * Specifies Delete access granted. + */ + public boolean delete() { + return delete; + } + + /** + * Specifies Delete access granted. + */ + public BlobSASPermission withDelete(boolean delete) { + this.delete = delete; + return this; + } + + /** + * Converts the given permissions to a {@code String}. Using this method will guarantee the permissions are in an + * order accepted by the service. + * + * @return A {@code String} which represents the {@code BlobSASPermission}. + */ + @Override + public String toString() { + // The order of the characters should be as specified here to ensure correctness: + // https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas + + final StringBuilder builder = new StringBuilder(); + + if (this.read) { + builder.append('r'); + } + + if (this.add) { + builder.append('a'); + } + + if (this.create) { + builder.append('c'); + } + + if (this.write) { + builder.append('w'); + } + + if (this.delete) { + builder.append('d'); + } + + return builder.toString(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/BlobURL.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/BlobURL.java new file mode 100644 index 0000000000000..dc418fc66f739 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/BlobURL.java @@ -0,0 +1,985 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import com.microsoft.azure.storage.blob.models.*; +import com.microsoft.rest.v2.Context; +import com.microsoft.rest.v2.http.HttpPipeline; +import io.reactivex.Single; + +import java.net.MalformedURLException; +import java.net.URL; +import java.net.UnknownHostException; + +import static com.microsoft.azure.storage.blob.Utility.addErrorWrappingToSingle; + +/** + * Represents a URL to a blob of any type: block, append, or page. It may be obtained by direct construction or via the + * create method on a {@link ContainerURL} object. This class does not hold any state about a particular blob but is + * instead a convenient way of sending off appropriate requests to the resource on the service. Please refer to the + * Azure Docs for more information. + */ +public class BlobURL extends StorageURL { + + /** + * Creates a {@code BlobURL} object pointing to the account specified by the URL and using the provided pipeline to + * make HTTP requests. + * + * @param url + * A {@code URL} to an Azure Storage blob. + * @param pipeline + * A {@code HttpPipeline} which configures the behavior of HTTP exchanges. Please refer to + * {@link StorageURL#createPipeline(ICredentials, PipelineOptions)} for more information. + */ + public BlobURL(URL url, HttpPipeline pipeline) { + super(url, pipeline); + } + + /** + * Creates a new {@link BlobURL} with the given pipeline. + * + * @param pipeline + * An {@link HttpPipeline} object to set. + * + * @return A {@link BlobURL} object with the given pipeline. 
+ */ + public BlobURL withPipeline(HttpPipeline pipeline) { + try { + return new BlobURL(new URL(this.storageClient.url()), pipeline); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + } + + /** + * Creates a new {@link BlobURL} with the given snapshot. + * + * @param snapshot + * A {@code String} to set. + * + * @return A {@link BlobURL} object with the given pipeline. + * + * @throws MalformedURLException + * Appending the specified snapshot produced an invalid URL. + * @throws UnknownHostException + * If the url contains an improperly formatted ipaddress or unknown host address. + */ + public BlobURL withSnapshot(String snapshot) throws MalformedURLException, UnknownHostException { + BlobURLParts blobURLParts = URLParser.parse(new URL(this.storageClient.url())); + blobURLParts.withSnapshot(snapshot); + return new BlobURL(blobURLParts.toURL(), super.storageClient.httpPipeline()); + } + + /** + * Converts this BlobURL to a {@link BlockBlobURL} object. Note that this does not change the actual type of the + * blob if it has already been created. + * + * @return A {@link BlockBlobURL} object. + */ + public BlockBlobURL toBlockBlobURL() { + try { + return new BlockBlobURL(new URL(this.storageClient.url()), super.storageClient.httpPipeline()); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + } + + /** + * Converts this BlobURL to an {@link AppendBlobURL} object. Note that this does not change the actual type of the + * blob if it has already been created. + * + * @return An {@link AppendBlobURL} object. + */ + public AppendBlobURL toAppendBlobURL() { + try { + return new AppendBlobURL(new URL(this.storageClient.url()), super.storageClient.httpPipeline()); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + } + + /** + * Converts this BlobURL to a {@link PageBlobURL} object. Note that this does not change the actual type of the blob + * if it has already been created. 
+ * + * @return A {@link PageBlobURL} object. + */ + public PageBlobURL toPageBlobURL() { + try { + return new PageBlobURL(new URL(this.storageClient.url()), super.storageClient.httpPipeline()); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + } + + /** + * Copies the data at the source URL to a blob. For more information, see the Azure Docs + * + * @param sourceURL + * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=start_copy "Sample code for BlobURL.startCopyFromURL")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=start_copy_helper "Helper for start_copy sample.")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single startCopyFromURL(URL sourceURL) { + return this.startCopyFromURL(sourceURL, null, null, null, null); + } + + /** + * Copies the data at the source URL to a blob. For more information, see the Azure Docs + * + * @param sourceURL + * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. + * @param metadata + * {@link Metadata} + * @param sourceModifiedAccessConditions + * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the + * modification of data. ETag and LastModifiedTime are used to construct conditions related to when the blob + * was changed relative to the given request. The request will fail if the specified condition is not + * satisfied. + * @param destAccessConditions + * {@link BlobAccessConditions} against the destination. 
+ * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=start_copy "Sample code for BlobURL.startCopyFromURL")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=start_copy_helper "Helper for start_copy sample.")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single startCopyFromURL(URL sourceURL, Metadata metadata, + ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions, + Context context) { + metadata = metadata == null ? Metadata.NONE : metadata; + sourceModifiedAccessConditions = sourceModifiedAccessConditions == null ? + new ModifiedAccessConditions() : sourceModifiedAccessConditions; + destAccessConditions = destAccessConditions == null ? BlobAccessConditions.NONE : destAccessConditions; + context = context == null ? Context.NONE : context; + + // We want to hide the SourceAccessConditions type from the user for consistency's sake, so we convert here. 
+ SourceModifiedAccessConditions sourceConditions = new SourceModifiedAccessConditions() + .withSourceIfModifiedSince(sourceModifiedAccessConditions.ifModifiedSince()) + .withSourceIfUnmodifiedSince(sourceModifiedAccessConditions.ifUnmodifiedSince()) + .withSourceIfMatch(sourceModifiedAccessConditions.ifMatch()) + .withSourceIfNoneMatch(sourceModifiedAccessConditions.ifNoneMatch()); + + return addErrorWrappingToSingle(this.storageClient.generatedBlobs().startCopyFromURLWithRestResponseAsync( + context, sourceURL, null, metadata, null, sourceConditions, + destAccessConditions.modifiedAccessConditions(), destAccessConditions.leaseAccessConditions())); + } + + /** + * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. For + * more information, see the Azure Docs. + * + * @param copyId + * The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link + * BlobStartCopyFromURLHeaders} object. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=abort_copy "Sample code for BlobURL.abortCopyFromURL")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single abortCopyFromURL(String copyId) { + return this.abortCopyFromURL(copyId, null, null); + } + + /** + * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. For + * more information, see the Azure Docs. + * + * @param copyId + * The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link + * BlobStartCopyFromURLHeaders} object. 
+ * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=abort_copy "Sample code for BlobURL.abortCopyFromURL")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single abortCopyFromURL(String copyId, + LeaseAccessConditions leaseAccessConditions, Context context) { + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedBlobs().abortCopyFromURLWithRestResponseAsync( + context, copyId, null, null, leaseAccessConditions)); + } + + /** + * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. + * For more information, see the Azure Docs + * + * @param copySource + * The source URL to copy from. + * + * @return Emits the successful response. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=sync_copy "Sample code for BlobURL.syncCopyFromURL")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single syncCopyFromURL(URL copySource) { + return this.syncCopyFromURL(copySource, null, null, null, null); + } + + /** + * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. + * For more information, see the Azure Docs + * + * @param copySource + * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. + * @param metadata + * {@link Metadata} + * @param sourceModifiedAccessConditions + * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the + * modification of data. ETag and LastModifiedTime are used to construct conditions related to when the blob + * was changed relative to the given request. The request will fail if the specified condition is not + * satisfied. + * @param destAccessConditions + * {@link BlobAccessConditions} against the destination. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=sync_copy "Sample code for BlobURL.syncCopyFromURL")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single syncCopyFromURL(URL copySource, Metadata metadata, + ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions, + Context context) { + metadata = metadata == null ? Metadata.NONE : metadata; + sourceModifiedAccessConditions = sourceModifiedAccessConditions == null ? + new ModifiedAccessConditions() : sourceModifiedAccessConditions; + destAccessConditions = destAccessConditions == null ? BlobAccessConditions.NONE : destAccessConditions; + context = context == null ? Context.NONE : context; + + // We want to hide the SourceAccessConditions type from the user for consistency's sake, so we convert here. + SourceModifiedAccessConditions sourceConditions = new SourceModifiedAccessConditions() + .withSourceIfModifiedSince(sourceModifiedAccessConditions.ifModifiedSince()) + .withSourceIfUnmodifiedSince(sourceModifiedAccessConditions.ifUnmodifiedSince()) + .withSourceIfMatch(sourceModifiedAccessConditions.ifMatch()) + .withSourceIfNoneMatch(sourceModifiedAccessConditions.ifNoneMatch()); + + return addErrorWrappingToSingle(this.storageClient.generatedBlobs().copyFromURLWithRestResponseAsync( + context, copySource, null, metadata, null, sourceConditions, + destAccessConditions.modifiedAccessConditions(), destAccessConditions.leaseAccessConditions())); + } + + /** + * Reads a range of bytes from a blob. The response also includes the blob's properties and metadata. For more + * information, see the Azure Docs. + *

    + * Note that the response body has reliable download functionality built in, meaning that a failed download stream + * will be automatically retried. This behavior may be configured with {@link ReliableDownloadOptions}. + * + * @return Emits the successful response. + * @apiNote ## Sample Code \n [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=upload_download + * "Sample code for BlobURL.download")] \n For more samples, please see the [Samples + * file](%https://github.com/Azure/azure-storage-java/blob/New-Storage-SDK-V10-Preview/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single download() { + return this.download(null, null, false, null); + } + + /** + * Reads a range of bytes from a blob. The response also includes the blob's properties and metadata. For more + * information, see the Azure Docs. + *

    + * Note that the response body has reliable download functionality built in, meaning that a failed download stream + * will be automatically retried. This behavior may be configured with {@link ReliableDownloadOptions}. + * + * @param range + * {@link BlobRange} + * @param accessConditions + * {@link BlobAccessConditions} + * @param rangeGetContentMD5 + * Whether the contentMD5 for the specified blob range should be returned. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=upload_download "Sample code for BlobURL.download")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single download(BlobRange range, BlobAccessConditions accessConditions, + boolean rangeGetContentMD5, Context context) { + Boolean getMD5 = rangeGetContentMD5 ? rangeGetContentMD5 : null; + range = range == null ? BlobRange.DEFAULT : range; + accessConditions = accessConditions == null ? 
BlobAccessConditions.NONE : accessConditions; + HTTPGetterInfo info = new HTTPGetterInfo() + .withOffset(range.offset()) + .withCount(range.count()) + .withETag(accessConditions.modifiedAccessConditions().ifMatch()); + + return addErrorWrappingToSingle(this.storageClient.generatedBlobs().downloadWithRestResponseAsync( + context, null, null, range.toString(), getMD5, null, + accessConditions.leaseAccessConditions(), + accessConditions.modifiedAccessConditions())) + // Convert the autorest response to a DownloadResponse, which enable reliable download. + .map(response -> { + // If there wasn't an etag originally specified, lock on the one returned. + info.withETag(response.headers().eTag()); + return new DownloadResponse(response, info, + // In the event of a stream failure, make a new request to pick up where we left off. + newInfo -> + this.download(new BlobRange().withOffset(newInfo.offset()) + .withCount(newInfo.count()), + new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfMatch(info.eTag())), false, + context == null ? Context.NONE : context)); + }); + } + + /** + * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. For more + * information, see the Azure Docs. + * + * @return Emits the successful response. + * @apiNote ## Sample Code \n [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_delete + * "Sample code for BlobURL.delete")] \n For more samples, please see the [Samples + * file](%https://github.com/Azure/azure-storage-java/blob/New-Storage-SDK-V10-Preview/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single delete() { + return this.delete(null, null, null); + } + + /** + * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. For more + * information, see the Azure Docs. 
+ * + * @param deleteBlobSnapshotOptions + * Specifies the behavior for deleting the snapshots on this blob. {@code Include} will delete the base blob + * and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being deleted, you must + * pass null. + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_delete "Sample code for BlobURL.delete")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single delete(DeleteSnapshotsOptionType deleteBlobSnapshotOptions, + BlobAccessConditions accessConditions, Context context) { + accessConditions = accessConditions == null ? BlobAccessConditions.NONE : accessConditions; + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedBlobs().deleteWithRestResponseAsync( + context, null, null, deleteBlobSnapshotOptions, null, accessConditions.leaseAccessConditions(), + accessConditions.modifiedAccessConditions())); + } + + /** + * Returns the blob's metadata and properties. For more information, see the Azure Docs. + * + * @return Emits the successful response. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=properties_metadata "Sample code for BlobURL.getProperties")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single getProperties() { + return this.getProperties(null, null); + } + + /** + * Returns the blob's metadata and properties. For more information, see the Azure Docs. + * + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=properties_metadata "Sample code for BlobURL.getProperties")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single getProperties(BlobAccessConditions accessConditions, Context context) { + accessConditions = accessConditions == null ? BlobAccessConditions.NONE : accessConditions; + context = context == null ? 
Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedBlobs().getPropertiesWithRestResponseAsync( + context, null, null, null, accessConditions.leaseAccessConditions(), + accessConditions.modifiedAccessConditions())); + } + + /** + * Changes a blob's HTTP header properties. For more information, see the Azure + * Docs. + * + * @param headers + * {@link BlobHTTPHeaders} + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=properties_metadata "Sample code for BlobURL.setHTTPHeaders")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single setHTTPHeaders(BlobHTTPHeaders headers) { + return this.setHTTPHeaders(headers, null, null); + } + + /** + * Changes a blob's HTTP header properties. For more information, see the Azure Docs. + * + * @param headers + * {@link BlobHTTPHeaders} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=properties_metadata "Sample code for BlobURL.setHTTPHeaders")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single setHTTPHeaders(BlobHTTPHeaders headers, + BlobAccessConditions accessConditions, Context context) { + accessConditions = accessConditions == null ? BlobAccessConditions.NONE : accessConditions; + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedBlobs().setHTTPHeadersWithRestResponseAsync( + context, null, null, headers, accessConditions.leaseAccessConditions(), + accessConditions.modifiedAccessConditions())); + } + + /** + * Changes a blob's metadata. For more information, see the Azure Docs. + * + * @param metadata + * {@link Metadata} + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=properties_metadata "Sample code for BlobURL.setMetadata")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single setMetadata(Metadata metadata) { + return this.setMetadata(metadata, null, null); + } + + /** + * Changes a blob's metadata. For more information, see the Azure Docs. + * + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. 
Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=properties_metadata "Sample code for BlobURL.setMetadata")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single setMetadata(Metadata metadata, BlobAccessConditions accessConditions, + Context context) { + metadata = metadata == null ? Metadata.NONE : metadata; + accessConditions = accessConditions == null ? BlobAccessConditions.NONE : accessConditions; + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedBlobs().setMetadataWithRestResponseAsync( + context, null, metadata, null, accessConditions.leaseAccessConditions(), + accessConditions.modifiedAccessConditions())); + } + + /** + * Creates a read-only snapshot of a blob. For more information, see the Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=snapshot "Sample code for BlobURL.createSnapshot")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single createSnapshot() { + return this.createSnapshot(null, null, null); + } + + /** + * Creates a read-only snapshot of a blob. For more information, see the Azure Docs. 
+ * + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=snapshot "Sample code for BlobURL.createSnapshot")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single createSnapshot(Metadata metadata, BlobAccessConditions accessConditions, + Context context) { + metadata = metadata == null ? Metadata.NONE : metadata; + accessConditions = accessConditions == null ? BlobAccessConditions.NONE : accessConditions; + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedBlobs().createSnapshotWithRestResponseAsync( + context, null, metadata, null, accessConditions.modifiedAccessConditions(), + accessConditions.leaseAccessConditions())); + } + + /** + * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in + * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of + * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. + *

    + * For detailed information about block blob level tiering see the Azure Docs. + * + * @param tier + * The new tier for the blob. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=tier "Sample code for BlobURL.setTier")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single setTier(AccessTier tier) { + return this.setTier(tier, null, null); + } + + /** + * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in + * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of + * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. + *

    + * For detailed information about block blob level tiering see the Azure Docs. + * + * @param tier + * The new tier for the blob. + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=tier "Sample code for BlobURL.setTier")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single setTier(AccessTier tier, LeaseAccessConditions leaseAccessConditions, + Context context) { + Utility.assertNotNull("tier", tier); + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedBlobs().setTierWithRestResponseAsync(context, tier, + null, null, leaseAccessConditions)); + } + + /** + * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. + * For more information, see the Azure Docs. + * + * @return Emits the successful response. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=undelete "Sample code for BlobURL.undelete")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single undelete() { + return this.undelete(null); + } + + /** + * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. + * For more information, see the Azure Docs. + * + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=undelete "Sample code for BlobURL.undelete")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single undelete(Context context) { + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedBlobs().undeleteWithRestResponseAsync(context, null, + null)); + } + + /** + * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 + * seconds, or infinite (-1). For more information, see the Azure Docs. + * + * @param proposedId + * A {@code String} in any valid GUID format. 
May be null. + * @param duration + * The duration of the lease, in seconds, or negative one (-1) for a lease that + * never expires. A non-infinite lease can be between 15 and 60 seconds. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobURL.acquireLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single acquireLease(String proposedId, int duration) { + return this.acquireLease(proposedId, duration, null, null); + } + + /** + * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 + * seconds, or infinite (-1). For more information, see the Azure Docs. + * + * @param proposedID + * A {@code String} in any valid GUID format. May be null. + * @param duration + * The duration of the lease, in seconds, or negative one (-1) for a lease that + * never expires. A non-infinite lease can be between 15 and 60 seconds. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. 
+ * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobURL.acquireLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single acquireLease(String proposedID, int duration, + ModifiedAccessConditions modifiedAccessConditions, Context context) { + if (!(duration == -1 || (duration >= 15 && duration <= 60))) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new IllegalArgumentException("Duration must be -1 or between 15 and 60."); + } + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedBlobs().acquireLeaseWithRestResponseAsync(context, + null, duration, proposedID, null, modifiedAccessConditions)); + } + + /** + * Renews the blob's previously-acquired lease. For more information, see the Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the blob. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobURL.renewLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single renewLease(String leaseID) { + return this.renewLease(leaseID, null, null); + } + + /** + * Renews the blob's previously-acquired lease. For more information, see the Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the blob. 
+ * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobURL.renewLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single renewLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions, + Context context) { + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedBlobs().renewLeaseWithRestResponseAsync(context, + leaseID, null, null, modifiedAccessConditions)); + } + + /** + * Releases the blob's previously-acquired lease. For more information, see the Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the blob. + * + * @return Emits the successful response. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobURL.releaseLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single releaseLease(String leaseID) { + return this.releaseLease(leaseID, null, null); + } + + /** + * Releases the blob's previously-acquired lease. For more information, see the Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the blob. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobURL.releaseLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single releaseLease(String leaseID, + ModifiedAccessConditions modifiedAccessConditions, Context context) { + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedBlobs().releaseLeaseWithRestResponseAsync(context, + leaseID, null, null, modifiedAccessConditions)); + } + + /** + * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant + * to break a fixed-duration lease when it expires or an infinite lease immediately. For more information, see the + * Azure Docs. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobURL.breakLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/New-Storage-SDK-V10-Preview/src/test/java/com/microsoft/azure/storage/Samples.java) + * + * @return + * Emits the successful response. + */ + public Single breakLease() { + return this.breakLease(null, null, null); + } + + /** + * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant + * to break a fixed-duration lease when it expires or an infinite lease immediately. For more information, see the + * Azure Docs. + * + * @param breakPeriodInSeconds + * An optional {@code Integer} representing the proposed duration of seconds that the lease should continue + * before it is broken, between 0 and 60 seconds. 
This break period is only used if it is shorter than the + * time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be + * available before the break period has expired, but the lease may be held for longer than the break + * period. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobURL.breakLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single breakLease(Integer breakPeriodInSeconds, + ModifiedAccessConditions modifiedAccessConditions, Context context) { + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedBlobs().breakLeaseWithRestResponseAsync(context, + null, breakPeriodInSeconds, null, modifiedAccessConditions)); + } + + /** + * ChangeLease changes the blob's lease ID. For more information, see the Azure Docs. 
+ * + * @param leaseId + * The leaseId of the active lease on the blob. + * @param proposedID + * A {@code String} in any valid GUID format. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobURL.changeLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single changeLease(String leaseId, String proposedID) { + return this.changeLease(leaseId, proposedID, null, null); + } + + /** + * ChangeLease changes the blob's lease ID. For more information, see the Azure Docs. + * + * @param leaseId + * The leaseId of the active lease on the blob. + * @param proposedID + * A {@code String} in any valid GUID format. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blob_lease "Sample code for BlobURL.changeLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single changeLease(String leaseId, String proposedID, + ModifiedAccessConditions modifiedAccessConditions, Context context) { + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedBlobs().changeLeaseWithRestResponseAsync(context, + leaseId, proposedID, null, null, modifiedAccessConditions)); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=account_info "Sample code for BlobURL.getAccountInfo")] \n + * For more samples, please see the [Samples file](https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single getAccountInfo() { + return this.getAccountInfo(null); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the Azure Docs. + * + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. 
+ * + * @return Emits the successful response. + * + * @apiNote ## Sample code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=account_info "Sample code for BlobURL.getAccountInfo")] \n + * For more samples, please see the [Samples file](https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single getAccountInfo(Context context) { + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle( + this.storageClient.generatedBlobs().getAccountInfoWithRestResponseAsync(context)); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/BlobURLParts.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/BlobURLParts.java new file mode 100644 index 0000000000000..30ecca5e6131b --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/BlobURLParts.java @@ -0,0 +1,208 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import com.microsoft.rest.v2.http.UrlBuilder; + +import java.net.MalformedURLException; +import java.net.URL; +import java.util.HashMap; +import java.util.Map; + +/** + * A BlobURLParts object represents the components that make up an Azure Storage Container/Blob URL. 
You may parse an + * existing URL into its parts with the {@link URLParser} class. You may construct a URL from parts by calling toURL(). + * It is also possible to use the empty constructor to build a blobURL from scratch. + * NOTE: Changing any SAS-related field requires computing a new SAS signature. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=url_parts "Sample code for BlobURLParts")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ +public final class BlobURLParts { + + private String scheme; + + private String host; + + private String containerName; + + private String blobName; + + private String snapshot; + + private SASQueryParameters sasQueryParameters; + + private Map unparsedParameters; + + /** + * Initializes a BlobURLParts object with all fields set to null, except unparsedParameters, which is an empty map. + * This may be useful for constructing a URL to a blob storage resource from scratch when the constituent parts are + * already known. + */ + public BlobURLParts() { + unparsedParameters = new HashMap<>(); + } + + /** + * The scheme. Ex: "https://". + */ + public String scheme() { + return scheme; + } + + /** + * The scheme. Ex: "https://". + */ + public BlobURLParts withScheme(String scheme) { + this.scheme = scheme; + return this; + } + + /** + * The host. Ex: "account.blob.core.windows.net". + */ + public String host() { + return host; + } + + /** + * The host. Ex: "account.blob.core.windows.net". + */ + public BlobURLParts withHost(String host) { + this.host = host; + return this; + } + + /** + * The container name or {@code null} if a {@link ServiceURL} was parsed. + */ + public String containerName() { + return containerName; + } + + /** + * The container name or {@code null} if a {@link ServiceURL} was parsed. 
+ */ + public BlobURLParts withContainerName(String containerName) { + this.containerName = containerName; + return this; + } + + /** + * The blob name or {@code null} if a {@link ServiceURL} or {@link ContainerURL} was parsed. + */ + public String blobName() { + return blobName; + } + + /** + * The blob name or {@code null} if a {@link ServiceURL} or {@link ContainerURL} was parsed. + */ + public BlobURLParts withBlobName(String blobName) { + this.blobName = blobName; + return this; + } + + /** + * The snapshot time or {@code null} if anything except a URL to a snapshot was parsed. + */ + public String snapshot() { + return snapshot; + } + + /** + * The snapshot time or {@code null} if anything except a URL to a snapshot was parsed. + */ + public BlobURLParts withSnapshot(String snapshot) { + this.snapshot = snapshot; + return this; + } + + /** + * A {@link SASQueryParameters} representing the SAS query parameters or {@code null} if there were no such + * parameters. + */ + public SASQueryParameters sasQueryParameters() { + return sasQueryParameters; + } + + /** + * A {@link SASQueryParameters} representing the SAS query parameters or {@code null} if there were no such + * parameters. + */ + public BlobURLParts withSasQueryParameters(SASQueryParameters sasQueryParameters) { + this.sasQueryParameters = sasQueryParameters; + return this; + } + + /** + * The query parameter key value pairs aside from SAS parameters and snapshot time or {@code null} if there were + * no such parameters. + */ + public Map unparsedParameters() { + return unparsedParameters; + } + + /** + * The query parameter key value pairs aside from SAS parameters and snapshot time or {@code null} if there were + * no such parameters. + */ + public BlobURLParts withUnparsedParameters(Map unparsedParameters) { + this.unparsedParameters = unparsedParameters; + return this; + } + + /** + * Converts the blob URL parts to a {@link URL}. 
+ * + * @return A {@code java.net.URL} to the blob resource composed of all the elements in the object. + * + * @throws MalformedURLException + * The fields present on the BlobURLParts object were insufficient to construct a valid URL or were + * ill-formatted. + */ + public URL toURL() throws MalformedURLException { + UrlBuilder url = new UrlBuilder().withScheme(this.scheme).withHost(this.host); + + StringBuilder path = new StringBuilder(); + if (this.containerName != null) { + path.append(this.containerName); + if (this.blobName != null) { + path.append('/'); + path.append(this.blobName); + } + } + url.withPath(path.toString()); + + if (this.snapshot != null) { + url.setQueryParameter(Constants.SNAPSHOT_QUERY_PARAMETER, this.snapshot); + } + if (this.sasQueryParameters != null) { + String encodedSAS = this.sasQueryParameters.encode(); + if (encodedSAS.length() != 0) { + url.withQuery(encodedSAS); + } + } + + for (Map.Entry entry : this.unparsedParameters.entrySet()) { + // The commas are intentionally encoded. + url.setQueryParameter(entry.getKey(), + Utility.safeURLEncode(String.join(",", entry.getValue()))); + } + + return url.toURL(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/BlockBlobURL.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/BlockBlobURL.java new file mode 100644 index 0000000000000..73105ddd98522 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/BlockBlobURL.java @@ -0,0 +1,435 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import com.microsoft.azure.storage.blob.models.*; +import com.microsoft.rest.v2.Context; +import com.microsoft.rest.v2.http.HttpPipeline; +import io.reactivex.Flowable; +import io.reactivex.Single; + +import java.net.MalformedURLException; +import java.net.URL; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; +import java.util.List; + +import static com.microsoft.azure.storage.blob.Utility.addErrorWrappingToSingle; + +/** + * Represents a URL to a block blob. It may be obtained by direct construction or via the create method on a + * {@link ContainerURL} object. This class does not hold any state about a particular blob but is instead a convenient + * way of sending off appropriate requests to the resource on the service. Please refer to the + * Azure Docs + * for more information on block blobs. + */ +public final class BlockBlobURL extends BlobURL { + + /** + * Indicates the maximum number of bytes that can be sent in a call to upload. + */ + public static final int MAX_UPLOAD_BLOB_BYTES = 256 * Constants.MB; + + /** + * Indicates the maximum number of bytes that can be sent in a call to stageBlock. + */ + public static final int MAX_STAGE_BLOCK_BYTES = 100 * Constants.MB; + + /** + * Indicates the maximum number of blocks allowed in a block blob. + */ + public static final int MAX_BLOCKS = 50000; + + /** + * Creates a {@code BlockBlobURL} object pointing to the account specified by the URL and using the provided + * pipeline to make HTTP requests. 
+ * + * @param url + * A {@code URL} to an Azure Storage block blob. + * @param pipeline + * A {@code HttpPipeline} which configures the behavior of HTTP exchanges. Please refer to + * {@link StorageURL#createPipeline(ICredentials, PipelineOptions)} for more information. + */ + public BlockBlobURL(URL url, HttpPipeline pipeline) { + super(url, pipeline); + } + + /** + * Creates a new {@link BlockBlobURL} with the given pipeline. + * + * @param pipeline + * An {@link HttpPipeline} object to set. + * + * @return A {@link BlockBlobURL} object with the given pipeline. + */ + public BlockBlobURL withPipeline(HttpPipeline pipeline) { + try { + return new BlockBlobURL(new URL(this.storageClient.url()), pipeline); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + } + + /** + * Creates a new {@link BlockBlobURL} with the given snapshot. + * + * @param snapshot + * A {@code String} of the snapshot identifier. + * + * @return A {@link BlockBlobURL} object with the given pipeline. + */ + public BlockBlobURL withSnapshot(String snapshot) throws MalformedURLException, UnknownHostException { + BlobURLParts blobURLParts = URLParser.parse(new URL(this.storageClient.url())); + blobURLParts.withSnapshot(snapshot); + return new BlockBlobURL(blobURLParts.toURL(), super.storageClient.httpPipeline()); + } + + /** + * Creates a new block blob, or updates the content of an existing block blob. + * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not + * supported with PutBlob; the content of the existing blob is overwritten with the new content. To + * perform a partial update of a block blob's, use PutBlock and PutBlockList. + * For more information, see the + * Azure Docs. + *

    + * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flowable} must produce the same data each time it is subscribed to. + *

    + * For more efficient bulk-upload scenarios, please refer to the {@link TransferManager} for convenience methods. + * + * @param data + * The data to write to the blob. Note that this {@code Flowable} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * emitted by the {@code Flowable}. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=upload_download "Sample code for BlockBlobURL.upload")] \n + * For more samples, please see the [Samples file](https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single upload(Flowable data, long length) { + return this.upload(data, length, null, null, null, null); + } + + /** + * Creates a new block blob, or updates the content of an existing block blob. + * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not + * supported with PutBlob; the content of the existing blob is overwritten with the new content. To + * perform a partial update of a block blob, use PutBlock and PutBlockList. + * For more information, see the + * Azure Docs. + *

    + * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flowable} must produce the same data each time it is subscribed to. + *

    + * For more efficient bulk-upload scenarios, please refer to the {@link TransferManager} for convenience methods. + * + * @param data + * The data to write to the blob. Note that this {@code Flowable} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * emitted by the {@code Flowable}. + * @param headers + * {@link BlobHTTPHeaders} + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=upload_download "Sample code for BlockBlobURL.upload")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single upload(Flowable data, long length, BlobHTTPHeaders headers, + Metadata metadata, BlobAccessConditions accessConditions, Context context) { + metadata = metadata == null ? Metadata.NONE : metadata; + accessConditions = accessConditions == null ? BlobAccessConditions.NONE : accessConditions; + context = context == null ? 
Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedBlockBlobs().uploadWithRestResponseAsync(context, + data, length, null, metadata, null, headers, accessConditions.leaseAccessConditions(), + accessConditions.modifiedAccessConditions())); + } + + /** + * Uploads the specified block to the block blob's "staging area" to be later committed by a call to + * commitBlockList. For more information, see the + * Azure Docs. + *

    + * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flowable} must produce the same data each time it is subscribed to. + * + * @param base64BlockID + * A Base64 encoded {@code String} that specifies the ID for this block. Note that all block ids for a given + * blob must be the same length. + * @param data + * The data to write to the block. Note that this {@code Flowable} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * emitted by the {@code Flowable}. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blocks "Sample code for BlockBlobURL.stageBlock")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single stageBlock(String base64BlockID, Flowable data, + long length) { + return this.stageBlock(base64BlockID, data, length, null, null); + } + + /** + * Uploads the specified block to the block blob's "staging area" to be later committed by a call to + * commitBlockList. For more information, see the + * Azure Docs. + *

    + * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flowable} must produce the same data each time it is subscribed to. + * + * @param base64BlockID + * A Base64 encoded {@code String} that specifies the ID for this block. Note that all block ids for a given + * blob must be the same length. + * @param data + * The data to write to the block. Note that this {@code Flowable} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * @param length + * The exact length of the data. It is important that this value match precisely the length of the data + * emitted by the {@code Flowable}. + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blocks "Sample code for BlockBlobURL.stageBlock")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single stageBlock(String base64BlockID, Flowable data, long length, + LeaseAccessConditions leaseAccessConditions, Context context) { + context = context == null ? 
Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedBlockBlobs().stageBlockWithRestResponseAsync( + context, base64BlockID, length, data, null, null, null, leaseAccessConditions)); + } + + /** + * Creates a new block to be committed as part of a blob where the contents are read from a URL. For more + * information, see the Azure Docs. + * + * @param base64BlockID + * A Base64 encoded {@code String} that specifies the ID for this block. Note that all block ids for a given + * blob must be the same length. + * @param sourceURL + * The url to the blob that will be the source of the copy. A source blob in the same storage account can be + * authenticated via Shared Key. However, if the source is a blob in another account, the source blob must + * either be public or must be authenticated via a shared access signature. If the source blob is public, no + * authentication is required to perform the operation. + * @param sourceRange + * {@link BlobRange} + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=block_from_url "Sample code for BlockBlobURL.stageBlockFromURL")] + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single stageBlockFromURL(String base64BlockID, URL sourceURL, + BlobRange sourceRange) { + return this.stageBlockFromURL(base64BlockID, sourceURL, sourceRange, null, null, null); + } + + /** + * Creates a new block to be committed as part of a blob where the contents are read from a URL. For more + * information, see the Azure Docs. + * + * @param base64BlockID + * A Base64 encoded {@code String} that specifies the ID for this block. Note that all block ids for a given + * blob must be the same length. 
+ * @param sourceURL + * The url to the blob that will be the source of the copy. A source blob in the same storage account can + * be authenticated via Shared Key. However, if the source is a blob in another account, the source blob + * must either be public or must be authenticated via a shared access signature. If the source blob is + * public, no authentication is required to perform the operation. + * @param sourceRange + * {@link BlobRange} + * @param sourceContentMD5 + * An MD5 hash of the block content from the source blob. If specified, the service will calculate the MD5 + * of the received data and fail the request if it does not match the provided MD5. + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=block_from_url "Sample code for BlockBlobURL.stageBlockFromURL")] + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single stageBlockFromURL(String base64BlockID, URL sourceURL, + BlobRange sourceRange, byte[] sourceContentMD5, LeaseAccessConditions leaseAccessConditions, + Context context) { + sourceRange = sourceRange == null ? 
BlobRange.DEFAULT : sourceRange; + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle( + this.storageClient.generatedBlockBlobs().stageBlockFromURLWithRestResponseAsync(context, + base64BlockID, 0, sourceURL, sourceRange.toString(), sourceContentMD5, + null, null, leaseAccessConditions)); + } + + /** + * Returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter. + * For more information, see the + * Azure Docs. + * + * @param listType + * Specifies which type of blocks to return. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blocks "Sample code for BlockBlobURL.getBlockList")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single getBlockList(BlockListType listType) { + return this.getBlockList(listType, null, null); + } + + /** + * Returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter. + * For more information, see the + * Azure Docs. + * + * @param listType + * Specifies which type of blocks to return. + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. 
+ * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blocks "Sample code for BlockBlobURL.getBlockList")] \n + * For more samples, please see the [Samples file](https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single getBlockList(BlockListType listType, + LeaseAccessConditions leaseAccessConditions, Context context) { + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedBlockBlobs().getBlockListWithRestResponseAsync( + context, listType, null, null, null, leaseAccessConditions)); + } + + /** + * Writes a blob by specifying the list of block IDs that are to make up the blob. + * In order to be written as part of a blob, a block must have been successfully written + * to the server in a prior stageBlock operation. You can call commitBlockList to update a blob + * by uploading only those blocks that have changed, then committing the new and existing + * blocks together. Any blocks not specified in the block list are permanently deleted. + * For more information, see the + * Azure Docs. + *

    + * For more efficient bulk-upload scenarios, please refer to the {@link TransferManager} for convenience methods. + * + * @param base64BlockIDs + * A list of base64 encoded {@code String}s that specifies the block IDs to be committed. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blocks "Sample code for BlockBlobURL.commitBlockList")] \n + * For more samples, please see the [Samples file](https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single commitBlockList(List base64BlockIDs) { + return this.commitBlockList(base64BlockIDs, null, null, null, null); + } + + /** + * Writes a blob by specifying the list of block IDs that are to make up the blob. + * In order to be written as part of a blob, a block must have been successfully written + * to the server in a prior stageBlock operation. You can call commitBlockList to update a blob + * by uploading only those blocks that have changed, then committing the new and existing + * blocks together. Any blocks not specified in the block list are permanently deleted. + * For more information, see the + * Azure Docs. + *

    + * For more efficient bulk-upload scenarios, please refer to the {@link TransferManager} for convenience methods. + * + * @param base64BlockIDs + * A list of base64 encoded {@code String}s that specifies the block IDs to be committed. + * @param headers + * {@link BlobHTTPHeaders} + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=blocks "Sample code for BlockBlobURL.commitBlockList")] \n + * For more samples, please see the [Samples file](https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single commitBlockList(List base64BlockIDs, + BlobHTTPHeaders headers, Metadata metadata, BlobAccessConditions accessConditions, Context context) { + metadata = metadata == null ? Metadata.NONE : metadata; + accessConditions = accessConditions == null ? BlobAccessConditions.NONE : accessConditions; + context = context == null ? 
Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedBlockBlobs().commitBlockListWithRestResponseAsync( + context, new BlockLookupList().withLatest(base64BlockIDs), null, + metadata, null, headers, accessConditions.leaseAccessConditions(), + accessConditions.modifiedAccessConditions())); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/CommonRestResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/CommonRestResponse.java new file mode 100644 index 0000000000000..ba614568c0038 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/CommonRestResponse.java @@ -0,0 +1,121 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import com.microsoft.azure.storage.blob.models.BlockBlobCommitBlockListResponse; +import com.microsoft.azure.storage.blob.models.BlockBlobUploadResponse; +import com.microsoft.rest.v2.RestResponse; + +import java.time.OffsetDateTime; + +/** + * A generic wrapper for any type of blob REST API response. Used and returned by methods in the {@link TransferManager} + * class. The methods there return this type because they represent composite operations which may conclude with any of + * several possible REST calls depending on the data provided. 
+ */ +public final class CommonRestResponse { + + private BlockBlobUploadResponse uploadBlobResponse; + + private BlockBlobCommitBlockListResponse commitBlockListResponse; + + private CommonRestResponse() { + uploadBlobResponse = null; + commitBlockListResponse = null; + } + + static CommonRestResponse createFromPutBlobResponse(BlockBlobUploadResponse response) { + CommonRestResponse commonRestResponse = new CommonRestResponse(); + commonRestResponse.uploadBlobResponse = response; + return commonRestResponse; + } + + static CommonRestResponse createFromPutBlockListResponse(BlockBlobCommitBlockListResponse response) { + CommonRestResponse commonRestResponse = new CommonRestResponse(); + commonRestResponse.commitBlockListResponse = response; + return commonRestResponse; + } + + /** + * @return The status code for the response + */ + public int statusCode() { + if (uploadBlobResponse != null) { + return uploadBlobResponse.statusCode(); + } + return commitBlockListResponse.statusCode(); + } + + /** + * @return An HTTP Etag for the blob at the time of the request. + */ + public String eTag() { + if (uploadBlobResponse != null) { + return uploadBlobResponse.headers().eTag(); + } + return commitBlockListResponse.headers().eTag(); + } + + /** + * @return The time when the blob was last modified. + */ + public OffsetDateTime lastModified() { + if (uploadBlobResponse != null) { + return uploadBlobResponse.headers().lastModified(); + } + return commitBlockListResponse.headers().lastModified(); + } + + /** + * @return The id of the service request for which this is the response. + */ + public String requestId() { + if (uploadBlobResponse != null) { + return uploadBlobResponse.headers().requestId(); + } + return commitBlockListResponse.headers().requestId(); + } + + /** + * @return The date of the response. 
+ */ + public OffsetDateTime date() { + if (uploadBlobResponse != null) { + return uploadBlobResponse.headers().date(); + } + return commitBlockListResponse.headers().date(); + } + + /** + * @return The service version responding to the request. + */ + public String version() { + if (uploadBlobResponse != null) { + return uploadBlobResponse.headers().version(); + } + return commitBlockListResponse.headers().version(); + } + + /** + * @return The underlying response. + */ + public RestResponse response() { + if (uploadBlobResponse != null) { + return uploadBlobResponse; + } + return commitBlockListResponse; + } + +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/Constants.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/Constants.java new file mode 100644 index 0000000000000..1ac29c4f8dbea --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/Constants.java @@ -0,0 +1,280 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +/** + * RESERVED FOR INTERNAL USE. Contains storage constants. + */ +final class Constants { + + /** + * The master Microsoft Azure Storage header prefix. + */ + static final String PREFIX_FOR_STORAGE_HEADER = "x-ms-"; + /** + * Constant representing a kilobyte (Non-SI version). 
+ */ + static final int KB = 1024; + /** + * Constant representing a megabyte (Non-SI version). + */ + static final int MB = 1024 * KB; + /** + * An empty {@code String} to use for comparison. + */ + static final String EMPTY_STRING = ""; + /** + * Specifies HTTP. + */ + static final String HTTP = "http"; + /** + * Specifies HTTPS. + */ + static final String HTTPS = "https"; + /** + * Specifies both HTTPS and HTTP. + */ + static final String HTTPS_HTTP = "https,http"; + /** + * The default type for content-type and accept. + */ + static final String UTF8_CHARSET = "UTF-8"; + /** + * The query parameter for snapshots. + */ + static final String SNAPSHOT_QUERY_PARAMETER = "snapshot"; + /** + * The word redacted. + */ + static final String REDACTED = "REDACTED"; + /** + * The default amount of parallelism for TransferManager operations. + */ + // We chose this to match Go, which followed AWS' default. + static final int TRANSFER_MANAGER_DEFAULT_PARALLELISM = 5; + + /** + * Private Default Ctor + */ + private Constants() { + // Private to prevent construction. + } + + /** + * Defines constants for use with HTTP headers. + */ + static final class HeaderConstants { + /** + * The Authorization header. + */ + static final String AUTHORIZATION = "Authorization"; + + /** + * The format string for specifying ranges with only begin offset. + */ + static final String BEGIN_RANGE_HEADER_FORMAT = "bytes=%d-"; + + /** + * The header that indicates the client request ID. + */ + static final String CLIENT_REQUEST_ID_HEADER = PREFIX_FOR_STORAGE_HEADER + "client-request-id"; + + /** + * The ContentEncoding header. + */ + static final String CONTENT_ENCODING = "Content-Encoding"; + + /** + * The ContentLangauge header. + */ + static final String CONTENT_LANGUAGE = "Content-Language"; + + /** + * The ContentLength header. + */ + static final String CONTENT_LENGTH = "Content-Length"; + + /** + * The ContentMD5 header. 
+ */ + static final String CONTENT_MD5 = "Content-MD5"; + + /** + * The ContentType header. + */ + static final String CONTENT_TYPE = "Content-Type"; + + /** + * The header that specifies the date. + */ + static final String DATE = PREFIX_FOR_STORAGE_HEADER + "date"; + + /** + * The header that specifies the error code on unsuccessful responses. + */ + static final String ERROR_CODE = PREFIX_FOR_STORAGE_HEADER + "error-code"; + + /** + * The IfMatch header. + */ + static final String IF_MATCH = "If-Match"; + + /** + * The IfModifiedSince header. + */ + static final String IF_MODIFIED_SINCE = "If-Modified-Since"; + + /** + * The IfNoneMatch header. + */ + static final String IF_NONE_MATCH = "If-None-Match"; + + /** + * The IfUnmodifiedSince header. + */ + static final String IF_UNMODIFIED_SINCE = "If-Unmodified-Since"; + + /** + * The Range header. + */ + static final String RANGE = "Range"; + + /** + * The format string for specifying ranges. + */ + static final String RANGE_HEADER_FORMAT = "bytes=%d-%d"; + + /** + * The copy source header. + */ + static final String COPY_SOURCE = "x-ms-copy-source"; + + /** + * The version header. + */ + static final String VERSION = "x-ms-version"; + + /** + * The current storage version header value. + */ + static final String TARGET_STORAGE_VERSION = "2018-03-28"; + + /** + * The UserAgent header. + */ + static final String USER_AGENT = "User-Agent"; + + /** + * Specifies the value to use for UserAgent header. + */ + static final String USER_AGENT_PREFIX = "Azure-Storage"; + + /** + * Specifies the value to use for UserAgent header. + */ + static final String USER_AGENT_VERSION = "10.5.0"; + + private HeaderConstants() { + // Private to prevent construction. + } + } + + static final class UrlConstants { + + /** + * The SAS service version parameter. + */ + static final String SAS_SERVICE_VERSION = "sv"; + + /** + * The SAS services parameter. 
+ */ + static final String SAS_SERVICES = "ss"; + + /** + * The SAS resource types parameter. + */ + static final String SAS_RESOURCES_TYPES = "srt"; + + /** + * The SAS protocol parameter. + */ + static final String SAS_PROTOCOL = "spr"; + + /** + * The SAS start time parameter. + */ + static final String SAS_START_TIME = "st"; + + /** + * The SAS expiration time parameter. + */ + static final String SAS_EXPIRY_TIME = "se"; + + /** + * The SAS IP range parameter. + */ + static final String SAS_IP_RANGE = "sip"; + + /** + * The SAS signed identifier parameter. + */ + static final String SAS_SIGNED_IDENTIFIER = "si"; + + /** + * The SAS signed resource parameter. + */ + static final String SAS_SIGNED_RESOURCE = "sr"; + + /** + * The SAS signed permissions parameter. + */ + static final String SAS_SIGNED_PERMISSIONS = "sp"; + + /** + * The SAS signature parameter. + */ + static final String SAS_SIGNATURE = "sig"; + + /** + * The SAS cache control parameter. + */ + static final String SAS_CACHE_CONTROL = "rscc"; + + /** + * The SAS content disposition parameter. + */ + static final String SAS_CONTENT_DISPOSITION = "rscd"; + + /** + * The SAS content encoding parameter. + */ + static final String SAS_CONTENT_ENCODING = "rsce"; + + /** + * The SAS content language parameter. + */ + static final String SAS_CONTENT_LANGUAGE = "rscl"; + + /** + * The SAS content type parameter. + */ + static final String SAS_CONTENT_TYPE = "rsct"; + + private UrlConstants() { + // Private to prevent construction. 
+ } + } +} \ No newline at end of file diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/ContainerAccessConditions.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/ContainerAccessConditions.java new file mode 100644 index 0000000000000..a6470ce00cf55 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/ContainerAccessConditions.java @@ -0,0 +1,80 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import com.microsoft.azure.storage.blob.models.LeaseAccessConditions; +import com.microsoft.azure.storage.blob.models.ModifiedAccessConditions; + +/** + * This class contains values which will restrict the successful operation of a variety of requests to the conditions + * present. These conditions are entirely optional. The entire object or any of its properties may be set to null when + * passed to a method to indicate that those conditions are not desired. Please refer to the type of each field for more + * information on those particular access conditions. + */ +public final class ContainerAccessConditions { + + /** + * An object representing no access conditions. 
+ */ + public static final ContainerAccessConditions NONE = new ContainerAccessConditions(); + + private ModifiedAccessConditions modifiedAccessConditions; + + private LeaseAccessConditions leaseAccessConditions; + + /** + * Creates an instance which has fields set to non-null, empty values. + */ + public ContainerAccessConditions() { + this.modifiedAccessConditions = new ModifiedAccessConditions(); + this.leaseAccessConditions = new LeaseAccessConditions(); + } + + /** + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used to + * construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + */ + public ModifiedAccessConditions modifiedAccessConditions() { + return modifiedAccessConditions; + } + + /** + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used to + * construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + */ + public ContainerAccessConditions withModifiedAccessConditions(ModifiedAccessConditions modifiedAccessConditions) { + this.modifiedAccessConditions = modifiedAccessConditions; + return this; + } + + /** + * By setting lease access conditions, requests will fail if the provided lease does not match the active lease on + * the blob. + */ + public LeaseAccessConditions leaseAccessConditions() { + return leaseAccessConditions; + } + + /** + * By setting lease access conditions, requests will fail if the provided lease does not match the active lease on + * the blob. 
+ */ + public ContainerAccessConditions withLeaseAccessConditions(LeaseAccessConditions leaseID) { + this.leaseAccessConditions = leaseID; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/ContainerListingDetails.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/ContainerListingDetails.java new file mode 100644 index 0000000000000..c02fb5d11f81a --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/ContainerListingDetails.java @@ -0,0 +1,64 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import com.microsoft.azure.storage.blob.models.ListContainersIncludeType; + +/** + * This type allows users to specify additional information the service should return with each container when listing + * containers in an account (via a {@link ServiceURL} object). This type is immutable to ensure thread-safety of + * requests, so changing the details for a different listing operation requires construction of a new object. Null may + * be passed if none of the options are desirable. + */ +public final class ContainerListingDetails { + + /** + * An object indicating that no extra details should be returned. 
+ */ + public static final ContainerListingDetails NONE = new ContainerListingDetails(); + + private boolean metadata; + + public ContainerListingDetails() { + + } + + /** + * Whether metadata should be returned. + */ + public boolean metadata() { + return this.metadata; + } + + /** + * Whether metadata should be returned. + */ + public ContainerListingDetails withMetadata(boolean metadata) { + this.metadata = metadata; + return this; + } + + /* + This is used internally to convert the details structure into the appropriate type to pass to the protocol layer. + It is intended to mirror the BlobListingDetails.toList() method, but is slightly different since there is only one + possible value here currently. The customer should never have need for this. + */ + ListContainersIncludeType toIncludeType() { + if (this.metadata) { + return ListContainersIncludeType.METADATA; + } + return null; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/ContainerSASPermission.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/ContainerSASPermission.java new file mode 100644 index 0000000000000..ec95d08e64c37 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/ContainerSASPermission.java @@ -0,0 +1,215 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.microsoft.azure.storage.blob; + + +import java.util.Locale; + +/** + * This is a helper class to construct a string representing the permissions granted by a ServiceSAS to a container. + * Setting a value to true means that any SAS which uses these permissions will grant permissions for that operation. + * Once all the values are set, this should be serialized with toString and set as the permissions field on a + * {@link ServiceSASSignatureValues} object. It is possible to construct the permissions string without this class, but + * the order of the permissions is particular and this class guarantees correctness. + */ +public final class ContainerSASPermission { + private boolean read; + + private boolean add; + + private boolean create; + + private boolean write; + + private boolean delete; + + private boolean list; + + /** + * Initializes a {@code ContainerSASPermission} object with all fields set to false. + */ + public ContainerSASPermission() { + } + + /** + * Creates a {@code ContainerSASPermission} from the specified permissions string. This method will throw an + * {@code IllegalArgumentException} if it encounters a character that does not correspond to a valid permission. + * + * @param permString + * A {@code String} which represents the {@code ContainerSASPermission}. + * + * @return A {@code ContainerSASPermission} generated from the given {@code String}. 
+ */ + public static ContainerSASPermission parse(String permString) { + ContainerSASPermission permissions = new ContainerSASPermission(); + + for (int i = 0; i < permString.length(); i++) { + char c = permString.charAt(i); + switch (c) { + case 'r': + permissions.read = true; + break; + case 'a': + permissions.add = true; + break; + case 'c': + permissions.create = true; + break; + case 'w': + permissions.write = true; + break; + case 'd': + permissions.delete = true; + break; + case 'l': + permissions.list = true; + break; + default: + throw new IllegalArgumentException( + String.format(Locale.ROOT, SR.ENUM_COULD_NOT_BE_PARSED_INVALID_VALUE, "Permissions", permString, c)); + } + } + return permissions; + } + + /** + * Specifies Read access granted. + */ + public boolean read() { + return read; + } + + /** + * Specifies Read access granted. + */ + public ContainerSASPermission withRead(boolean read) { + this.read = read; + return this; + } + + /** + * Specifies Add access granted. + */ + public boolean add() { + return add; + } + + /** + * Specifies Add access granted. + */ + public ContainerSASPermission withAdd(boolean add) { + this.add = add; + return this; + } + + /** + * Specifies Create access granted. + */ + public boolean create() { + return create; + } + + /** + * Specifies Create access granted. + */ + public ContainerSASPermission withCreate(boolean create) { + this.create = create; + return this; + } + + /** + * Specifies Write access granted. + */ + public boolean write() { + return write; + } + + /** + * Specifies Write access granted. + */ + public ContainerSASPermission withWrite(boolean write) { + this.write = write; + return this; + } + + /** + * Specifies Delete access granted. + */ + public boolean delete() { + return delete; + } + + /** + * Specifies Delete access granted. + */ + public ContainerSASPermission withDelete(boolean delete) { + this.delete = delete; + return this; + } + + /** + * Specifies List access granted. 
+ */ + public boolean list() { + return list; + } + + /** + * Specifies List access granted. + */ + public ContainerSASPermission withList(boolean list) { + this.list = list; + return this; + } + + /** + * Converts the given permissions to a {@code String}. Using this method will guarantee the permissions are in an + * order accepted by the service. + * + * @return A {@code String} which represents the {@code ContainerSASPermission}. + */ + @Override + public String toString() { + // The order of the characters should be as specified here to ensure correctness: + // https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas + final StringBuilder builder = new StringBuilder(); + + if (this.read) { + builder.append('r'); + } + + if (this.add) { + builder.append('a'); + } + + if (this.create) { + builder.append('c'); + } + + if (this.write) { + builder.append('w'); + } + + if (this.delete) { + builder.append('d'); + } + + if (this.list) { + builder.append('l'); + } + + return builder.toString(); + } +} \ No newline at end of file diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/ContainerURL.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/ContainerURL.java new file mode 100644 index 0000000000000..886953e34d95b --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/ContainerURL.java @@ -0,0 +1,963 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import com.microsoft.azure.storage.blob.models.*; +import com.microsoft.rest.v2.Context; +import com.microsoft.rest.v2.http.HttpPipeline; +import io.reactivex.Single; + +import java.net.MalformedURLException; +import java.net.URL; +import java.time.temporal.ChronoUnit; +import java.util.List; + +import static com.microsoft.azure.storage.blob.Utility.addErrorWrappingToSingle; +import static com.microsoft.azure.storage.blob.Utility.safeURLEncode; + +/** + * Represents a URL to a container. It may be obtained by direct construction or via the create method on a + * {@link ServiceURL} object. This class does not hold any state about a particular blob but is instead a convenient way + * of sending off appropriate requests to the resource on the service. It may also be used to construct URLs to blobs. + * Please refer to the + * Azure Docs + * for more information on containers. + */ +public final class ContainerURL extends StorageURL { + + + public static final String ROOT_CONTAINER_NAME = "$root"; + + public static final String STATIC_WEBSITE_CONTAINER_NAME = "$web"; + + public static final String LOG_CONTAINER_NAME = "$logs"; + + + /** + * Creates a {@code ContainerURL} object pointing to the account specified by the URL and using the provided + * pipeline to make HTTP requests. + * + * @param url + * A {@code URL} to an Azure Storage container. + * @param pipeline + * A {@code HttpPipeline} which configures the behavior of HTTP exchanges. Please refer to + * {@link StorageURL#createPipeline(ICredentials, PipelineOptions)} for more information. + */ + public ContainerURL(URL url, HttpPipeline pipeline) { + super(url, pipeline); + } + + /** + * Creates a new {@link ContainerURL} with the given pipeline. + * + * @param pipeline + * An {@link HttpPipeline} object to set. 
+ * + * @return A {@link ContainerURL} object with the given pipeline. + */ + public ContainerURL withPipeline(HttpPipeline pipeline) { + try { + return new ContainerURL(new URL(this.storageClient.url()), pipeline); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + } + + /** + * Creates a new {@link BlockBlobURL} object by concatenating the blobName to the end of + * ContainerURL's URL. The new BlockBlobUrl uses the same request policy pipeline as the ContainerURL. + * To change the pipeline, create the BlockBlobUrl and then call its WithPipeline method passing in the + * desired pipeline object. Or, call this package's NewBlockBlobUrl instead of calling this object's + * NewBlockBlobUrl method. + * + * @param blobName + * A {@code String} representing the name of the blob. + * + * @return A new {@link BlockBlobURL} object which references the blob with the specified name in this container. + */ + public BlockBlobURL createBlockBlobURL(String blobName) { + blobName = safeURLEncode(blobName); + try { + return new BlockBlobURL(StorageURL.appendToURLPath(new URL(this.storageClient.url()), blobName), + this.storageClient.httpPipeline()); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + } + + /** + * Creates a new PageBlobURL object by concatenating blobName to the end of + * ContainerURL's URL. The new PageBlobURL uses the same request policy pipeline as the ContainerURL. + * To change the pipeline, create the PageBlobURL and then call its WithPipeline method passing in the + * desired pipeline object. Or, call this package's NewPageBlobURL instead of calling this object's + * NewPageBlobURL method. + * + * @param blobName + * A {@code String} representing the name of the blob. + * + * @return A new {@link PageBlobURL} object which references the blob with the specified name in this container. 
+ */ + public PageBlobURL createPageBlobURL(String blobName) { + blobName = safeURLEncode(blobName); + try { + return new PageBlobURL(StorageURL.appendToURLPath(new URL(this.storageClient.url()), blobName), + this.storageClient.httpPipeline()); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + } + + /** + * Creates a new AppendBlobURL object by concatenating blobName to the end of + * ContainerURL's URL. The new AppendBlobURL uses the same request policy pipeline as the ContainerURL. + * To change the pipeline, create the AppendBlobURL and then call its WithPipeline method passing in the + * desired pipeline object. Or, call this package's NewAppendBlobURL instead of calling this object's + * NewAppendBlobURL method. + * + * @param blobName + * A {@code String} representing the name of the blob. + * + * @return A new {@link AppendBlobURL} object which references the blob with the specified name in this container. + */ + public AppendBlobURL createAppendBlobURL(String blobName) { + blobName = safeURLEncode(blobName); + try { + return new AppendBlobURL(StorageURL.appendToURLPath(new URL(this.storageClient.url()), blobName), + this.storageClient.httpPipeline()); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + } + + /** + * Creates a new BlobURL object by concatenating blobName to the end of + * ContainerURL's URL. The new BlobURL uses the same request policy pipeline as the ContainerURL. + * To change the pipeline, create the BlobURL and then call its WithPipeline method passing in the + * desired pipeline object. Or, call this package's createBlobURL instead of calling this object's + * createBlobURL method. + * + * @param blobName + * A {@code String} representing the name of the blob. + * + * @return A new {@link BlobURL} object which references the blob with the specified name in this container. 
+ */ + public BlobURL createBlobURL(String blobName) { + blobName = safeURLEncode(blobName); + try { + return new BlobURL(StorageURL.appendToURLPath(new URL(this.storageClient.url()), blobName), + this.storageClient.httpPipeline()); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + } + + /** + * Creates a new container within a storage account. If a container with the same name already exists, the operation + * fails. For more information, see the + * Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_basic "Sample code for ContainerURL.create")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single create() { + return this.create(null, null, null); + } + + /** + * Creates a new container within a storage account. If a container with the same name already exists, the operation + * fails. For more information, see the + * Azure Docs. + * + * @param metadata + * {@link Metadata} + * @param accessType + * Specifies how the data in this container is available to the public. See the x-ms-blob-public-access header + * in the Azure Docs for more information. Pass null for no public access. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_basic "Sample code for ContainerURL.create")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single create(Metadata metadata, PublicAccessType accessType, Context context) { + metadata = metadata == null ? Metadata.NONE : metadata; + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedContainers().createWithRestResponseAsync( + context, null, metadata, accessType, null)); + + } + + /** + * Marks the specified container for deletion. The container and any blobs contained within it are later + * deleted during garbage collection. For more information, see the + * Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_basic "Sample code for ContainerURL.delete")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single delete() { + return this.delete(null, null); + } + + /** + * Marks the specified container for deletion. The container and any blobs contained within it are later + * deleted during garbage collection. For more information, see the + * Azure Docs. + * + * @param accessConditions + * {@link ContainerAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. 
Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_basic "Sample code for ContainerURL.delete")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single delete(ContainerAccessConditions accessConditions, Context context) { + accessConditions = accessConditions == null ? ContainerAccessConditions.NONE : accessConditions; + context = context == null ? Context.NONE : context; + + if (!validateNoEtag(accessConditions.modifiedAccessConditions())) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new UnsupportedOperationException("ETag access conditions are not supported for this API."); + } + + return addErrorWrappingToSingle(this.storageClient.generatedContainers() + .deleteWithRestResponseAsync(context, null, null, accessConditions.leaseAccessConditions(), + accessConditions.modifiedAccessConditions())); + } + + /** + * Returns the container's metadata and system properties. For more information, see the + * Azure Docs. + * + * @return Emits the successful response. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_basic "Sample code for ContainerURL.getProperties")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single getProperties() { + return this.getProperties(null, null); + } + + /** + * Returns the container's metadata and system properties. For more information, see the + * Azure Docs. + * + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_basic "Sample code for ContainerURL.getProperties")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single getProperties(LeaseAccessConditions leaseAccessConditions, + Context context) { + context = context == null ? 
Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedContainers() + .getPropertiesWithRestResponseAsync(context, null, null, leaseAccessConditions)); + } + + /** + * Sets the container's metadata. For more information, see the + * Azure Docs. + * + * @param metadata + * {@link Metadata} + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_basic "Sample code for ContainerURL.setMetadata")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single setMetadata(Metadata metadata) { + return this.setMetadata(metadata, null, null); + } + + /** + * Sets the container's metadata. For more information, see the + * Azure Docs. + * + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link ContainerAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_basic "Sample code for ContainerURL.setMetadata")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single setMetadata(Metadata metadata, + ContainerAccessConditions accessConditions, Context context) { + metadata = metadata == null ? Metadata.NONE : metadata; + accessConditions = accessConditions == null ? ContainerAccessConditions.NONE : accessConditions; + context = context == null ? Context.NONE : context; + if (!validateNoEtag(accessConditions.modifiedAccessConditions()) || + accessConditions.modifiedAccessConditions().ifUnmodifiedSince() != null) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new UnsupportedOperationException( + "If-Modified-Since is the only HTTP access condition supported for this API"); + } + + return addErrorWrappingToSingle(this.storageClient.generatedContainers() + .setMetadataWithRestResponseAsync(context, null, metadata, null, + accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions())); + } + + /** + * Returns the container's permissions. The permissions indicate whether container's blobs may be accessed publicly. + * For more information, see the + * Azure Docs. + * + * @return Emits the successful response. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_policy "Sample code for ContainerURL.getAccessPolicy")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single getAccessPolicy() { + return this.getAccessPolicy(null, null); + } + + /** + * Returns the container's permissions. The permissions indicate whether container's blobs may be accessed publicly. + * For more information, see the + * Azure Docs. + * + * @param leaseAccessConditions + * By setting lease access conditions, requests will fail if the provided lease does not match the active + * lease on the blob. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_policy "Sample code for ContainerURL.getAccessPolicy")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single getAccessPolicy(LeaseAccessConditions leaseAccessConditions, + Context context) { + context = context == null ? 
Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedContainers().getAccessPolicyWithRestResponseAsync( + context, null, null, leaseAccessConditions)); + } + + /** + * Sets the container's permissions. The permissions indicate whether blobs in a container may be accessed publicly. + * Note that, for each signed identifier, we will truncate the start and expiry times to the nearest second to + * ensure the time formatting is compatible with the service. For more information, see the + * Azure Docs. + * + * @param accessType + * Specifies how the data in this container is available to the public. See the x-ms-blob-public-access header + * in the Azure Docs for more information. Pass null for no public access. + * @param identifiers + * A list of {@link SignedIdentifier} objects that specify the permissions for the container. Please see + * here + * for more information. Passing null will clear all access policies. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_policy "Sample code for ContainerURL.setAccessPolicy")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single setAccessPolicy(PublicAccessType accessType, + List identifiers) { + return this.setAccessPolicy(accessType, identifiers, null, null); + } + + /** + * Sets the container's permissions. The permissions indicate whether blobs in a container may be accessed publicly. + * Note that, for each signed identifier, we will truncate the start and expiry times to the nearest second to + * ensure the time formatting is compatible with the service. For more information, see the + * Azure Docs. 
+ * + * @param accessType + * Specifies how the data in this container is available to the public. See the x-ms-blob-public-access header + * in the Azure Docs for more information. Pass null for no public access. + * @param identifiers + * A list of {@link SignedIdentifier} objects that specify the permissions for the container. Please see + * here + * for more information. Passing null will clear all access policies. + * @param accessConditions + * {@link ContainerAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_policy "Sample code for ContainerURL.setAccessPolicy")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single setAccessPolicy(PublicAccessType accessType, + List identifiers, ContainerAccessConditions accessConditions, Context context) { + accessConditions = accessConditions == null ? ContainerAccessConditions.NONE : accessConditions; + context = context == null ? Context.NONE : context; + + if (!validateNoEtag(accessConditions.modifiedAccessConditions())) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. 
+ throw new UnsupportedOperationException("ETag access conditions are not supported for this API."); + } + + /* + We truncate to seconds because the service only supports nanoseconds or seconds, but doing an + OffsetDateTime.now will only give back milliseconds (more precise fields are zeroed and not serialized). This + allows for proper serialization with no real detriment to users as sub-second precision on active time for + signed identifiers is not really necessary. + */ + if (identifiers != null) { + for (SignedIdentifier identifier : identifiers) { + if (identifier.accessPolicy() != null && identifier.accessPolicy().start() != null) { + identifier.accessPolicy().withStart( + identifier.accessPolicy().start().truncatedTo(ChronoUnit.SECONDS)); + } + if (identifier.accessPolicy() != null && identifier.accessPolicy().expiry() != null) { + identifier.accessPolicy().withExpiry( + identifier.accessPolicy().expiry().truncatedTo(ChronoUnit.SECONDS)); + } + } + } + + return addErrorWrappingToSingle(this.storageClient.generatedContainers() + .setAccessPolicyWithRestResponseAsync(context, identifiers, null, accessType, null, + accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions())); + } + + private boolean validateNoEtag(ModifiedAccessConditions modifiedAccessConditions) { + if (modifiedAccessConditions == null) { + return true; + } + return modifiedAccessConditions.ifMatch() == null && modifiedAccessConditions.ifNoneMatch() == null; + } + + /** + * Acquires a lease on the container for delete operations. The lease duration must be between 15 to + * 60 seconds, or infinite (-1). For more information, see the + * Azure Docs. 
+ * + * @param proposedId + * A {@code String} in any valid GUID format. + * @param duration + * The duration of the lease, in seconds, or negative one (-1) for a lease that never expires. + * A non-infinite lease can be between 15 and 60 seconds. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerURL.acquireLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single acquireLease(String proposedId, int duration) { + return this.acquireLease(proposedId, duration, null, null); + } + + /** + * Acquires a lease on the container for delete operations. The lease duration must be between 15 and + * 60 seconds, or infinite (-1). For more information, see the + * Azure Docs. + * + * @param proposedID + * A {@code String} in any valid GUID format. + * @param duration + * The duration of the lease, in seconds, or negative one (-1) for a lease that never expires. + * A non-infinite lease can be between 15 and 60 seconds. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. 
+ * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerURL.acquireLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single acquireLease(String proposedID, int duration, + ModifiedAccessConditions modifiedAccessConditions, Context context) { + if (!this.validateNoEtag(modifiedAccessConditions)) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new UnsupportedOperationException( + "ETag access conditions are not supported for this API."); + } + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedContainers().acquireLeaseWithRestResponseAsync( + context, null, duration, proposedID, null, modifiedAccessConditions)); + } + + /** + * Renews the container's previously-acquired lease. For more information, see the + * Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the container. + * + * @return Emits the successful response. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerURL.renewLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single renewLease(String leaseID) { + return this.renewLease(leaseID, null, null); + } + + /** + * Renews the container's previously-acquired lease. For more information, see the + * Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the container. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerURL.renewLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single renewLease(String leaseID, + ModifiedAccessConditions modifiedAccessConditions, Context context) { + if (!this.validateNoEtag(modifiedAccessConditions)) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new UnsupportedOperationException( + "ETag access conditions are not supported for this API."); + } + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedContainers().renewLeaseWithRestResponseAsync( + context, leaseID, null, null, modifiedAccessConditions)); + } + + /** + * Releases the container's previously-acquired lease. For more information, see the + * Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the container. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerURL.releaseLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single releaseLease(String leaseID) { + return this.releaseLease(leaseID, null, null); + } + + /** + * Releases the container's previously-acquired lease. For more information, see the + * Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the container. 
+ * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerURL.releaseLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single releaseLease(String leaseID, + ModifiedAccessConditions modifiedAccessConditions, Context context) { + if (!this.validateNoEtag(modifiedAccessConditions)) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new UnsupportedOperationException( + "ETag access conditions are not supported for this API."); + } + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedContainers().releaseLeaseWithRestResponseAsync( + context, leaseID, null, null, modifiedAccessConditions)); + } + + /** + * Breaks the container's previously-acquired lease. For more information, see the + * Azure Docs. 
+ * + * @apiNote + * ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerURL.breakLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/New-Storage-SDK-V10-Preview/src/test/java/com/microsoft/azure/storage/Samples.java) + * + * @return Emits the successful response. + */ + public Single breakLease() { + return this.breakLease(null, null, null); + } + + /** + * Breaks the container's previously-acquired lease. For more information, see the + * Azure Docs. + * + * @param breakPeriodInSeconds + * An optional {@code Integer} representing the proposed duration of seconds that the lease should continue + * before it is broken, between 0 and 60 seconds. This break period is only used if it is shorter than the time + * remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be + * available before the break period has expired, but the lease may be held for longer than the break period. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * + * @return Emits the successful response. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerURL.breakLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single breakLease(Integer breakPeriodInSeconds, + ModifiedAccessConditions modifiedAccessConditions, Context context) { + if (!this.validateNoEtag(modifiedAccessConditions)) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new UnsupportedOperationException( + "ETag access conditions are not supported for this API."); + } + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedContainers().breakLeaseWithRestResponseAsync( + context, null, breakPeriodInSeconds, null, modifiedAccessConditions)); + } + + /** + * Changes the container's leaseAccessConditions. For more information, see the + * Azure Docs. + * + * @param leaseID + * The leaseId of the active lease on the container. + * @param proposedID + * A {@code String} in any valid GUID format. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerURL.changeLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single changeLease(String leaseID, String proposedID) { + return this.changeLease(leaseID, proposedID, null, null); + } + + /** + * Changes the container's leaseAccessConditions. For more information, see the + * Azure Docs. 
+ * + * @param leaseID + * The leaseId of the active lease on the container. + * @param proposedID + * A {@code String} in any valid GUID format. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=container_lease "Sample code for ContainerURL.changeLease")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single changeLease(String leaseID, String proposedID, + ModifiedAccessConditions modifiedAccessConditions, Context context) { + if (!this.validateNoEtag(modifiedAccessConditions)) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new UnsupportedOperationException( + "ETag access conditions are not supported for this API."); + } + context = context == null ? 
Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedContainers().changeLeaseWithRestResponseAsync( + context, leaseID, proposedID, null, null, modifiedAccessConditions)); + } + + /** + * Returns a single segment of blobs starting from the specified Marker. Use an empty + * marker to start enumeration from the beginning. Blob names are returned in lexicographic order. + * After getting a segment, process it, and then call ListBlobs again (passing the the previously-returned + * Marker) to get the next segment. For more information, see the + * Azure Docs. + * + * @param marker + * Identifies the portion of the list to be returned with the next list operation. + * This value is returned in the response of a previous list operation as the + * ListBlobsFlatSegmentResponse.body().nextMarker(). Set to null to list the first segment. + * @param options + * {@link ListBlobsOptions} + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_flat "Sample code for ContainerURL.listBlobsFlatSegment")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_flat_helper "helper code for ContainerURL.listBlobsFlatSegment")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single listBlobsFlatSegment(String marker, ListBlobsOptions options) { + return this.listBlobsFlatSegment(marker, options, null); + } + + /** + * Returns a single segment of blobs starting from the specified Marker. Use an empty + * marker to start enumeration from the beginning. Blob names are returned in lexicographic order. 
+ * After getting a segment, process it, and then call ListBlobs again (passing the previously-returned + * Marker) to get the next segment. For more information, see the + * Azure Docs. + * + * @param marker + * Identifies the portion of the list to be returned with the next list operation. + * This value is returned in the response of a previous list operation as the + * ListBlobsFlatSegmentResponse.body().nextMarker(). Set to null to list the first segment. + * @param options + * {@link ListBlobsOptions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_flat "Sample code for ContainerURL.listBlobsFlatSegment")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_flat_helper "helper code for ContainerURL.listBlobsFlatSegment")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single listBlobsFlatSegment(String marker, ListBlobsOptions options, + Context context) { + options = options == null ? ListBlobsOptions.DEFAULT : options; + context = context == null ?
Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedContainers() + .listBlobFlatSegmentWithRestResponseAsync(context, + options.prefix(), marker, options.maxResults(), + options.details().toList(), null, null)); + } + + /** + * Returns a single segment of blobs and blob prefixes starting from the specified Marker. Use an empty + * marker to start enumeration from the beginning. Blob names are returned in lexicographic order. + * After getting a segment, process it, and then call ListBlobs again (passing the the previously-returned + * Marker) to get the next segment. For more information, see the + * Azure Docs. + * + * @param marker + * Identifies the portion of the list to be returned with the next list operation. + * This value is returned in the response of a previous list operation as the + * ListBlobsHierarchySegmentResponse.body().nextMarker(). Set to null to list the first segment. + * @param delimiter + * The operation returns a BlobPrefix element in the response body that acts as a placeholder for all blobs + * whose names begin with the same substring up to the appearance of the delimiter character. The delimiter may + * be a single character or a string. + * @param options + * {@link ListBlobsOptions} + * + * @return Emits the successful response. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_hierarchy "Sample code for ContainerURL.listBlobsHierarchySegment")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_hierarchy_helper "helper code for ContainerURL.listBlobsHierarchySegment")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single listBlobsHierarchySegment(String marker, String delimiter, + ListBlobsOptions options) { + return this.listBlobsHierarchySegment(marker, delimiter, options, null); + } + + /** + * Returns a single segment of blobs and blob prefixes starting from the specified Marker. Use an empty + * marker to start enumeration from the beginning. Blob names are returned in lexicographic order. + * After getting a segment, process it, and then call ListBlobs again (passing the the previously-returned + * Marker) to get the next segment. For more information, see the + * Azure Docs. + * + * @param marker + * Identifies the portion of the list to be returned with the next list operation. + * This value is returned in the response of a previous list operation as the + * ListBlobsHierarchySegmentResponse.body().nextMarker(). Set to null to list the first segment. + * @param delimiter + * The operation returns a BlobPrefix element in the response body that acts as a placeholder for all blobs + * whose names begin with the same substring up to the appearance of the delimiter character. The delimiter may + * be a single character or a string. + * @param options + * {@link ListBlobsOptions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. 
Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_hierarchy "Sample code for ContainerURL.listBlobsHierarchySegment")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=list_blobs_hierarchy_helper "helper code for ContainerURL.listBlobsHierarchySegment")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single listBlobsHierarchySegment(String marker, String delimiter, + ListBlobsOptions options, Context context) { + options = options == null ? ListBlobsOptions.DEFAULT : options; + if (options.details().snapshots()) { + throw new UnsupportedOperationException("Including snapshots in a hierarchical listing is not supported."); + } + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedContainers() + .listBlobHierarchySegmentWithRestResponseAsync( + context, delimiter, options.prefix(), marker, options.maxResults(), + options.details().toList(), null, null)); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the + * Azure Docs. + * + * @return Emits the successful response. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=account_info "Sample code for ContainerURL.getAccountInfo")] \n + * For more samples, please see the [Samples file] (https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single getAccountInfo() { + return this.getAccountInfo(null); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the + * Azure Docs. + * + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=account_info "Sample code for ContainerURL.getAccountInfo")] \n + * For more samples, please see the [Samples file] (https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single getAccountInfo(Context context) { + context = context == null ? 
Context.NONE : context; + + return addErrorWrappingToSingle( + this.storageClient.generatedContainers().getAccountInfoWithRestResponseAsync(context)); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/DownloadResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/DownloadResponse.java new file mode 100644 index 0000000000000..3b65f321611c2 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/DownloadResponse.java @@ -0,0 +1,145 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.microsoft.azure.storage.blob; + +import com.microsoft.azure.storage.blob.models.BlobDownloadHeaders; +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpPipeline; +import io.reactivex.Flowable; +import io.reactivex.Single; +import io.reactivex.functions.Function; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Map; + +/** + * {@code DownloadResponse} wraps the protocol-layer response from {@link BlobURL#download(BlobRange, + * BlobAccessConditions, boolean, com.microsoft.rest.v2.Context)} to automatically retry failed reads from the body as + * appropriate. 
If the download is interrupted, the {@code DownloadResponse} will make a request to resume the download + * from where it left off, allowing the user to consume the data as one continuous stream, as any interruptions are + * hidden. The retry behavior is defined by the options passed to the {@link #body(ReliableDownloadOptions)}. The + * download will also lock on the blob's etag to ensure consistency. + *

    + * Note that the retries performed as a part of this reader are composed with those of any retries in an {@link + * HttpPipeline} used in conjunction with this reader. That is, if this object issues a request to resume a download, + * an underlying pipeline may issue several retries as a part of that request. Furthermore, this reader only retries on + * network errors; timeouts and unexpected status codes are not retried. Therefore, the behavior of this reader is + * entirely independent of and in no way coupled to an {@link HttpPipeline}'s retry mechanism. + */ +public final class DownloadResponse { + private final HTTPGetterInfo info; + + private final RestResponse> rawResponse; + + private final Function> getter; + + public DownloadResponse(RestResponse> response, + HTTPGetterInfo info, Function> getter) { + Utility.assertNotNull("getter", getter); + Utility.assertNotNull("info", info); + Utility.assertNotNull("info.eTag", info.eTag()); + this.rawResponse = response; + this.info = info; + this.getter = getter; + } + + /** + * Returns the response body which has been modified to enable reliably reading data if desired (if + * {@code options.maxRetryRequests > 0}. If retries are enabled, if a connection fails while reading, the stream + * will make additional requests to reestablish a connection and continue reading. + * + * @param options + * {@link ReliableDownloadOptions} + * + * @return A {@code Flowable} which emits the data as {@code ByteBuffer}s. + */ + public Flowable body(ReliableDownloadOptions options) { + ReliableDownloadOptions optionsReal = options == null ? new ReliableDownloadOptions() : options; + if (optionsReal.maxRetryRequests() == 0) { + return this.rawResponse.body(); + } + + /* + We pass -1 for currentRetryCount because we want tryContinueFlowable to receive a value of 0 for number of + retries as we have not actually retried yet, only made the initial try. 
Because applyReliableDownload() will + add 1 before calling into tryContinueFlowable, we set the initial value to -1. + */ + return this.applyReliableDownload(this.rawResponse.body(), -1, optionsReal); + } + + private Flowable tryContinueFlowable(Throwable t, int retryCount, ReliableDownloadOptions options) { + // If all the errors are exhausted, return this error to the user. + if (retryCount > options.maxRetryRequests() || !(t instanceof IOException)) { + return Flowable.error(t); + } else { + /* + We wrap this in a try catch because we don't know the behavior of the getter. Most errors would probably + come from an unsuccessful request, which would be propagated through the onError methods. However, it is + possible the method call that returns a Single is what throws (like how our apis throw some exceptions at + call time rather than at subscription time. + */ + try { + // Get a new response and try reading from it. + return getter.apply(this.info) + .flatMapPublisher(response -> + /* + Do not compound the number of retries by passing in another set of downloadOptions; just get + the raw body. + */ + this.applyReliableDownload(this.rawResponse.body(), retryCount, options)); + } catch (Exception e) { + // If the getter fails, return the getter failure to the user. + return Flowable.error(e); + } + } + } + + private Flowable applyReliableDownload(Flowable data, + int currentRetryCount, ReliableDownloadOptions options) { + return data + .doOnNext(buffer -> { + /* + Update how much data we have received in case we need to retry and propagate to the user the data we + have received. + */ + this.info.withOffset(this.info.offset() + buffer.remaining()); + if (this.info.count() != null) { + this.info.withCount(this.info.count() - buffer.remaining()); + } + }) + .onErrorResumeNext(t2 -> { + // Increment the retry count and try again with the new exception. 
+ return tryContinueFlowable(t2, currentRetryCount + 1, options); + }); + } + + public int statusCode() { + return this.rawResponse.statusCode(); + } + + public BlobDownloadHeaders headers() { + return this.rawResponse.headers(); + } + + public Map rawHeaders() { + return this.rawResponse.rawHeaders(); + } + + public RestResponse> rawResponse() { + return this.rawResponse; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/HTTPGetterInfo.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/HTTPGetterInfo.java new file mode 100644 index 0000000000000..452301d1c099a --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/HTTPGetterInfo.java @@ -0,0 +1,82 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.microsoft.azure.storage.blob; + +/** + * HTTPGetterInfo is passed to the getter function of a reliable download to specify parameters needed for the GET + * request. + */ +public final class HTTPGetterInfo { + private long offset = 0; + + private Long count = null; + + private String eTag = null; + + /** + * The start offset that should be used when creating the HTTP GET request's Range header. Defaults to 0. + */ + public long offset() { + return offset; + } + + /** + * The start offset that should be used when creating the HTTP GET request's Range header. Defaults to 0. 
 + */ + public HTTPGetterInfo withOffset(long offset) { + this.offset = offset; + return this; + } + + /** + * The count of bytes that should be used to calculate the end offset when creating the HTTP GET request's Range + * header. {@code null} is the default and indicates that the entire rest of the blob should be retrieved. + */ + public Long count() { + return count; + } + + /** + * The count of bytes that should be used to calculate the end offset when creating the HTTP GET request's Range + * header. {@code null} is the default and indicates that the entire rest of the blob should be retrieved. + */ + public HTTPGetterInfo withCount(Long count) { + if (count != null) { + Utility.assertInBounds("count", count, 0, Long.MAX_VALUE); + } + this.count = count; + return this; + } + + /** + * The resource's etag that should be used when creating the HTTP GET request's If-Match header. Note that the + * Etag is returned with any operation that modifies the resource and by a call to {@link + * BlobURL#getProperties(BlobAccessConditions, com.microsoft.rest.v2.Context)}. Defaults to null. + */ + public String eTag() { + return eTag; + } + + /** + * The resource's etag that should be used when creating the HTTP GET request's If-Match header. Note that the + * Etag is returned with any operation that modifies the resource and by a call to {@link + * BlobURL#getProperties(BlobAccessConditions, com.microsoft.rest.v2.Context)}. Defaults to null. 
+ */ + public HTTPGetterInfo withETag(String eTag) { + this.eTag = eTag; + return this; + } +} \ No newline at end of file diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/ICredentials.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/ICredentials.java new file mode 100644 index 0000000000000..916249efc57ca --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/ICredentials.java @@ -0,0 +1,25 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import com.microsoft.rest.v2.policy.RequestPolicyFactory; + +/** + * Credentials represent any credential type + * it is used to create a credential policy Factory. + */ +public interface ICredentials extends RequestPolicyFactory { + +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/IPRange.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/IPRange.java new file mode 100644 index 0000000000000..93be08e318f10 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/IPRange.java @@ -0,0 +1,102 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import java.net.Inet4Address; + +/** + * This type specifies a continuous range of IP addresses. It is used to limit permissions on SAS tokens. Null may be + * set if it is not desired to confine the sas permissions to an IP range. Please refer to + * {@link AccountSASSignatureValues} or {@link ServiceSASSignatureValues} for more information. + */ +public final class IPRange { + + public static final IPRange DEFAULT = new IPRange(); + + private String ipMin; + + private String ipMax; + + public IPRange() { + } + + /** + * Creates a {@code IPRange} from the specified string. + * + * @param rangeStr + * The {@code String} representation of the {@code IPRange}. + * + * @return The {@code IPRange} generated from the {@code String}. + */ + public static IPRange parse(String rangeStr) { + String[] addrs = rangeStr.split("-"); + IPRange range = new IPRange(); + range.ipMin = addrs[0]; + if (addrs.length > 1) { + range.ipMax = addrs[1]; + } + return range; + } + + /** + * The minimum IP address of the range. + */ + public String ipMin() { + return ipMin; + } + + /** + * The minimum IP address of the range. + */ + public IPRange withIpMin(String ipMin) { + this.ipMin = ipMin; + return this; + } + + /** + * The maximum IP address of the range. + */ + public String ipMax() { + return ipMax; + } + + /** + * The maximum IP address of the range. + */ + public IPRange withIpMax(String ipMax) { + this.ipMax = ipMax; + return this; + } + + /** + * Output the single IP address or range of IP addresses for. 
+ * + * @return The single IP address or range of IP addresses formatted as a {@code String}. + */ + @Override + public String toString() { + if (this.ipMin == null) { + return ""; + } + this.ipMax = this.ipMax == null ? this.ipMin : this.ipMax; + StringBuilder str = new StringBuilder(this.ipMin); + if (!this.ipMin.equals(this.ipMax)) { + str.append('-'); + str.append(this.ipMax); + } + + return str.toString(); + } +} \ No newline at end of file diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/IProgressReceiver.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/IProgressReceiver.java new file mode 100644 index 0000000000000..f47f446eefa53 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/IProgressReceiver.java @@ -0,0 +1,35 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import io.reactivex.Flowable; + +/** + * An {@code IProgressReceiver} is an object that can be used to report progress on network transfers. When specified on + * transfer operations, the {@code reportProgress} method will be called periodically with the total number of bytes + * transferred. The user may configure this method to report progress in whatever format desired. It is recommended + * that this type be used in conjunction with + * {@link ProgressReporter#addProgressReporting(Flowable, IProgressReceiver)}. 
+ */ +public interface IProgressReceiver { + + /** + * The callback function invoked as progress is reported. + * + * @param bytesTransferred + * The total number of bytes transferred during this transaction. + */ + public void reportProgress(long bytesTransferred); +} \ No newline at end of file diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/ListBlobsOptions.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/ListBlobsOptions.java new file mode 100644 index 0000000000000..8dec46b22c71e --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/ListBlobsOptions.java @@ -0,0 +1,92 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +/** + * Defines options available to configure the behavior of a call to listBlobsFlatSegment on a {@link ContainerURL} + * object. See the constructor for details on each of the options. + */ +public final class ListBlobsOptions { + + /** + * An object representing the default options: no details, prefix, or delimiter. Uses the server default for + * maxResults. 
+ */ + public static final ListBlobsOptions DEFAULT = new ListBlobsOptions(); + + private BlobListingDetails details; + + private String prefix; + + private Integer maxResults; + + public ListBlobsOptions() { + this.details = BlobListingDetails.NONE; + } + + /** + * {@link BlobListingDetails} + */ + public BlobListingDetails details() { + return details; + } + + /** + * {@link BlobListingDetails} + */ + public ListBlobsOptions withDetails(BlobListingDetails details) { + this.details = details; + return this; + } + + /** + * Filters the results to return only blobs whose names begin with the specified prefix. May be null to return + * all blobs. + */ + public String prefix() { + return prefix; + } + + /** + * Filters the results to return only blobs whose names begin with the specified prefix. May be null to return + * all blobs. + */ + public ListBlobsOptions withPrefix(String prefix) { + this.prefix = prefix; + return this; + } + + /** + * Specifies the maximum number of blobs to return, including all BlobPrefix elements. If the request does not + * specify maxResults or specifies a value greater than 5,000, the server will return up to 5,000 items. + */ + public Integer maxResults() { + return maxResults; + } + + /** + * Specifies the maximum number of blobs to return, including all BlobPrefix elements. If the request does not + * specify maxResults or specifies a value greater than 5,000, the server will return up to 5,000 items. 
+ */ + public ListBlobsOptions withMaxResults(Integer maxResults) { + if (maxResults != null && maxResults <= 0) { + throw new IllegalArgumentException("MaxResults must be greater than 0."); + } + this.maxResults = maxResults; + return this; + } + + +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/ListContainersOptions.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/ListContainersOptions.java new file mode 100644 index 0000000000000..fb94b0fe80cb6 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/ListContainersOptions.java @@ -0,0 +1,89 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +/** + * Defines options available to configure the behavior of a call to listContainersSegment on a {@link ServiceURL} + * object. See the constructor for details on each of the options. Null may be passed in place of an object of this + * type if no options are desirable. + */ +public final class ListContainersOptions { + + /** + * An object representing the default options: no details or prefix and using the service's default for maxResults. 
+ */ + public static final ListContainersOptions DEFAULT = + new ListContainersOptions(); + + private ContainerListingDetails details; + + private String prefix; + + private Integer maxResults; + + public ListContainersOptions() { + this.details = ContainerListingDetails.NONE; + } + + /** + * {@link ContainerListingDetails} + */ + public ContainerListingDetails details() { + return details; + } + + /** + * {@link ContainerListingDetails} + */ + public ListContainersOptions withDetails(ContainerListingDetails details) { + this.details = details; + return this; + } + + /** + * Filters the results to return only blobs whose names begin with the specified prefix. * + */ + public String prefix() { + return prefix; + } + + /** + * Filters the results to return only blobs whose names begin with the specified prefix. * + */ + public ListContainersOptions withPrefix(String prefix) { + this.prefix = prefix; + return this; + } + + /** + * Specifies the maximum number of blobs to return, including all BlobPrefix elements. If the request does not + * specify maxResults or specifies a value greater than 5,000, the server will return up to 5,000 items. + */ + public Integer maxResults() { + return maxResults; + } + + /** + * Specifies the maximum number of blobs to return, including all BlobPrefix elements. If the request does not + * specify maxResults or specifies a value greater than 5,000, the server will return up to 5,000 items. 
+ */ + public ListContainersOptions withMaxResults(Integer maxResults) { + if (maxResults != null && maxResults <= 0) { + throw new IllegalArgumentException("MaxResults must be greater than 0."); + } + this.maxResults = maxResults; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/LoggingFactory.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/LoggingFactory.java new file mode 100644 index 0000000000000..6ec4cf954df7e --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/LoggingFactory.java @@ -0,0 +1,367 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.microsoft.azure.storage.blob; + +import com.microsoft.rest.v2.http.HttpPipeline; +import com.microsoft.rest.v2.http.HttpPipelineLogLevel; +import com.microsoft.rest.v2.http.HttpRequest; +import com.microsoft.rest.v2.http.HttpResponse; +import com.microsoft.rest.v2.policy.RequestPolicy; +import com.microsoft.rest.v2.policy.RequestPolicyFactory; +import com.microsoft.rest.v2.policy.RequestPolicyOptions; +import io.reactivex.Single; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.net.HttpURLConnection; +import java.net.MalformedURLException; +import java.net.URL; +import java.net.UnknownHostException; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.logging.FileHandler; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + * This is a factory which creates policies in an {@link HttpPipeline} for logging requests and responses. In most + * cases, it is sufficient to configure an object of the {@link LoggingOptions} type and set those as a field on a + * {@link PipelineOptions} structure to configure a default pipeline. The factory and policy must only be used directly + * when creating a custom pipeline. + */ +public final class LoggingFactory implements RequestPolicyFactory { + + private static final Logger forceLogger = Logger.getLogger("Azure Storage Java SDK"); + private static final org.slf4j.Logger slf4jLogger = LoggerFactory.getLogger("Azure Storage Java SDK"); + private static final Map javaLogLevelMap = new HashMap<>(); + private static boolean defaultLoggerLoaded; + + static { + try { + forceLogger.setLevel(Level.WARNING); + + // Create the logs directory if it doesn't exist. 
+ File logDir = new File(System.getProperty("java.io.tmpdir"), "AzureStorageJavaSDKLogs"); + if (!logDir.exists()) { + if (!logDir.mkdir()) { + throw new Exception("Could not create logs directory"); + } + } + + /* + "/" the local pathname separator + "%t" the system temporary directory + "%h" the value of the "user.home" system property + "%g" the generation number to distinguish rotated logs + "%u" a unique number to resolve conflicts + "%%" translates to a single percent sign "%" + + 10MB files, 5 files + + true- append mode + */ + FileHandler handler = new FileHandler("%t/AzureStorageJavaSDKLogs/%u%g", 10 * Constants.MB, 5, false); + handler.setLevel(Level.WARNING); + forceLogger.addHandler(handler); + + javaLogLevelMap.put(HttpPipelineLogLevel.ERROR, Level.SEVERE); + javaLogLevelMap.put(HttpPipelineLogLevel.WARNING, Level.WARNING); + javaLogLevelMap.put(HttpPipelineLogLevel.INFO, Level.INFO); + defaultLoggerLoaded = true; + + /* + If we can't setup default logging, there's nothing we can do. We shouldn't interfere with the rest of logging. + */ + } catch (Exception e) { + defaultLoggerLoaded = false; + System.err.println("Azure Storage default logging could not be configured due to the following exception: " + + e); + } + } + + private final LoggingOptions loggingOptions; + + /** + * Creates a factory which can create LoggingPolicy objects to insert in the pipeline. This will allow for logging + * requests and responses. + * + * @param loggingOptions + * The configurations for this factory. Null will indicate use of the default options. + */ + public LoggingFactory(LoggingOptions loggingOptions) { + this.loggingOptions = loggingOptions == null ? 
LoggingOptions.DEFAULT : loggingOptions; + } + + @Override + public RequestPolicy create(RequestPolicy next, RequestPolicyOptions options) { + return new LoggingPolicy(this, next, options); + } + + private final class LoggingPolicy implements RequestPolicy { + + private final LoggingFactory factory; + + private final RequestPolicy nextPolicy; + + private final RequestPolicyOptions options; + + // The following fields are not final because they are updated by the policy. + private int tryCount; + + private long operationStartTime; + + private long requestStartTime; + + /** + * Creates a policy which configures the logging behavior within the + * {@link com.microsoft.rest.v2.http.HttpPipeline}. + * + * @param nextPolicy + * {@link RequestPolicy} + * @param options + * {@link RequestPolicyOptions} + * @param factory + * {@link LoggingFactory} + */ + private LoggingPolicy(LoggingFactory factory, RequestPolicy nextPolicy, RequestPolicyOptions options) { + this.factory = factory; + this.nextPolicy = nextPolicy; + this.options = options; + } + + /** + * Logs as appropriate. + * + * @param request + * The request to log. + * + * @return A {@link Single} representing the {@link HttpResponse} that will arrive asynchronously. 
+ */ + @Override + public Single sendAsync(final HttpRequest request) { + this.tryCount++; + this.requestStartTime = System.currentTimeMillis(); + if (this.tryCount == 1) { + this.operationStartTime = requestStartTime; + } + + if (this.shouldLog(HttpPipelineLogLevel.INFO)) { + String logMessage = String.format("'%s'==> OUTGOING REQUEST (Try number='%d')%n", request.url(), + this.tryCount); + this.log(HttpPipelineLogLevel.INFO, logMessage); + } + + return nextPolicy.sendAsync(request) + .doOnError(throwable -> { + if (this.shouldLog(HttpPipelineLogLevel.ERROR)) { + String logMessage = String.format( + "Unexpected failure attempting to make request.%nError message:'%s'%n", + throwable.getMessage()); + this.log(HttpPipelineLogLevel.ERROR, logMessage); + } + }) + .doOnSuccess(response -> { + long requestEndTime = System.currentTimeMillis(); + long requestCompletionTime = requestEndTime - requestStartTime; + long operationDuration = requestEndTime - operationStartTime; + HttpPipelineLogLevel currentLevel = HttpPipelineLogLevel.INFO; + + String logMessage = Constants.EMPTY_STRING; + if (this.shouldLog(HttpPipelineLogLevel.INFO)) { + // Assume success and default to informational logging. + logMessage = "Successfully Received Response" + System.lineSeparator(); + } + + // If the response took too long, we'll upgrade to warning. + if (requestCompletionTime >= + factory.loggingOptions.minDurationToLogSlowRequestsInMs()) { + // Log a warning if the try duration exceeded the specified threshold. + if (this.shouldLog(HttpPipelineLogLevel.WARNING)) { + currentLevel = HttpPipelineLogLevel.WARNING; + logMessage = String.format(Locale.ROOT, + "SLOW OPERATION. 
Duration > %d ms.%n", + factory.loggingOptions.minDurationToLogSlowRequestsInMs()); + } + } + + if (((response.statusCode() >= 400 && response.statusCode() <= 499) && + (response.statusCode() != HttpURLConnection.HTTP_NOT_FOUND && + response.statusCode() != HttpURLConnection.HTTP_CONFLICT && + response.statusCode() != HttpURLConnection.HTTP_PRECON_FAILED && + response.statusCode() != 416)) || + /* 416 is missing from the Enum but it is Range Not Satisfiable */ + (response.statusCode() >= 500 && response.statusCode() <= 509)) { + String errorString = String.format(Locale.ROOT, + "REQUEST ERROR%nHTTP request failed with status code:'%d'%n", + response.statusCode()); + if (currentLevel == HttpPipelineLogLevel.WARNING) { + logMessage += errorString; + } else { + logMessage = errorString; + } + + currentLevel = HttpPipelineLogLevel.ERROR; + } + + /* + We don't want to format the log message unless we have to. Format once we've determined that + either the customer wants this log level or we need to force log it. 
+ */ + if (this.shouldLog(currentLevel)) { + String additionalMessageInfo = buildAdditionalMessageInfo(request); + String messageInfo = String.format(Locale.ROOT, + "Request try:'%d', request duration:'%d' ms, operation duration:'%d' ms%n%s", + tryCount, requestCompletionTime, operationDuration, additionalMessageInfo); + this.log(currentLevel, logMessage + messageInfo); + } + }); + } + + private String buildAdditionalMessageInfo(final HttpRequest httpRequest) { + HttpRequest sanitizedRequest = buildSanitizedRequest(httpRequest); + StringBuilder stringBuilder = new StringBuilder(); + String format = "%s: %s" + System.lineSeparator(); + stringBuilder.append(String.format(format, sanitizedRequest.httpMethod().toString(), + sanitizedRequest.url().toString())); + sanitizedRequest.headers().forEach((header) -> stringBuilder.append(String.format(format, header.name(), + header.value()))); + return stringBuilder.toString(); + } + + private HttpRequest buildSanitizedRequest(final HttpRequest initialRequest) { + // Build new URL and redact SAS signature, if present + URL url = sanitizeURL(initialRequest.url()); + + // Build resultRequest + HttpRequest resultRequest = new HttpRequest( + initialRequest.callerMethod(), + initialRequest.httpMethod(), + url, + initialRequest.headers(), + initialRequest.body(), + initialRequest.responseDecoder()); + + // Redact Authorization header, if present + if (resultRequest.headers().value(Constants.HeaderConstants.AUTHORIZATION) != null) { + resultRequest.headers().set(Constants.HeaderConstants.AUTHORIZATION, Constants.REDACTED); + } + + // Redact Copy Source header SAS signature, if present + if (resultRequest.headers().value(Constants.HeaderConstants.COPY_SOURCE) != null) { + try { + URL copySourceUrl = sanitizeURL(new URL(resultRequest.headers() + .value(Constants.HeaderConstants.COPY_SOURCE))); + resultRequest.headers().set(Constants.HeaderConstants.COPY_SOURCE, copySourceUrl.toString()); + } catch (MalformedURLException e) { + throw 
new RuntimeException(e); + } + } + + return resultRequest; + } + + private URL sanitizeURL(URL initialURL) { + URL resultURL = initialURL; + try { + BlobURLParts urlParts = URLParser.parse(initialURL); + if (urlParts.sasQueryParameters() == null || urlParts.sasQueryParameters().signature() == null) { + return resultURL; + } + urlParts.withSasQueryParameters(new SASQueryParameters( + urlParts.sasQueryParameters().version(), + urlParts.sasQueryParameters().services(), + urlParts.sasQueryParameters().resourceTypes(), + urlParts.sasQueryParameters().protocol(), + urlParts.sasQueryParameters().startTime(), + urlParts.sasQueryParameters().expiryTime(), + urlParts.sasQueryParameters().ipRange(), + urlParts.sasQueryParameters().identifier(), + urlParts.sasQueryParameters().resource(), + urlParts.sasQueryParameters().permissions(), + Constants.REDACTED, + urlParts.sasQueryParameters().cacheControl(), + urlParts.sasQueryParameters().contentDisposition(), + urlParts.sasQueryParameters().contentEncoding(), + urlParts.sasQueryParameters().contentLanguage(), + urlParts.sasQueryParameters().contentType() + )); + resultURL = urlParts.toURL(); + + /* + We are only making valid changes to what has already been validated as a URL (since we got it from a + URL object), so there should be no need for either us or the caller to check this error. + */ + } catch (UnknownHostException | MalformedURLException e) { + throw new RuntimeException(e); + } + return resultURL; + } + + /* + We need to support the HttpPipelineLogger as it already exists. We also want to allow users to hook up SLF4J. + Finally, we need to do our own default logging. + */ + private void log(HttpPipelineLogLevel level, String message) { + /* + We need to explicitly check before we send it to the HttpPipelineLogger as its log function may only + expect to receive messages for which shouldLog() returns true. 
+ */ + if (this.options.shouldLog(level)) { + this.options.log(level, message); + } + + /* + The Java logger and slf4j logger should do the correct thing given any log level. forceLogger is + configured to only log warnings and errors. + */ + if (!this.factory.loggingOptions.disableDefaultLogging() && LoggingFactory.defaultLoggerLoaded) { + forceLogger.log(javaLogLevelMap.get(level), message); + } + if (level.equals(HttpPipelineLogLevel.ERROR)) { + slf4jLogger.error(message); + } else if (level.equals(HttpPipelineLogLevel.WARNING)) { + slf4jLogger.warn(message); + } else if (level.equals(HttpPipelineLogLevel.INFO)) { + slf4jLogger.info(message); + } + } + + /* + Check the HttpPipelineLogger, SLF4J Logger, and Java Logger + */ + private boolean shouldLog(HttpPipelineLogLevel level) { + // Default log Warnings and Errors as long as default logging is enabled. + if ((level.equals(HttpPipelineLogLevel.WARNING) || level.equals(HttpPipelineLogLevel.ERROR)) && + !this.factory.loggingOptions.disableDefaultLogging() && LoggingFactory.defaultLoggerLoaded) { + return true; + } + + // The user has configured the HttpPipelineLogger to log at this level. + if (this.options.shouldLog(level)) { + return true; + } + + // The SLF4J logger is configured at the given level. 
+ if ((level.equals(HttpPipelineLogLevel.INFO) && slf4jLogger.isInfoEnabled()) || + (level.equals(HttpPipelineLogLevel.WARNING) && slf4jLogger.isWarnEnabled()) || + (level.equals(HttpPipelineLogLevel.ERROR) && slf4jLogger.isErrorEnabled())) { + return true; + } + + return false; + } + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/LoggingOptions.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/LoggingOptions.java new file mode 100644 index 0000000000000..870740ca5de90 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/LoggingOptions.java @@ -0,0 +1,76 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +/** + * Options to configure the {@link LoggingFactory}. Please refer to the Factory for more information. + */ +public final class LoggingOptions { + + /** + * Default logging options. {@code MinDurationToLogSlowRequestsInMs} is set to 3000; + */ + public static final LoggingOptions DEFAULT = new LoggingOptions(3000); + + private final long minDurationToLogSlowRequestsInMs; + + private final boolean disableDefaultLogging; + + /** + * Creates a new {@link LoggingOptions} object. + * + * @param minDurationToLogSlowRequestsInMs + * The duration after which a tried operation will be logged as a warning. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=pipeline_options "Sample code for LoggingOptions constructor")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public LoggingOptions(long minDurationToLogSlowRequestsInMs) { + this(minDurationToLogSlowRequestsInMs, false); + } + + /** + * Creates a new {@link LoggingOptions} object. + * + * @param minDurationToLogSlowRequestsInMs + * The duration after which a tried operation will be logged as a warning. + * @param disableDefaultLogging + * By default, this library will automatically log warnings and errors to some files in the system's temp + * directory. The size of these files is bounded to a few dozen MB and should not impose a burden on the + * system. It is strongly recommended to leave these logs enabled for customer support reasons, but if + * the user desires a different logging story and enables logging via the HttpPipelineLogger or SLF4J, then + * it should be safe to disable default logging. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=pipeline_options "Sample code for LoggingOptions constructor")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public LoggingOptions(long minDurationToLogSlowRequestsInMs, boolean disableDefaultLogging) { + this.minDurationToLogSlowRequestsInMs = minDurationToLogSlowRequestsInMs; + this.disableDefaultLogging = disableDefaultLogging; + } + + /** + * @return The duration after which a tried operation will be logged as a warning. 
+ */ + public long minDurationToLogSlowRequestsInMs() { + return minDurationToLogSlowRequestsInMs; + } + + public boolean disableDefaultLogging() { + return disableDefaultLogging; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/Metadata.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/Metadata.java new file mode 100644 index 0000000000000..ee6b4305be8fd --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/Metadata.java @@ -0,0 +1,35 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import java.util.HashMap; +import java.util.Map; + +/** + * Contains metadata key/value pairs to be associated with a storage resource. The user may store any additional + * information about the resource that they like using this map. It is passed to create and setMetadata methods on any + * URL type. Null may be passed to set no metadata. 
+ */ +public final class Metadata extends HashMap { + public static final Metadata NONE = new Metadata(); + + public Metadata() { + super(); + } + + public Metadata(Map m) { + super(m); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/PageBlobAccessConditions.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/PageBlobAccessConditions.java new file mode 100644 index 0000000000000..b108c5c3a1f81 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/PageBlobAccessConditions.java @@ -0,0 +1,101 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import com.microsoft.azure.storage.blob.models.LeaseAccessConditions; +import com.microsoft.azure.storage.blob.models.ModifiedAccessConditions; +import com.microsoft.azure.storage.blob.models.SequenceNumberAccessConditions; + +/** + * This class contains values that restrict the successful completion of PageBlob operations to certain conditions. + * It may be set to null if no access conditions are desired. + *

    + * Please refer to the request header section + * here for more conceptual information. + */ +public final class PageBlobAccessConditions { + + /** + * An object representing no access conditions. + */ + public static final PageBlobAccessConditions NONE = new PageBlobAccessConditions(); + + private SequenceNumberAccessConditions sequenceNumberAccessConditions; + + private ModifiedAccessConditions modifiedAccessConditions; + + private LeaseAccessConditions leaseAccessConditions; + + /** + * Creates an instance which has fields set to non-null, empty values. + */ + public PageBlobAccessConditions() { + this.sequenceNumberAccessConditions = new SequenceNumberAccessConditions(); + this.modifiedAccessConditions = new ModifiedAccessConditions(); + this.leaseAccessConditions = new LeaseAccessConditions(); + } + + /** + * Access conditions that will fail the request if the sequence number does not meet the provided condition. + */ + public SequenceNumberAccessConditions sequenceNumberAccessConditions() { + return sequenceNumberAccessConditions; + } + + /** + * Access conditions that will fail the request if the sequence number does not meet the provided condition. + */ + public PageBlobAccessConditions withSequenceNumberAccessConditions( + SequenceNumberAccessConditions sequenceNumberAccessConditions) { + this.sequenceNumberAccessConditions = sequenceNumberAccessConditions; + return this; + } + + /** + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used to + * construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + */ + public ModifiedAccessConditions modifiedAccessConditions() { + return modifiedAccessConditions; + } + + /** + * Standard HTTP Access conditions related to the modification of data. 
ETag and LastModifiedTime are used to + * construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + */ + public PageBlobAccessConditions withModifiedAccessConditions(ModifiedAccessConditions modifiedAccessConditions) { + this.modifiedAccessConditions = modifiedAccessConditions; + return this; + } + + /** + * By setting lease access conditions, requests will fail if the provided lease does not match the active lease on + * the blob. + */ + public LeaseAccessConditions leaseAccessConditions() { + return leaseAccessConditions; + } + + /** + * By setting lease access conditions, requests will fail if the provided lease does not match the active lease on + * the blob. + */ + public PageBlobAccessConditions withLeaseAccessConditions(LeaseAccessConditions leaseAccessConditions) { + this.leaseAccessConditions = leaseAccessConditions; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/PageBlobURL.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/PageBlobURL.java new file mode 100644 index 0000000000000..9bb1729aab23e --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/PageBlobURL.java @@ -0,0 +1,605 @@ +/** + * Copyright Microsoft Corporation + *

    + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import com.microsoft.azure.storage.blob.models.*; +import com.microsoft.rest.v2.Context; +import com.microsoft.rest.v2.http.HttpPipeline; +import com.microsoft.rest.v2.http.UrlBuilder; +import io.reactivex.Flowable; +import io.reactivex.Single; + +import java.net.MalformedURLException; +import java.net.URL; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; + +import static com.microsoft.azure.storage.blob.Utility.addErrorWrappingToSingle; + +/** + * Represents a URL to a page blob. It may be obtained by direct construction or via the create method on a + * {@link ContainerURL} object. This class does not hold any state about a particular blob but is instead a convenient + * way of sending off appropriate requests to the resource on the service. Please refer to the + * Azure Docs + * for more information. + */ +public final class PageBlobURL extends BlobURL { + + /** + * Indicates the number of bytes in a page. + */ + public static final int PAGE_BYTES = 512; + + /** + * Indicates the maximum number of bytes that may be sent in a call to putPage. + */ + public static final int MAX_PUT_PAGES_BYTES = 4 * Constants.MB; + + /** + * Creates a {@code PageBlobURL} object pointing to the account specified by the URL and using the provided + * pipeline to make HTTP requests. + * + * @param url + * A {@code URL} to an Azure Storage page blob. + * @param pipeline + * A {@code HttpPipeline} which configures the behavior of HTTP exchanges. Please refer to + * {@link StorageURL#createPipeline(ICredentials, PipelineOptions)} for more information. 
+ */ + public PageBlobURL(URL url, HttpPipeline pipeline) { + super(url, pipeline); + } + + private static String pageRangeToString(PageRange pageRange) { + if (pageRange.start() < 0 || pageRange.end() <= 0) { + throw new IllegalArgumentException("PageRange's start and end values must be greater than or equal to " + + "0 if specified."); + } + if (pageRange.start() % PageBlobURL.PAGE_BYTES != 0) { + throw new IllegalArgumentException("PageRange's start value must be a multiple of 512."); + } + if (pageRange.end() % PageBlobURL.PAGE_BYTES != PageBlobURL.PAGE_BYTES - 1) { + throw new IllegalArgumentException("PageRange's end value must be 1 less than a multiple of 512."); + } + if (pageRange.end() <= pageRange.start()) { + throw new IllegalArgumentException("PageRange's End value must be after the start."); + } + return new StringBuilder("bytes=").append(pageRange.start()).append('-').append(pageRange.end()).toString(); + } + + /** + * Creates a new {@link PageBlobURL} with the given pipeline. + * + * @param pipeline + * A {@link HttpPipeline} object to set. + * + * @return A {@link PageBlobURL} object with the given pipeline. + */ + public PageBlobURL withPipeline(HttpPipeline pipeline) { + try { + return new PageBlobURL(new URL(this.storageClient.url()), pipeline); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + } + + /** + * Creates a new {@link PageBlobURL} with the given snapshot. + * + * @param snapshot + * A {@code String} of the snapshot id. + * + * @return A {@link PageBlobURL} object with the given pipeline. + */ + public PageBlobURL withSnapshot(String snapshot) throws MalformedURLException, UnknownHostException { + BlobURLParts blobURLParts = URLParser.parse(new URL(this.storageClient.url())); + blobURLParts.withSnapshot(snapshot); + return new PageBlobURL(blobURLParts.toURL(), super.storageClient.httpPipeline()); + } + + /** + * Creates a page blob of the specified length. Call PutPage to upload data data to a page blob. 
+ * For more information, see the + * Azure Docs. + * + * @param size + * Specifies the maximum size for the page blob, up to 8 TB. The page blob size must be aligned to a + * 512-byte boundary. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobURL.create")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single create(long size) { + return this.create(size, null, null, null, null, null); + } + + /** + * Creates a page blob of the specified length. Call PutPage to upload data data to a page blob. + * For more information, see the + * Azure Docs. + * + * @param size + * Specifies the maximum size for the page blob, up to 8 TB. The page blob size must be aligned to a + * 512-byte boundary. + * @param sequenceNumber + * A user-controlled value that you can use to track requests. The value of the sequence number must be + * between 0 and 2^63 - 1.The default value is 0. + * @param headers + * {@link BlobHTTPHeaders} + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobURL.create")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single create(long size, Long sequenceNumber, BlobHTTPHeaders headers, + Metadata metadata, BlobAccessConditions accessConditions, Context context) { + accessConditions = accessConditions == null ? BlobAccessConditions.NONE : accessConditions; + + if (size % PageBlobURL.PAGE_BYTES != 0) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new IllegalArgumentException("size must be a multiple of PageBlobURL.PAGE_BYTES."); + } + if (sequenceNumber != null && sequenceNumber < 0) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new IllegalArgumentException("SequenceNumber must be greater than or equal to 0."); + } + metadata = metadata == null ? Metadata.NONE : metadata; + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedPageBlobs().createWithRestResponseAsync( + context, 0, size, null, metadata, sequenceNumber, null, headers, + accessConditions.leaseAccessConditions(), accessConditions.modifiedAccessConditions())); + } + + /** + * Writes 1 or more pages to the page blob. The start and end offsets must be a multiple of 512. + * For more information, see the + * Azure Docs. + *

    + * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flowable} must produce the same data each time it is subscribed to. + * + * @param pageRange + * A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start offset must + * be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges are + * 0-511, 512-1023, etc. + * @param body + * The data to upload. Note that this {@code Flowable} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobURL.uploadPages")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single uploadPages(PageRange pageRange, Flowable body) { + return this.uploadPages(pageRange, body, null, null); + } + + /** + * Writes 1 or more pages to the page blob. The start and end offsets must be a multiple of 512. + * For more information, see the + * Azure Docs. + *

    + * Note that the data passed must be replayable if retries are enabled (the default). In other words, the + * {@code Flowable} must produce the same data each time it is subscribed to. + * + * @param pageRange + * A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start offset + * must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges + * are 0-511, 512-1023, etc. + * @param body + * The data to upload. Note that this {@code Flowable} must be replayable if retries are enabled + * (the default). In other words, the Flowable must produce the same data each time it is subscribed to. + * @param pageBlobAccessConditions + * {@link PageBlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobURL.uploadPages")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single uploadPages(PageRange pageRange, Flowable body, + PageBlobAccessConditions pageBlobAccessConditions, Context context) { + pageBlobAccessConditions = pageBlobAccessConditions == null ? 
PageBlobAccessConditions.NONE : + pageBlobAccessConditions; + + if (pageRange == null) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new IllegalArgumentException("pageRange cannot be null."); + } + String pageRangeStr = pageRangeToString(pageRange); + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedPageBlobs().uploadPagesWithRestResponseAsync( + context, body, pageRange.end() - pageRange.start() + 1, null, null, pageRangeStr, null, + pageBlobAccessConditions.leaseAccessConditions(), + pageBlobAccessConditions.sequenceNumberAccessConditions(), + pageBlobAccessConditions.modifiedAccessConditions())); + } + + /** + * Frees the specified pages from the page blob. + * For more information, see the + * Azure Docs. + * + * @param pageRange + * A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start offset + * must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges + * are 0-511, 512-1023, etc. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobURL.clearPages")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single clearPages(PageRange pageRange) { + return this.clearPages(pageRange, null, null); + } + + /** + * Frees the specified pages from the page blob. + * For more information, see the + * Azure Docs. + * + * @param pageRange + * A {@link PageRange} object. 
Given that pages must be aligned with 512-byte boundaries, the start offset + * must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges + * are 0-511, 512-1023, etc. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * @param pageBlobAccessConditions + * {@link PageBlobAccessConditions} + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobURL.clearPages")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single clearPages(PageRange pageRange, + PageBlobAccessConditions pageBlobAccessConditions, Context context) { + pageBlobAccessConditions = pageBlobAccessConditions == null ? PageBlobAccessConditions.NONE : + pageBlobAccessConditions; + if (pageRange == null) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new IllegalArgumentException("pageRange cannot be null."); + } + String pageRangeStr = pageRangeToString(pageRange); + context = context == null ? 
Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedPageBlobs().clearPagesWithRestResponseAsync( + context, 0, null, pageRangeStr, null, pageBlobAccessConditions.leaseAccessConditions(), + pageBlobAccessConditions.sequenceNumberAccessConditions(), + pageBlobAccessConditions.modifiedAccessConditions())); + } + + /** + * Returns the list of valid page ranges for a page blob or snapshot of a page blob. + * For more information, see the Azure Docs. + * + * @param blobRange + * {@link BlobRange} + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobURL.getPageRanges")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single getPageRanges(BlobRange blobRange) { + return this.getPageRanges(blobRange, null, null); + } + + /** + * Returns the list of valid page ranges for a page blob or snapshot of a page blob. + * For more information, see the Azure Docs. + * + * @param blobRange + * {@link BlobRange} + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobURL.getPageRanges")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single getPageRanges(BlobRange blobRange, + BlobAccessConditions accessConditions, Context context) { + blobRange = blobRange == null ? BlobRange.DEFAULT : blobRange; + accessConditions = accessConditions == null ? BlobAccessConditions.NONE : accessConditions; + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedPageBlobs().getPageRangesWithRestResponseAsync( + context, null, null, blobRange.toString(), null, accessConditions.leaseAccessConditions(), + accessConditions.modifiedAccessConditions())); + } + + /** + * Gets the collection of page ranges that differ between a specified snapshot and this page blob. + * For more information, see the Azure Docs. + * + * @param blobRange + * {@link BlobRange} + * @param prevSnapshot + * Specifies that the response will contain only pages that were changed between target blob and previous + * snapshot. Changed pages include both updated and cleared pages. The target + * blob may be a snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. + * + * @return Emits the successful response. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_diff "Sample code for PageBlobURL.getPageRangesDiff")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single getPageRangesDiff(BlobRange blobRange, String prevSnapshot) { + return this.getPageRangesDiff(blobRange, prevSnapshot, null, null); + } + + /** + * Gets the collection of page ranges that differ between a specified snapshot and this page blob. + * For more information, see the Azure Docs. + * + * @param blobRange + * {@link BlobRange} + * @param prevSnapshot + * Specifies that the response will contain only pages that were changed between target blob and previous + * snapshot. Changed pages include both updated and cleared pages. The target + * blob may be a snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_diff "Sample code for PageBlobURL.getPageRangesDiff")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single getPageRangesDiff(BlobRange blobRange, String prevSnapshot, + BlobAccessConditions accessConditions, Context context) { + blobRange = blobRange == null ? BlobRange.DEFAULT : blobRange; + accessConditions = accessConditions == null ? BlobAccessConditions.NONE : accessConditions; + context = context == null ? Context.NONE : context; + + if (prevSnapshot == null) { + throw new IllegalArgumentException("prevSnapshot cannot be null"); + } + + return addErrorWrappingToSingle(this.storageClient.generatedPageBlobs().getPageRangesDiffWithRestResponseAsync( + context, null, null, prevSnapshot, blobRange.toString(), null, accessConditions.leaseAccessConditions(), + accessConditions.modifiedAccessConditions())); + } + + /** + * Resizes the page blob to the specified size (which must be a multiple of 512). + * For more information, see the Azure Docs. + * + * @param size + * Resizes a page blob to the specified size. If the specified value is less than the current size of the + * blob, then all pages above the specified value are cleared. + * + * @return Emits the successful response. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobURL.resize")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single resize(long size) { + return this.resize(size, null, null); + } + + /** + * Resizes the page blob to the specified size (which must be a multiple of 512). + * For more information, see the Azure Docs. + * + * @param size + * Resizes a page blob to the specified size. If the specified value is less than the current size of the + * blob, then all pages above the specified value are cleared. + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobURL.resize")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single resize(long size, BlobAccessConditions accessConditions, Context context) { + if (size % PageBlobURL.PAGE_BYTES != 0) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new IllegalArgumentException("size must be a multiple of PageBlobURL.PAGE_BYTES."); + } + accessConditions = accessConditions == null ? BlobAccessConditions.NONE : accessConditions; + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle(this.storageClient.generatedPageBlobs().resizeWithRestResponseAsync( + context, size, null, null, accessConditions.leaseAccessConditions(), + accessConditions.modifiedAccessConditions())); + } + + /** + * Sets the page blob's sequence number. + * For more information, see the Azure Docs. + * + * @param action + * Indicates how the service should modify the blob's sequence number. + * @param sequenceNumber + * The blob's sequence number. The sequence number is a user-controlled property that you can use to track + * requests and manage concurrency issues. + * + * @return Emits the successful response. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobURL.updateSequenceNumber")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single updateSequenceNumber(SequenceNumberActionType action, + Long sequenceNumber) { + return this.updateSequenceNumber(action, sequenceNumber, null, null); + } + + /** + * Sets the page blob's sequence number. + * For more information, see the Azure Docs. + * + * @param action + * Indicates how the service should modify the blob's sequence number. + * @param sequenceNumber + * The blob's sequence number. The sequence number is a user-controlled property that you can use to track + * requests and manage concurrency issues. + * @param accessConditions + * {@link BlobAccessConditions} + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=page_blob_basic "Sample code for PageBlobURL.updateSequenceNumber")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single updateSequenceNumber(SequenceNumberActionType action, + Long sequenceNumber, BlobAccessConditions accessConditions, Context context) { + if (sequenceNumber != null && sequenceNumber < 0) { + // Throwing is preferred to Single.error because this will error out immediately instead of waiting until + // subscription. + throw new IllegalArgumentException("SequenceNumber must be greater than or equal to 0."); + } + accessConditions = accessConditions == null ? BlobAccessConditions.NONE : accessConditions; + sequenceNumber = action == SequenceNumberActionType.INCREMENT ? null : sequenceNumber; + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle( + this.storageClient.generatedPageBlobs().updateSequenceNumberWithRestResponseAsync(context, + action, null, sequenceNumber, null, accessConditions.leaseAccessConditions(), + accessConditions.modifiedAccessConditions())); + } + + /** + * Begins an operation to start an incremental copy from one page blob's snapshot to this page + * blob. The snapshot is copied such that only the differential changes between the previously copied snapshot are + * transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read + * or copied from as usual. For more information, see + * the Azure Docs here and + * here. + * + * @param source + * The source page blob. + * @param snapshot + * The snapshot on the copy source. + * + * @return Emits the successful response. 
+ */ + public Single copyIncremental(URL source, String snapshot) { + return this.copyIncremental(source, snapshot, null, null); + } + + /** + * Begins an operation to start an incremental copy from one page blob's snapshot to this page + * blob. The snapshot is copied such that only the differential changes between the previously copied snapshot are + * transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read + * or copied from as usual. For more information, see + * the Azure Docs here and + * here. + * + * @param source + * The source page blob. + * @param snapshot + * The snapshot on the copy source. + * @param modifiedAccessConditions + * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used + * to construct conditions related to when the blob was changed relative to the given request. The request + * will fail if the specified condition is not satisfied. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to its + * parent, forming a linked list. + * + * @return Emits the successful response. + */ + public Single copyIncremental(URL source, String snapshot, + ModifiedAccessConditions modifiedAccessConditions, Context context) { + context = context == null ? Context.NONE : context; + + UrlBuilder builder = UrlBuilder.parse(source); + builder.setQueryParameter(Constants.SNAPSHOT_QUERY_PARAMETER, snapshot); + try { + source = builder.toURL(); + } catch (MalformedURLException e) { + // We are parsing a valid url and adding a query parameter. If this fails, we can't recover. 
+ throw new Error(e); + } + return addErrorWrappingToSingle(this.storageClient.generatedPageBlobs().copyIncrementalWithRestResponseAsync( + context, source, null, null, modifiedAccessConditions)); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/PipelineOptions.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/PipelineOptions.java new file mode 100644 index 0000000000000..6d5a84f57f28e --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/PipelineOptions.java @@ -0,0 +1,148 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import com.microsoft.rest.v2.http.HttpClient; +import com.microsoft.rest.v2.http.HttpPipelineLogLevel; +import com.microsoft.rest.v2.http.HttpPipelineLogger; + +import java.util.Locale; +import java.util.logging.Logger; + +/** + * This type encapsulates all the possible configuration for the default pipeline. It may be passed to the + * createPipeline method on {@link StorageURL}. All the options fields have default values if nothing is passed, and + * no logger will be used if it is not set. An HttpClient must be set, however. + */ +public final class PipelineOptions { + /* + PipelineOptions is mutable, but its fields refer to immutable objects. 
The createPipeline method can pass the + fields to other methods, but the PipelineOptions object itself can only be used for the duration of this call; it + must not be passed to anything with a longer lifetime. + */ + + private HttpClient client; + + private HttpPipelineLogger logger; + + private RequestRetryOptions requestRetryOptions = RequestRetryOptions.DEFAULT; + + private LoggingOptions loggingOptions = LoggingOptions.DEFAULT; + + private TelemetryOptions telemetryOptions = TelemetryOptions.DEFAULT; + + /** + * Returns a {@code PipelineOptions} object with default values for each of the options fields. An + * {@link HttpClient} must still be set explicitly, however. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=pipeline_options "Sample code for PipelineOptions constructor")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public PipelineOptions() { + this.logger = new HttpPipelineLogger() { + @Override + public HttpPipelineLogLevel minimumLogLevel() { + return HttpPipelineLogLevel.OFF; + } + + @Override + public void log(HttpPipelineLogLevel logLevel, String s, Object... objects) { + if (logLevel == HttpPipelineLogLevel.INFO) { + Logger.getGlobal().info(String.format(Locale.ROOT, s, objects)); + } else if (logLevel == HttpPipelineLogLevel.WARNING) { + Logger.getGlobal().warning(String.format(Locale.ROOT, s, objects)); + } else if (logLevel == HttpPipelineLogLevel.ERROR) { + Logger.getGlobal().severe(String.format(Locale.ROOT, s, objects)); + } + } + }; + } + + /** + * Specifies which HttpClient to use to send the requests. + */ + public HttpClient client() { + return client; + } + + /** + * Specifies which HttpClient to use to send the requests. 
+ */ + public PipelineOptions withClient(HttpClient client) { + this.client = client; + return this; + } + + /** + * Specifies the logger for the pipeline. + */ + public HttpPipelineLogger logger() { + return logger; + } + + /** + * Specifies the logger for the pipeline. + */ + public PipelineOptions withLogger(HttpPipelineLogger logger) { + this.logger = logger; + return this; + } + + /** + * Configures the retry policy's behavior. + */ + public RequestRetryOptions requestRetryOptions() { + return requestRetryOptions; + } + + /** + * Configures the retry policy's behavior. + */ + public PipelineOptions withRequestRetryOptions(RequestRetryOptions requestRetryOptions) { + this.requestRetryOptions = requestRetryOptions; + return this; + } + + /** + * Configures the built-in request logging policy. + */ + public LoggingOptions loggingOptions() { + return loggingOptions; + } + + /** + * Configures the built-in request logging policy. + */ + public PipelineOptions withLoggingOptions(LoggingOptions loggingOptions) { + this.loggingOptions = loggingOptions; + return this; + } + + /** + * Configures the built-in telemetry policy behavior. + */ + public TelemetryOptions telemetryOptions() { + return telemetryOptions; + } + + /** + * Configures the built-in telemetry policy behavior. 
+ */ + public PipelineOptions withTelemetryOptions(TelemetryOptions telemetryOptions) { + this.telemetryOptions = telemetryOptions; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/ProgressReporter.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/ProgressReporter.java new file mode 100644 index 0000000000000..e5c624f5dfbce --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/ProgressReporter.java @@ -0,0 +1,181 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.microsoft.azure.storage.blob; + +import io.reactivex.Flowable; +import io.reactivex.Single; + +import java.nio.ByteBuffer; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.Lock; + +/** + * {@code ProgressReporterImpl} offers a convenient way to add progress tracking to a given Flowable. 
+ */ +public final class ProgressReporter { + + private static abstract class ProgressReporterImpl implements IProgressReceiver{ + long blockProgress; + + final IProgressReceiver progressReceiver; + + ProgressReporterImpl(IProgressReceiver progressReceiver) { + this.blockProgress = 0; + this.progressReceiver = progressReceiver; + } + + @Override + public void reportProgress(long bytesTransferred) { + this.blockProgress += bytesTransferred; + } + + void rewindProgress() { + this.blockProgress = 0; + } + + Flowable addProgressReporting(Flowable data) { + return Single.just(this) + .flatMapPublisher(progressReporter -> { + /* + Each time there is a new subscription, we will rewind the progress. This is desirable specifically + for retries, which resubscribe on each try. The first time this flowable is subscribed to, the + rewind will be a noop as there will have been no progress made. Subsequent rewinds will work as + expected. + */ + progressReporter.rewindProgress(); + /* + Every time we emit some data, report it to the Tracker, which will pass it on to the end user. + */ + return data.doOnNext(buffer -> + progressReporter.reportProgress(buffer.remaining())); + }); + } + } + + /** + * This type is used to keep track of the total amount of data transferred for a single request. This is the type + * we will use when the customer uses the factory to add progress reporting to their Flowable. We need this + * additional type because we can't keep local state directly as lambdas require captured local variables to be + * effectively final. 
+ */ + private static class SequentialProgressReporter extends ProgressReporterImpl { + SequentialProgressReporter(IProgressReceiver progressReceiver) { + super(progressReceiver); + } + + @Override + public void reportProgress(long bytesTransferred) { + super.reportProgress(bytesTransferred); + this.progressReceiver.reportProgress(this.blockProgress); + } + } + + /** + * This type is used to keep track of the total amount of data transferred as a part of a parallel upload in order + * to coordinate progress reporting to the end user. We need this additional type because we can't keep local state + * directly as lambdas require captured local variables to be effectively final. + */ + private static class ParallelProgressReporter extends ProgressReporterImpl { + /* + This lock will be instantiated by the operation initiating the whole transfer to coordinate each + ProgressReporterImpl. + */ + private final Lock transferLock; + + /* + We need an AtomicLong to be able to update the value referenced. Because we are already synchronizing with the + lock, we don't incur any additional performance hit here by the synchronization. + */ + private AtomicLong totalProgress; + + ParallelProgressReporter(IProgressReceiver progressReceiver, Lock lock, AtomicLong totalProgress) { + super(progressReceiver); + this.transferLock = lock; + this.totalProgress = totalProgress; + } + + @Override + public void reportProgress(long bytesTransferred) { + super.reportProgress(bytesTransferred); + + /* + It is typically a bad idea to lock around customer code (which the progressReceiver is) because they could + never release the lock. However, we have decided that it is sufficiently difficult for them to make their + progressReporting code threadsafe that we will take that burden and the ensuing risks. 
Although it is the + case that only one thread is allowed to be in onNext at once, however there are multiple independent + requests happening at once to stage/download separate chunks, so we still need to lock either way. + */ + transferLock.lock(); + this.progressReceiver.reportProgress(this.totalProgress.addAndGet(bytesTransferred)); + transferLock.unlock(); + } + + /* + This is used in the case of retries to rewind the amount of progress reported so as not to over-report at the + end. + */ + @Override + public void rewindProgress() { + /* + Blocks do not interfere with each other's block progress and there is no way that, for a single block, one + thread will be trying to add to the progress while the other is trying to zero it. The updates are strictly + sequential. Avoiding using the lock is ideal. + */ + this.totalProgress.addAndGet(-1 * this.blockProgress); + super.rewindProgress(); + } + + } + + /** + * Adds progress reporting functionality to the given {@code Flowable}. Each subscription (and therefore each + * retry) will rewind the progress reported so as not to over-report. The data reported will be the total amount + * of data emitted so far, or the "current position" of the Flowable. + * + * @param data + * The data whose transfer progress is to be tracked. + * @param progressReceiver + * {@link IProgressReceiver} + * + * @return A {@code Flowable} that emits the same data as the source but calls a callback to report the total amount + * of data emitted so far. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=progress "Sample code for ProgressReporter.addProgressReporting")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public static Flowable addProgressReporting(Flowable data, + IProgressReceiver progressReceiver) { + if (progressReceiver == null) { + return data; + } + else { + ProgressReporterImpl tracker = new SequentialProgressReporter(progressReceiver); + return tracker.addProgressReporting(data); + } + } + + static Flowable addParallelProgressReporting(Flowable data, + IProgressReceiver progressReceiver, Lock lock, AtomicLong totalProgress) { + if (progressReceiver == null) { + return data; + } + else { + ParallelProgressReporter tracker = new ParallelProgressReporter(progressReceiver, lock, totalProgress); + return tracker.addProgressReporting(data); + } + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/ReliableDownloadOptions.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/ReliableDownloadOptions.java new file mode 100644 index 0000000000000..b0c934b40abb2 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/ReliableDownloadOptions.java @@ -0,0 +1,48 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.microsoft.azure.storage.blob; + +/** + * {@code ReliableDownloadOptions} contains properties which help the {@code Flowable} returned from + * {@link DownloadResponse#body(ReliableDownloadOptions)} determine when to retry. + */ +public final class ReliableDownloadOptions { + + /* + We use "retry" here because by the time the user passes this type, the initial request, or try, has already been + issued and returned. This is in contrast to the retry policy options, which includes the initial try in its count, + thus the difference in verbiage. + */ + private int maxRetryRequests = 0; + + /** + * Specifies the maximum number of additional HTTP Get requests that will be made while reading the data from a + * response body. + */ + public int maxRetryRequests() { + return maxRetryRequests; + } + + /** + * Specifies the maximum number of additional HTTP Get requests that will be made while reading the data from a + * response body. + */ + public ReliableDownloadOptions withMaxRetryRequests(int maxRetryRequests) { + Utility.assertInBounds("options.maxRetryRequests", maxRetryRequests, 0, Integer.MAX_VALUE); + this.maxRetryRequests = maxRetryRequests; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/RequestIDFactory.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/RequestIDFactory.java new file mode 100644 index 0000000000000..6d62af9b3e502 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/RequestIDFactory.java @@ -0,0 +1,63 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import com.microsoft.rest.v2.http.HttpPipeline; +import com.microsoft.rest.v2.http.HttpRequest; +import com.microsoft.rest.v2.http.HttpResponse; +import com.microsoft.rest.v2.policy.RequestPolicy; +import com.microsoft.rest.v2.policy.RequestPolicyFactory; +import com.microsoft.rest.v2.policy.RequestPolicyOptions; +import io.reactivex.Single; + +import java.util.UUID; + +/** + * This is a factory which creates policies in an {@link HttpPipeline} for setting a unique request ID in the + * x-ms-client-request-id header as is required for all requests to the service. In most cases, it is sufficient to + * allow the default pipeline to add this factory automatically and assume that it works. The factory and policy must + * only be used directly when creating a custom pipeline. + */ +public final class RequestIDFactory implements RequestPolicyFactory { + + @Override + public RequestPolicy create(RequestPolicy next, RequestPolicyOptions options) { + return new RequestIDPolicy(next, options); + } + + private final class RequestIDPolicy implements RequestPolicy { + private final RequestPolicy nextPolicy; + + private final RequestPolicyOptions options; + + private RequestIDPolicy(RequestPolicy nextPolicy, RequestPolicyOptions options) { + this.nextPolicy = nextPolicy; + this.options = options; + } + + /** + * Add the unique client request ID to the request. 
+ * + * @param request + * the request to populate with the client request ID + * + * @return A {@link Single} representing the {@link HttpResponse} that will arrive asynchronously. + */ + public Single sendAsync(HttpRequest request) { + request.headers().set(Constants.HeaderConstants.CLIENT_REQUEST_ID_HEADER, UUID.randomUUID().toString()); + return nextPolicy.sendAsync(request); + } + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/RequestRetryFactory.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/RequestRetryFactory.java new file mode 100644 index 0000000000000..d5574a92021fb --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/RequestRetryFactory.java @@ -0,0 +1,241 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.microsoft.azure.storage.blob; + +import com.microsoft.rest.v2.http.*; +import com.microsoft.rest.v2.policy.RequestPolicy; +import com.microsoft.rest.v2.policy.RequestPolicyFactory; +import com.microsoft.rest.v2.policy.RequestPolicyOptions; +import io.reactivex.Flowable; +import io.reactivex.Single; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.nio.ByteBuffer; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +/** + * This is a factory which creates policies in an {@link HttpPipeline} for retrying a given HTTP request. The request + * that is retried will be identical each time it is reissued. In most cases, it is sufficient to configure a {@link + * RequestRetryOptions} object and set those as a field on a {@link PipelineOptions} object to configure a default + * pipeline. Retries will try against a secondary if one is specified and the type of operation/error indicates that the + * secondary can handle the request. Exponential and fixed backoff are supported. The factory and policy must only be + * used directly when creating a custom pipeline. + */ +public final class RequestRetryFactory implements RequestPolicyFactory { + + private final RequestRetryOptions requestRetryOptions; + + /** + * Creates a factory capable of generating RequestRetry policies for the {@link HttpPipeline}. + * + * @param requestRetryOptions + * {@link RequestRetryOptions} + */ + public RequestRetryFactory(RequestRetryOptions requestRetryOptions) { + this.requestRetryOptions = requestRetryOptions == null ? 
RequestRetryOptions.DEFAULT : requestRetryOptions; + } + + @Override + public RequestPolicy create(RequestPolicy next, RequestPolicyOptions options) { + return new RequestRetryPolicy(next, this.requestRetryOptions); + } + + private final class RequestRetryPolicy implements RequestPolicy { + + private final RequestPolicy nextPolicy; + + private final RequestRetryOptions requestRetryOptions; + + private RequestRetryPolicy(RequestPolicy nextPolicy, RequestRetryOptions requestRetryOptions) { + this.nextPolicy = nextPolicy; + this.requestRetryOptions = requestRetryOptions; + } + + @Override + public Single sendAsync(HttpRequest httpRequest) { + boolean considerSecondary = (httpRequest.httpMethod().equals(HttpMethod.GET) || + httpRequest.httpMethod().equals(HttpMethod.HEAD)) + && (this.requestRetryOptions.secondaryHost() != null); + + return this.attemptAsync(httpRequest, 1, considerSecondary, 1); + } + + // This is to log for debugging purposes only. Comment/uncomment as necessary for releasing/debugging. + private void logf(String s, Object... args) { + //System.out.println(String.format(s, args)); + } + + /** + * This method actually attempts to send the request and determines if we should attempt again and, if so, how + * long to wait before sending out the next request. + *

    + * Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2) When to retry: connection failure + * or an HTTP status code of 500 or greater, except 501 and 505 If using a secondary: Odd tries go against + * primary; even tries go against the secondary For a primary wait ((2 ^ primaryTries - 1) * delay * random(0.8, + * 1.2) If secondary gets a 404, don't fail, retry but future retries are only against the primary When retrying + * against a secondary, ignore the retry count and wait (.1 second * random(0.8, 1.2)) + * + * @param httpRequest + * The request to try. + * @param primaryTry + * This indicates how many tries we've attempted against the primary DC. + * @param considerSecondary + * Before each try, we'll select either the primary or secondary URL if appropriate. + * @param attempt + * This indicates the total number of attempts to send the request. + * + * @return A single containing either the successful response or an error that was not retryable because either + * the maxTries was exceeded or retries will not mitigate the issue. + */ + private Single attemptAsync(final HttpRequest httpRequest, final int primaryTry, + final boolean considerSecondary, + final int attempt) { + logf("\n=====> Try=%d\n", attempt); + + // Determine which endpoint to try. It's primary if there is no secondary or if it is an odd number attempt. + final boolean tryingPrimary = !considerSecondary || (attempt % 2 == 1); + + // Select the correct host and delay. + long delayMs; + if (tryingPrimary) { + // The first attempt returns 0 delay. + delayMs = this.requestRetryOptions.calculateDelayInMs(primaryTry); + logf("Primary try=%d, Delay=%d\n", primaryTry, delayMs); + } else { + // Delay with some jitter before trying the secondary. 
+ delayMs = (long) ((ThreadLocalRandom.current().nextFloat() / 2 + 0.8) * 1000); // Add jitter + logf("Secondary try=%d, Delay=%d\n", attempt - primaryTry, delayMs); + } + + /* + Clone the original request to ensure that each try starts with the original (unmutated) request. We cannot + simply call httpRequest.buffer() because although the body will start emitting from the beginning of the + stream, the buffers that were emitted will have already been consumed (their position set to their limit), + so it is not a true reset. By adding the map function, we ensure that anything which consumes the + ByteBuffers downstream will only actually consume a duplicate so the original is preserved. This only + duplicates the ByteBuffer object, not the underlying data. + */ + HttpHeaders bufferedHeaders = new HttpHeaders(httpRequest.headers()); + Flowable bufferedBody = httpRequest.body() == null ? + null : httpRequest.body().map(ByteBuffer::duplicate); + final HttpRequest requestCopy = new HttpRequest(httpRequest.callerMethod(), httpRequest.httpMethod(), + httpRequest.url(), bufferedHeaders, bufferedBody, httpRequest.responseDecoder()); + if (!tryingPrimary) { + UrlBuilder builder = UrlBuilder.parse(requestCopy.url()); + builder.withHost(this.requestRetryOptions.secondaryHost()); + try { + requestCopy.withUrl(builder.toURL()); + } catch (MalformedURLException e) { + return Single.error(e); + } + } + requestCopy.withContext(httpRequest.context()); + + // Deadline stuff + + /* + We want to send the request with a given timeout, but we don't want to kickoff that timeout-bound operation + until after the retry backoff delay, so we call delaySubscription. 
+ */ + return this.nextPolicy.sendAsync(requestCopy) + .timeout(this.requestRetryOptions.tryTimeout(), TimeUnit.SECONDS) + .delaySubscription(delayMs, TimeUnit.MILLISECONDS) + .flatMap(response -> { + boolean newConsiderSecondary = considerSecondary; + String action; + int statusCode = response.statusCode(); + + /* + If attempt was against the secondary & it returned a StatusNotFound (404), then the + resource was not found. This may be due to replication delay. So, in this case, + we'll never try the secondary again for this operation. + */ + if (!tryingPrimary && statusCode == 404) { + newConsiderSecondary = false; + action = "Retry: Secondary URL returned 404"; + } else if (statusCode == 503 || statusCode == 500) { + action = "Retry: Temporary error or server timeout"; + } else { + action = "NoRetry: Successful HTTP request"; + } + + logf("Action=%s\n", action); + if (action.charAt(0) == 'R' && attempt < requestRetryOptions.maxTries()) { + /* + We increment primaryTry if we are about to try the primary again (which is when we + consider the secondary and tried the secondary this time (tryingPrimary==false) or + we do not consider the secondary at all (considerSecondary==false)). This will + ensure primaryTry is correct when passed to calculate the delay. + */ + int newPrimaryTry = !tryingPrimary || !considerSecondary ? + primaryTry + 1 : primaryTry; + return attemptAsync(httpRequest, newPrimaryTry, newConsiderSecondary, + attempt + 1); + } + return Single.just(response); + }) + .onErrorResumeNext(throwable -> { + /* + It is likely that many users will not realize that their Flowable must be replayable and + get an error upon retries when the provided data length does not match the length of the exact + data. We cannot enforce the desired Flowable behavior, so we provide a hint when this is likely + the root cause. 
+ */ + if (throwable instanceof UnexpectedLengthException && attempt > 1) { + return Single.error(new IllegalStateException("The request failed because the " + + "size of the contents of the provided Flowable did not match the provided " + + "data size upon attempting to retry. This is likely caused by the Flowable " + + "not being replayable. To support retries, all Flowables must produce the " + + "same data for each subscriber. Please ensure this behavior.", throwable)); + } + + /* + IOException is a catch-all for IO related errors. Technically it includes many types which may + not be network exceptions, but we should not hit those unless there is a bug in our logic. In + either case, it is better to optimistically retry instead of failing too soon. + A Timeout Exception is a client-side timeout coming from Rx. + */ + String action; + if (throwable instanceof IOException) { + action = "Retry: Network error"; + } else if (throwable instanceof TimeoutException) { + action = "Retry: Client timeout"; + } else { + action = "NoRetry: Unknown error"; + } + + + + logf("Action=%s\n", action); + if (action.charAt(0) == 'R' && attempt < requestRetryOptions.maxTries()) { + /* + We increment primaryTry if we are about to try the primary again (which is when we + consider the secondary and tried the secondary this time (tryingPrimary==false) or + we do not consider the secondary at all (considerSecondary==false)). This will + ensure primaryTry is correct when passed to calculate the delay. + */ + int newPrimaryTry = !tryingPrimary || !considerSecondary ? 
+ primaryTry + 1 : primaryTry; + return attemptAsync(httpRequest, newPrimaryTry, considerSecondary, + attempt + 1); + } + return Single.error(throwable); + }); + } + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/RequestRetryOptions.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/RequestRetryOptions.java new file mode 100644 index 0000000000000..441120e2fa15c --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/RequestRetryOptions.java @@ -0,0 +1,171 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import java.util.concurrent.TimeUnit; + +/** + * Options for configuring the {@link RequestRetryFactory}. Please refer to the Factory for more information. Note + * that there is no option for overall operation timeout. This is because Rx objects have a timeout field which provides + * this functionality. + */ +public final class RequestRetryOptions { + + /** + * An object representing default retry values: Exponential backoff, maxTries=4, tryTimeout=60, retryDelayInMs=4000, + * maxRetryDelayInMs=120000, secondaryHost=null. 
+ */ + public static final RequestRetryOptions DEFAULT = new RequestRetryOptions(RetryPolicyType.EXPONENTIAL, null, + null, null, null, null); + final private int maxTries; + final private int tryTimeout; + final private long retryDelayInMs; + final private long maxRetryDelayInMs; + /** + * A {@link RetryPolicyType} telling the pipeline what kind of retry policy to use. + */ + private RetryPolicyType retryPolicyType = RetryPolicyType.EXPONENTIAL; + private String secondaryHost; + + /** + * Configures how the {@link com.microsoft.rest.v2.http.HttpPipeline} should retry requests. + * + * @param retryPolicyType + * A {@link RetryPolicyType} specifying the type of retry pattern to use. A value of {@code null} accepts + * the default. + * @param maxTries + * Specifies the maximum number of attempts an operation will be tried before producing an error. A value of + * {@code null} means that you accept our default policy. A value of 1 means 1 try and no retries. + * @param tryTimeout + * Indicates the maximum time allowed for any single try of an HTTP request. A value of {@code null} means + * that you accept our default. NOTE: When transferring large amounts of data, the default TryTimeout will + * probably not be sufficient. You should override this value based on the bandwidth available to the host + * machine and proximity to the Storage service. A good starting point may be something like (60 seconds per + * MB of anticipated-payload-size). + * @param retryDelayInMs + * Specifies the amount of delay to use before retrying an operation. A value of {@code null} means you + * accept the default value. The delay increases (exponentially or linearly) with each retry up to a maximum + * specified by MaxRetryDelay. If you specify {@code null}, then you must also specify {@code null} for + * MaxRetryDelay. + * @param maxRetryDelayInMs + * Specifies the maximum delay allowed before retrying an operation. A value of {@code null} means you + * accept the default value. 
If you specify {@code null}, then you must also specify {@code null} for + * RetryDelay. + * @param secondaryHost + * If a secondaryHost is specified, retries will be tried against this host. If secondaryHost is + * {@code null} (the default) then operations are not retried against another host. NOTE: Before setting + * this field, make sure you understand the issues around reading stale and potentially-inconsistent data at + * this webpage + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=pipeline_options "Sample code for RequestRetryOptions constructor")] \n + * For more samples, please see the [Samples file] (https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public RequestRetryOptions(RetryPolicyType retryPolicyType, Integer maxTries, Integer tryTimeout, + Long retryDelayInMs, Long maxRetryDelayInMs, String secondaryHost) { + this.retryPolicyType = retryPolicyType == null ? 
RetryPolicyType.EXPONENTIAL : retryPolicyType; + if (maxTries != null) { + Utility.assertInBounds("maxRetries", maxTries, 1, Integer.MAX_VALUE); + this.maxTries = maxTries; + } else { + this.maxTries = 4; + } + + if (tryTimeout != null) { + Utility.assertInBounds("tryTimeout", tryTimeout, 1, Integer.MAX_VALUE); + this.tryTimeout = tryTimeout; + } else { + this.tryTimeout = 60; + } + + if ((retryDelayInMs == null && maxRetryDelayInMs != null) || + (retryDelayInMs != null && maxRetryDelayInMs == null)) { + throw new IllegalArgumentException("Both retryDelay and maxRetryDelay must be null or neither can be null"); + } + + if (retryDelayInMs != null && maxRetryDelayInMs != null) { + Utility.assertInBounds("maxRetryDelayInMs", maxRetryDelayInMs, 1, Long.MAX_VALUE); + Utility.assertInBounds("retryDelayInMs", retryDelayInMs, 1, maxRetryDelayInMs); + this.maxRetryDelayInMs = maxRetryDelayInMs; + this.retryDelayInMs = retryDelayInMs; + } else { + switch (this.retryPolicyType) { + case EXPONENTIAL: + this.retryDelayInMs = TimeUnit.SECONDS.toMillis(4); + break; + case FIXED: + this.retryDelayInMs = TimeUnit.SECONDS.toMillis(30); + break; + default: + throw new IllegalArgumentException("Unrecognize retry policy type."); + } + this.maxRetryDelayInMs = TimeUnit.SECONDS.toMillis(120); + } + + this.secondaryHost = secondaryHost; + } + + int maxTries() { + return this.maxTries; + } + + int tryTimeout() { + return this.tryTimeout; + } + + String secondaryHost() { + return this.secondaryHost; + } + + long retryDelayInMs() { + return retryDelayInMs; + } + + long maxRetryDelayInMs() { + return maxRetryDelayInMs; + } + + /** + * Calculates how long to delay before sending the next request. + * + * @param tryCount + * An {@code int} indicating which try we are on. + * + * @return A {@code long} value of how many milliseconds to delay. 
+ */ + long calculateDelayInMs(int tryCount) { + long delay = 0; + switch (this.retryPolicyType) { + case EXPONENTIAL: + delay = (pow(2L, tryCount - 1) - 1L) * this.retryDelayInMs; + break; + + case FIXED: + delay = this.retryDelayInMs; + break; + } + + return Math.min(delay, this.maxRetryDelayInMs); + } + + private long pow(long number, int exponent) { + long result = 1; + for (int i = 0; i < exponent; i++) { + result *= number; + } + + return result; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/RetryPolicyType.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/RetryPolicyType.java new file mode 100644 index 0000000000000..26642ccd8f44a --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/RetryPolicyType.java @@ -0,0 +1,30 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +/** + * This type holds possible options for retry backoff algorithms. They may be used with {@link RequestRetryOptions}. + */ +public enum RetryPolicyType { + /** + * Tells the pipeline to use an exponential back-off retry policy. + */ + EXPONENTIAL, + + /** + * Tells the pipeline to use a fixed back-off retry policy. 
+ */ + FIXED +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/SASProtocol.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/SASProtocol.java new file mode 100644 index 0000000000000..069b8fbfd15e3 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/SASProtocol.java @@ -0,0 +1,62 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import java.util.Locale; + +/** + * Specifies the set of possible permissions for a shared access signature protocol. Values of this type can be used + * to set the fields on the {@link AccountSASSignatureValues} and {@link ServiceSASSignatureValues} types. + */ +public enum SASProtocol { + /** + * Permission to use SAS only through https granted. + */ + HTTPS_ONLY(Constants.HTTPS), + + /** + * Permission to use SAS only through https or http granted. + */ + HTTPS_HTTP(Constants.HTTPS_HTTP); + + private final String protocols; + + SASProtocol(String p) { + this.protocols = p; + } + + /** + * Parses a {@code String} into a {@code SASProtocol} value if possible. + * + * @param str + * The value to try to parse. + * + * @return A {@code SASProtocol} value that represents the string if possible. 
+ */ + public static SASProtocol parse(String str) { + if (str.equals(Constants.HTTPS)) { + return SASProtocol.HTTPS_ONLY; + } else if (str.equals(Constants.HTTPS_HTTP)) { + return SASProtocol.HTTPS_HTTP; + } + throw new IllegalArgumentException(String.format(Locale.ROOT, + "%s could not be parsed into a SASProtocl value.", str)); + } + + @Override + public String toString() { + return this.protocols; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/SASQueryParameters.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/SASQueryParameters.java new file mode 100644 index 0000000000000..acbe347b999bd --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/SASQueryParameters.java @@ -0,0 +1,500 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import java.net.UnknownHostException; +import java.time.OffsetDateTime; +import java.util.Map; + +import static com.microsoft.azure.storage.blob.Utility.safeURLEncode; + +/** + * Represents the components that make up an Azure Storage SAS' query parameters. This type is not constructed directly + * by the user; it is only generated by the {@link AccountSASSignatureValues} and {@link ServiceSASSignatureValues} + * types. 
Once generated, it can be set on a {@link BlobURLParts} object to be constructed as part of a URL or it can + * be encoded into a {@code String} and appended to a URL directly (though caution should be taken here in case there + * are existing query parameters, which might affect the appropriate means of appending these query parameters). + * NOTE: Instances of this class are immutable to ensure thread safety. + */ +public final class SASQueryParameters { + + private final String version; + + private final String services; + + private final String resourceTypes; + + private final SASProtocol protocol; + + private final OffsetDateTime startTime; + + private final OffsetDateTime expiryTime; + + private final IPRange ipRange; + + private final String identifier; + + private final String resource; + + private final String permissions; + + private final String signature; + + private final String cacheControl; + + private final String contentDisposition; + + private final String contentEncoding; + + private final String contentLanguage; + + private final String contentType; + + /** + * Creates a new {@link SASQueryParameters} object. 
+ * + * @param queryParamsMap + * All query parameters for the request as key-value pairs + * @param removeSASParametersFromMap + * When {@code true}, the SAS query parameters will be removed from queryParamsMap + */ + SASQueryParameters(Map queryParamsMap, boolean removeSASParametersFromMap) + throws UnknownHostException { + + String[] queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_SERVICE_VERSION); + if (queryValue != null) { + this.version = queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_SERVICE_VERSION); + } + } else { + this.version = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_SERVICES); + if (queryValue != null) { + this.services = queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_SERVICES); + } + } else { + this.services = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_RESOURCES_TYPES); + if (queryValue != null) { + this.resourceTypes = queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_RESOURCES_TYPES); + } + } else { + this.resourceTypes = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_PROTOCOL); + if (queryValue != null) { + this.protocol = SASProtocol.parse(queryValue[0]); + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_PROTOCOL); + } + } else { + this.protocol = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_START_TIME); + if (queryValue != null) { + this.startTime = Utility.parseDate(queryValue[0]); + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_START_TIME); + } + } else { + this.startTime = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_EXPIRY_TIME); + if (queryValue != null) { + this.expiryTime = Utility.parseDate(queryValue[0]); + if (removeSASParametersFromMap) { + 
queryParamsMap.remove(Constants.UrlConstants.SAS_EXPIRY_TIME); + } + } else { + this.expiryTime = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_IP_RANGE); + if (queryValue != null) { + this.ipRange = IPRange.parse(queryValue[0]); + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_IP_RANGE); + } + } else { + this.ipRange = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_SIGNED_IDENTIFIER); + if (queryValue != null) { + this.identifier = queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_SIGNED_IDENTIFIER); + } + } else { + this.identifier = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_SIGNED_RESOURCE); + if (queryValue != null) { + this.resource = queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_SIGNED_RESOURCE); + } + } else { + this.resource = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_SIGNED_PERMISSIONS); + if (queryValue != null) { + this.permissions = queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_SIGNED_PERMISSIONS); + } + } else { + this.permissions = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_SIGNATURE); + if (queryValue != null) { + this.signature = queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_SIGNATURE); + } + } else { + this.signature = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_CACHE_CONTROL); + if (queryValue != null) { + this.cacheControl = queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_CACHE_CONTROL); + } + } else { + this.cacheControl = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_CONTENT_DISPOSITION); + if (queryValue != null) { + this.contentDisposition 
= queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_CONTENT_DISPOSITION); + } + } else { + this.contentDisposition = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_CONTENT_ENCODING); + if (queryValue != null) { + this.contentEncoding = queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_CONTENT_ENCODING); + } + } else { + this.contentEncoding = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_CONTENT_LANGUAGE); + if (queryValue != null) { + this.contentLanguage = queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_CONTENT_LANGUAGE); + } + } else { + this.contentLanguage = null; + } + + queryValue = queryParamsMap.get(Constants.UrlConstants.SAS_CONTENT_TYPE); + if (queryValue != null) { + this.contentType = queryValue[0]; + if (removeSASParametersFromMap) { + queryParamsMap.remove(Constants.UrlConstants.SAS_CONTENT_TYPE); + } + } else { + this.contentType = null; + } + } + + /** + * Creates a new {@link SASQueryParameters} object. These objects are only created internally by + * *SASSignatureValues classes. + * + * @param version + * A {@code String} representing the storage version. + * @param services + * A {@code String} representing the storage services being accessed (only for Account SAS). + * @param resourceTypes + * A {@code String} representing the storage resource types being accessed (only for Account SAS). + * @param protocol + * A {@code String} representing the allowed HTTP protocol(s) or {@code null}. + * @param startTime + * A {@code java.util.Date} representing the start time for this SAS token or {@code null}. + * @param expiryTime + * A {@code java.util.Date} representing the expiry time for this SAS token. + * @param ipRange + * A {@link IPRange} representing the range of valid IP addresses for this SAS token or {@code null}. 
+ * @param identifier + * A {@code String} representing the signed identifier (only for Service SAS) or {@code null}. + * @param resource + * A {@code String} representing the storage container or blob (only for Service SAS). + * @param permissions + * A {@code String} representing the storage permissions or {@code null}. + * @param signature + * A {@code String} representing the signature for the SAS token. + */ + SASQueryParameters(String version, String services, String resourceTypes, SASProtocol protocol, + OffsetDateTime startTime, OffsetDateTime expiryTime, IPRange ipRange, String identifier, + String resource, String permissions, String signature, String cacheControl, String contentDisposition, + String contentEncoding, String contentLanguage, String contentType) { + + this.version = version; + this.services = services; + this.resourceTypes = resourceTypes; + this.protocol = protocol; + this.startTime = startTime; + this.expiryTime = expiryTime; + this.ipRange = ipRange; + this.identifier = identifier; + this.resource = resource; + this.permissions = permissions; + this.signature = signature; + this.cacheControl = cacheControl; + this.contentDisposition = contentDisposition; + this.contentEncoding = contentEncoding; + this.contentLanguage = contentLanguage; + this.contentType = contentType; + } + + /** + * @return The storage version + */ + public String version() { + return version; + } + + /** + * @return The storage services being accessed (only for Account SAS). Please refer to {@link AccountSASService} for + * more details. + */ + public String services() { + return services; + } + + /** + * @return The storage resource types being accessed (only for Account SAS). Please refer to + * {@link AccountSASResourceType} for more details. + */ + public String resourceTypes() { + return resourceTypes; + } + + /** + * @return The allowed HTTP protocol(s) or {@code null}. Please refer to {@link SASProtocol} for more details. 
+ */ + public SASProtocol protocol() { + return protocol; + } + + /** + * @return The start time for this SAS token or {@code null}. + */ + public OffsetDateTime startTime() { + return startTime; + } + + /** + * @return The expiry time for this SAS token. + */ + public OffsetDateTime expiryTime() { + return expiryTime; + } + + /** + * @return {@link IPRange} + */ + public IPRange ipRange() { + return ipRange; + } + + /** + * @return The signed identifier (only for {@link ServiceSASSignatureValues}) or {@code null}. Please see + * here + * for more information. + */ + public String identifier() { + return identifier; + } + + /** + * @return The storage container or blob (only for {@link ServiceSASSignatureValues}). + */ + public String resource() { + return resource; + } + + /** + * @return Please refer to {@link AccountSASPermission}, {@link BlobSASPermission}, or {@link ContainerSASPermission} + * for more details. + */ + public String permissions() { + return permissions; + } + + /** + * @return The signature for the SAS token. + */ + public String signature() { + return signature; + } + + /** + * @return The Cache-Control header value when a client accesses the resource with this sas token. + */ + public String cacheControl() { + return cacheControl; + } + + /** + * @return The Content-Disposition header value when a client accesses the resource with this sas token. + */ + public String contentDisposition() { + return contentDisposition; + } + + /** + * @return The Content-Encoding header value when a client accesses the resource with this sas token. + */ + public String contentEncoding() { + return contentEncoding; + } + + /** + * @return The Content-Language header value when a client accesses the resource with this sas token. + */ + public String contentLanguage() { + return contentLanguage; + } + + /** + * @return The Content-Type header value when a client accesses the resource with this sas token. 
+ */ + public String contentType() { + return contentType; + } + + private void tryAppendQueryParameter(StringBuilder sb, String param, Object value) { + if (value != null) { + if (sb.length() == 0) { + sb.append('?'); + } else { + sb.append('&'); + } + sb.append(safeURLEncode(param)).append('=').append(safeURLEncode(value.toString())); + } + } + + /** + * Encodes all SAS query parameters into a string that can be appended to a URL. + * + * @return A {@code String} representing all SAS query parameters. + */ + public String encode() { + /* + We should be url-encoding each key and each value, but because we know all the keys and values will encode to + themselves, we cheat except for the signature value. + */ + String[] params = { + Constants.UrlConstants.SAS_SERVICE_VERSION, + Constants.UrlConstants.SAS_SERVICES, + Constants.UrlConstants.SAS_RESOURCES_TYPES, + Constants.UrlConstants.SAS_PROTOCOL, + Constants.UrlConstants.SAS_START_TIME, + Constants.UrlConstants.SAS_EXPIRY_TIME, + Constants.UrlConstants.SAS_IP_RANGE, + Constants.UrlConstants.SAS_SIGNED_IDENTIFIER, + Constants.UrlConstants.SAS_SIGNED_RESOURCE, + Constants.UrlConstants.SAS_SIGNED_PERMISSIONS, + Constants.UrlConstants.SAS_SIGNATURE, + Constants.UrlConstants.SAS_CACHE_CONTROL, + Constants.UrlConstants.SAS_CONTENT_DISPOSITION, + Constants.UrlConstants.SAS_CONTENT_ENCODING, + Constants.UrlConstants.SAS_CONTENT_LANGUAGE, + Constants.UrlConstants.SAS_CONTENT_TYPE + }; + StringBuilder sb = new StringBuilder(); + for (String param : params) { + switch (param) { + case Constants.UrlConstants.SAS_SERVICE_VERSION: + tryAppendQueryParameter(sb, param, this.version); + break; + case Constants.UrlConstants.SAS_SERVICES: + tryAppendQueryParameter(sb, param, this.services); + break; + case Constants.UrlConstants.SAS_RESOURCES_TYPES: + tryAppendQueryParameter(sb, param, this.resourceTypes); + break; + case Constants.UrlConstants.SAS_PROTOCOL: + tryAppendQueryParameter(sb, param, this.protocol); + break; + case 
Constants.UrlConstants.SAS_START_TIME: + tryAppendQueryParameter(sb, param, + this.startTime == null ? null : Utility.ISO8601UTCDateFormatter.format(this.startTime)); + break; + case Constants.UrlConstants.SAS_EXPIRY_TIME: + tryAppendQueryParameter(sb, param, + this.expiryTime == null ? null : Utility.ISO8601UTCDateFormatter.format(this.expiryTime)); + break; + case Constants.UrlConstants.SAS_IP_RANGE: + tryAppendQueryParameter(sb, param, this.ipRange); + break; + case Constants.UrlConstants.SAS_SIGNED_IDENTIFIER: + tryAppendQueryParameter(sb, param, this.identifier); + break; + case Constants.UrlConstants.SAS_SIGNED_RESOURCE: + tryAppendQueryParameter(sb, param, this.resource); + break; + case Constants.UrlConstants.SAS_SIGNED_PERMISSIONS: + tryAppendQueryParameter(sb, param, this.permissions); + break; + case Constants.UrlConstants.SAS_SIGNATURE: + tryAppendQueryParameter(sb, param, this.signature); + break; + case Constants.UrlConstants.SAS_CACHE_CONTROL: + tryAppendQueryParameter(sb, param, this.cacheControl); + break; + case Constants.UrlConstants.SAS_CONTENT_DISPOSITION: + tryAppendQueryParameter(sb, param, this.contentDisposition); + break; + case Constants.UrlConstants.SAS_CONTENT_ENCODING: + tryAppendQueryParameter(sb, param, this.contentEncoding); + break; + case Constants.UrlConstants.SAS_CONTENT_LANGUAGE: + tryAppendQueryParameter(sb, param, this.contentLanguage); + break; + case Constants.UrlConstants.SAS_CONTENT_TYPE: + tryAppendQueryParameter(sb, param, this.contentType); + break; + } + } + return sb.toString(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/SR.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/SR.java new file mode 100644 index 0000000000000..827590bc7d973 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/SR.java @@ -0,0 +1,130 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the 
/**
 * RESERVED FOR INTERNAL USE. Provides a standard set of errors that could be thrown from the client library.
 */
final class SR {
    // TODO: Do we want to keep any of what's left?
    public static final String ACCOUNT_NAME_NULL_OR_EMPTY = "The account name is null or empty.";
    public static final String ACCOUNT_NAME_MISMATCH = "The account name does not match the existing account name on the credentials.";
    public static final String ARGUMENT_NULL_OR_EMPTY = "The argument must not be null or an empty string. Argument name: %s.";
    public static final String ARGUMENT_OUT_OF_RANGE_ERROR = "The argument is out of range. Argument name: %s, Value passed: %s.";
    public static final String BLOB_OVER_MAX_BLOCK_LIMIT = "The total blocks for this upload exceeds the maximum allowable limit.";
    public static final String BLOB_DATA_CORRUPTED = "Blob data corrupted (integrity check failed), Expected value is %s, retrieved %s";
    public static final String BLOB_ENDPOINT_NOT_CONFIGURED = "No blob endpoint configured.";
    public static final String BLOB_HASH_MISMATCH = "Blob hash mismatch (integrity check failed), Expected value is %s, retrieved %s.";
    public static final String BLOB_MD5_NOT_SUPPORTED_FOR_PAGE_BLOBS = "Blob level MD5 is not supported for page blobs.";
    public static final String CANNOT_CREATE_SAS_FOR_GIVEN_CREDENTIALS = "Cannot create Shared Access Signature as the credentials does not have account name information. Please check that the credentials provided support creating Shared Access Signature.";
    public static final String CANNOT_CREATE_SAS_WITHOUT_ACCOUNT_KEY = "Cannot create Shared Access Signature unless the Account Key credentials are used by the ServiceClient.";
    public static final String CANNOT_TRANSFORM_NON_HTTPS_URI_WITH_HTTPS_ONLY_CREDENTIALS = "Cannot use HTTP with credentials that only support HTTPS.";
    public static final String CONTAINER = "container";
    public static final String CONTENT_LENGTH_MISMATCH = "An incorrect number of bytes was read from the connection. The connection may have been closed.";
    public static final String CREATING_NETWORK_STREAM = "Creating a NetworkInputStream and expecting to read %s bytes.";
    public static final String CREDENTIALS_CANNOT_SIGN_REQUEST = "CloudBlobClient, CloudQueueClient and CloudTableClient require credentials that can sign a request.";
    public static final String DEFAULT_SERVICE_VERSION_ONLY_SET_FOR_BLOB_SERVICE = "DefaultServiceVersion can only be set for the Blob service.";
    public static final String DELETE_SNAPSHOT_NOT_VALID_ERROR = "The option '%s' must be 'None' to delete a specific snapshot specified by '%s'.";
    public static final String ENUMERATION_ERROR = "An error occurred while enumerating the result, check the original exception for details.";
    public static final String ENDPOINT_INFORMATION_UNAVAILABLE = "Endpoint information not available for Account using Shared Access Credentials.";
    public static final String ETAG_INVALID_FOR_DELETE = "Delete requires a valid ETag (which may be the '*' wildcard).";
    public static final String ETAG_INVALID_FOR_MERGE = "Merge requires a valid ETag (which may be the '*' wildcard).";
    public static final String ETAG_INVALID_FOR_UPDATE = "Replace requires a valid ETag (which may be the '*' wildcard).";
    public static final String ENUM_COULD_NOT_BE_PARSED_INVALID_VALUE = "%s could not be parsed from '%s' due to invalid value %s.";
    public static final String INCORRECT_STREAM_LENGTH = "An incorrect stream length was specified, resulting in an authentication failure. Please specify correct length, or -1.";
    public static final String INPUT_STREAM_SHOULD_BE_MARKABLE = "Input stream must be markable.";
    public static final String INVALID_ACCOUNT_NAME = "Invalid account name.";
    public static final String INVALID_ACL_ACCESS_TYPE = "Invalid acl public access type returned '%s'. Expected blob or container.";
    public static final String INVALID_BLOB_TYPE = "Incorrect Blob type, please use the correct Blob type to access a blob on the server. Expected %s, actual %s.";
    public static final String INVALID_BLOCK_ID = "Invalid blockID, blockID must be a valid Base64 String.";
    public static final String INVALID_BLOCK_SIZE = "Block data should not exceed BlockBlobURL.MAX_STAGE_BLOCK_BYTES";
    public static final String INVALID_CONDITIONAL_HEADERS = "The conditionals specified for this operation did not match server.";
    public static final String INVALID_CONNECTION_STRING = "Invalid connection string.";
    public static final String INVALID_CONNECTION_STRING_DEV_STORE_NOT_TRUE = "Invalid connection string, the UseDevelopmentStorage key must always be paired with 'true'. Remove the flag entirely otherwise.";
    public static final String INVALID_CONTENT_LENGTH = "ContentLength must be set to -1 or positive Long value.";
    public static final String INVALID_CONTENT_TYPE = "An incorrect Content-Type was returned from the server.";
    public static final String INVALID_CORS_RULE = "A CORS rule must contain at least one allowed origin and allowed method, and MaxAgeInSeconds cannot have a value less than zero.";
    public static final String INVALID_DATE_STRING = "Invalid Date String: %s.";
    public static final String INVALID_EDMTYPE_VALUE = "Invalid value '%s' for EdmType.";
    public static final String INVALID_FILE_LENGTH = "File length must be greater than or equal to 0 bytes.";
    public static final String INVALID_GEO_REPLICATION_STATUS = "Null or Invalid geo-replication status in response: %s.";
    public static final String INVALID_IP_ADDRESS = "Error when parsing IPv4 address: IP address '%s' is invalid.";
    public static final String INVALID_KEY = "Storage Key is not a valid base64 encoded string.";
    public static final String INVALID_LISTING_DETAILS = "Invalid blob listing details specified.";
    public static final String INVALID_LOGGING_LEVEL = "Invalid logging operations specified.";
    public static final String INVALID_MAX_WRITE_SIZE = "Max write size is 4MB. Please specify a smaller range.";
    public static final String INVALID_MESSAGE_LENGTH = "The message size cannot be larger than %s bytes.";
    public static final String INVALID_MIME_RESPONSE = "Invalid MIME response received.";
    public static final String INVALID_NUMBER_OF_BYTES_IN_THE_BUFFER = "Page data must be a multiple of 512 bytes. Buffer currently contains %d bytes.";
    public static final String INVALID_OPERATION_FOR_A_SNAPSHOT = "Cannot perform this operation on a blob representing a snapshot.";
    public static final String INVALID_PAGE_BLOB_LENGTH = "Page blob length must be multiple of 512.";
    public static final String INVALID_PAGE_START_OFFSET = "Page start offset must be multiple of 512.";
    public static final String INVALID_RANGE_CONTENT_MD5_HEADER = "Cannot specify x-ms-range-get-content-md5 header on ranges larger than 4 MB. Either use a BlobReadStream via openRead, or disable TransactionalMD5 via the BlobRequestOptions.";
    public static final String INVALID_RESOURCE_NAME = "Invalid %s name. Check MSDN for more information about valid naming.";
    public static final String INVALID_RESOURCE_NAME_LENGTH = "Invalid %s name length. The name must be between %s and %s characters long.";
    public static final String INVALID_RESOURCE_RESERVED_NAME = "Invalid %s name. This name is reserved.";
    public static final String INVALID_RESPONSE_RECEIVED = "The response received is invalid or improperly formatted.";
    public static final String INVALID_STORAGE_PROTOCOL_VERSION = "Storage protocol version prior to 2009-09-19 do not support shared key authentication.";
    public static final String INVALID_STORAGE_SERVICE = "Invalid storage service specified.";
    public static final String INVALID_STREAM_LENGTH = "Invalid stream length; stream must be between 0 and %s MB in length.";
    public static final String ITERATOR_EMPTY = "There are no more elements in this enumeration.";
    public static final String KEY_AND_RESOLVER_MISSING = "Key and Resolver are not initialized. Decryption requires either of them to be initialized.";
    public static final String LEASE_CONDITION_ON_SOURCE = "A lease condition cannot be specified on the source of a copy.";
    public static final String LOG_STREAM_END_ERROR = "Error parsing log record: unexpected end of stream.";
    public static final String LOG_STREAM_DELIMITER_ERROR = "Error parsing log record: unexpected delimiter encountered.";
    public static final String LOG_STREAM_QUOTE_ERROR = "Error parsing log record: unexpected quote character encountered.";
    public static final String LOG_VERSION_UNSUPPORTED = "A storage log version of %s is unsupported.";
    public static final String MARK_EXPIRED = "Stream mark expired.";
    public static final String MAXIMUM_EXECUTION_TIMEOUT_EXCEPTION = "The client could not finish the operation within specified maximum execution timeout.";
    public static final String MISSING_CREDENTIALS = "No credentials provided.";
    // FIX: the HTTP date format is defined by RFC 1123 (the previous text said "RFC 123").
    public static final String MISSING_MANDATORY_DATE_HEADER = "Canonicalization did not find a non-empty x-ms-date header in the request. Please use a request with a valid x-ms-date header in RFC 1123 format.";
    public static final String MISSING_MANDATORY_PARAMETER_FOR_SAS = "Missing mandatory parameters for valid Shared Access Signature.";
    public static final String MISSING_MD5 = "ContentMD5 header is missing in the response.";
    // FIX: removed the duplicated word "contain".
    public static final String MISSING_NULLARY_CONSTRUCTOR = "Class type must contain a nullary constructor.";
    public static final String MULTIPLE_CREDENTIALS_PROVIDED = "Cannot provide credentials as part of the address and as constructor parameter. Either pass in the address or use a different constructor.";
    public static final String PARAMETER_NOT_IN_RANGE = "The value of the parameter '%s' should be between %s and %s.";
    public static final String PARAMETER_SHOULD_BE_GREATER = "The value of the parameter '%s' should be greater than %s.";
    public static final String PARAMETER_SHOULD_BE_GREATER_OR_EQUAL = "The value of the parameter '%s' should be greater than or equal to %s.";
    // FIX: the angle-bracketed placeholders were stripped (likely by an HTML-unaware tool), leaving "http:///";
    // restored the conventional path-style placeholders.
    public static final String PATH_STYLE_URI_MISSING_ACCOUNT_INFORMATION = "Missing account name information inside path style URI. Path style URIs should be of the form http://<IPAddressPlusPort>/<accountName>";
    public static final String PRIMARY_ONLY_COMMAND = "This operation can only be executed against the primary storage location.";
    public static final String PROPERTY_CANNOT_BE_SERIALIZED_AS_GIVEN_EDMTYPE = "Property %s with Edm Type %s cannot be de-serialized.";
    public static final String PRECONDITION_FAILURE_IGNORED = "Pre-condition failure on a retry is being ignored since the request should have succeeded in the first attempt.";
    public static final String RELATIVE_ADDRESS_NOT_PERMITTED = "Address %s is a relative address. Only absolute addresses are permitted.";
    public static final String RESOURCE_NAME_EMPTY = "Invalid %s name. The name may not be null, empty, or whitespace only.";
    public static final String RESPONSE_RECEIVED_IS_INVALID = "The response received is invalid or improperly formatted.";
    public static final String SCHEME_NULL_OR_EMPTY = "The protocol to use is null. Please specify whether to use http or https.";
    public static final String SECONDARY_ONLY_COMMAND = "This operation can only be executed against the secondary storage location.";
    public static final String SNAPSHOT_LISTING_ERROR = "Listing snapshots is only supported in flat mode (no delimiter). Consider setting useFlatBlobListing to true.";
    public static final String SNAPSHOT_QUERY_OPTION_ALREADY_DEFINED = "Snapshot query parameter is already defined in the blob URI. Either pass in a snapshotTime parameter or use a full URL with a snapshot query parameter.";
    public static final String STORAGE_CREDENTIALS_NULL_OR_ANONYMOUS = "StorageCredentials cannot be null or anonymous for this service.";
    public static final String STORAGE_CLIENT_OR_SAS_REQUIRED = "Either a SAS token or a service client must be specified.";
    public static final String STORAGE_URI_MISSING_LOCATION = "The URI for the target storage location is not specified. Please consider changing the request's location mode.";
    public static final String STORAGE_URI_MUST_MATCH = "Primary and secondary location URIs in a StorageUri must point to the same resource.";
    public static final String STORAGE_URI_NOT_NULL = "Primary and secondary location URIs in a StorageUri must not both be null.";
    public static final String STREAM_CLOSED = "Stream is already closed.";
    public static final String STREAM_SKIP_FAILED = "The supplied stream has failed to skip to the correct position after successive attempts. Please ensure there are bytes available and try your upload again.";
    public static final String STREAM_LENGTH_GREATER_THAN_4MB = "Invalid stream length, length must be less than or equal to 4 MB in size.";
    public static final String STREAM_LENGTH_GREATER_THAN_100MB = "Invalid stream length, length must be less than or equal to 100 MB in size.";
    public static final String STREAM_LENGTH_NEGATIVE = "Invalid stream length, specify -1 for unknown length stream, or a positive number of bytes.";
    public static final String STRING_NOT_VALID = "The String is not a valid Base64-encoded string.";
    public static final String TAKE_COUNT_ZERO_OR_NEGATIVE = "Take count must be positive and greater than 0.";
    public static final String TOO_MANY_PATH_SEGMENTS = "The count of URL path segments (strings between '/' characters) as part of the blob name cannot exceed 254.";
    public static final String TOO_MANY_SHARED_ACCESS_POLICY_IDENTIFIERS = "Too many %d shared access policy identifiers provided. Server does not support setting more than %d on a single container, queue, or table.";
    public static final String TOO_MANY_SHARED_ACCESS_POLICY_IDS = "Too many %d shared access policy identifiers provided. Server does not support setting more than %d on a single container.";
    public static final String UNEXPECTED_CONTINUATION_TYPE = "The continuation type passed in is unexpected. Please verify that the correct continuation type is passed in. Expected {%s}, found {%s}.";
    public static final String UNEXPECTED_FIELD_NAME = "Unexpected field name. Expected: '%s'. Actual: '%s'.";
    public static final String UNEXPECTED_STATUS_CODE_RECEIVED = "Unexpected http status code received.";
    public static final String UNEXPECTED_STREAM_READ_ERROR = "Unexpected error. Stream returned unexpected number of bytes.";

    // Constants holder; never instantiated.
    private SR() {
    }
}

/**
 * ServiceSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage service. Once
 * all the values here are set appropriately, call generateSASQueryParameters to obtain a representation of the SAS
 * which can actually be applied to blob urls. Note: that both this class and {@link SASQueryParameters} exist because
 * the former is mutable and a logical representation while the latter is immutable and used to generate actual REST
 * requests.
 * <p>
 * Please see the Azure Docs on "Creating a Service SAS" for more conceptual information on SAS, and for the
 * details on each value, including which are required.
 *
 * @apiNote ## Sample Code \n
 * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_sas "Sample code for ServiceSASSignatureValues")] \n
 * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java)
 */
public final class ServiceSASSignatureValues {

    // Defaults to the service version this library targets; callers may override with withVersion.
    private String version = Constants.HeaderConstants.TARGET_STORAGE_VERSION;

    private SASProtocol protocol;

    private OffsetDateTime startTime;

    private OffsetDateTime expiryTime;

    // Raw permissions string; validated/normalized in generateSASQueryParameters via parse/toString.
    private String permissions;

    private IPRange ipRange;

    private String containerName;

    // When empty/null the SAS targets the container ("c"); otherwise a single blob ("b").
    private String blobName;

    // Name of a stored access policy on the container, if any.
    private String identifier;

    private String cacheControl;

    private String contentDisposition;

    private String contentEncoding;

    private String contentLanguage;

    private String contentType;

    /**
     * Creates an object with empty values for all fields.
     */
    public ServiceSASSignatureValues() {
    }

    /**
     * The version of the service this SAS will target. If not specified, it will default to the version targeted by the
     * library.
     */
    public String version() {
        return version;
    }

    /**
     * The version of the service this SAS will target. If not specified, it will default to the version targeted by the
     * library.
     */
    public ServiceSASSignatureValues withVersion(String version) {
        this.version = version;
        return this;
    }

    /**
     * {@link SASProtocol}
     */
    public SASProtocol protocol() {
        return protocol;
    }

    /**
     * {@link SASProtocol}
     */
    public ServiceSASSignatureValues withProtocol(SASProtocol protocol) {
        this.protocol = protocol;
        return this;
    }

    /**
     * When the SAS will take effect.
     */
    public OffsetDateTime startTime() {
        return startTime;
    }

    /**
     * When the SAS will take effect.
     */
    public ServiceSASSignatureValues withStartTime(OffsetDateTime startTime) {
        this.startTime = startTime;
        return this;
    }

    /**
     * The time after which the SAS will no longer work.
     */
    public OffsetDateTime expiryTime() {
        return expiryTime;
    }

    /**
     * The time after which the SAS will no longer work.
     */
    public ServiceSASSignatureValues withExpiryTime(OffsetDateTime expiryTime) {
        this.expiryTime = expiryTime;
        return this;
    }

    /**
     * Please refer to either {@link ContainerSASPermission} or {@link BlobSASPermission} depending on the resource
     * being accessed for help constructing the permissions string.
     */
    public String permissions() {
        return permissions;
    }

    /**
     * Please refer to either {@link ContainerSASPermission} or {@link BlobSASPermission} depending on the resource
     * being accessed for help constructing the permissions string.
     */
    public ServiceSASSignatureValues withPermissions(String permissions) {
        this.permissions = permissions;
        return this;
    }

    /**
     * {@link IPRange}
     */
    public IPRange ipRange() {
        return ipRange;
    }

    /**
     * {@link IPRange}
     */
    public ServiceSASSignatureValues withIpRange(IPRange ipRange) {
        this.ipRange = ipRange;
        return this;
    }

    /**
     * The name of the container the SAS user may access.
     */
    public String containerName() {
        return containerName;
    }

    /**
     * The name of the container the SAS user may access.
     */
    public ServiceSASSignatureValues withContainerName(String containerName) {
        this.containerName = containerName;
        return this;
    }

    /**
     * The name of the blob the SAS user may access.
     */
    public String blobName() {
        return blobName;
    }

    /**
     * The name of the blob the SAS user may access.
     */
    public ServiceSASSignatureValues withBlobName(String blobName) {
        this.blobName = blobName;
        return this;
    }

    /**
     * The name of the access policy on the container this SAS references if any. Please see the Azure Docs on
     * establishing a stored access policy for more information.
     */
    public String identifier() {
        return identifier;
    }

    /**
     * The name of the access policy on the container this SAS references if any. Please see the Azure Docs on
     * establishing a stored access policy for more information.
     */
    public ServiceSASSignatureValues withIdentifier(String identifier) {
        this.identifier = identifier;
        return this;
    }

    /**
     * The cache-control header for the SAS.
     */
    public String cacheControl() {
        return cacheControl;
    }

    /**
     * The cache-control header for the SAS.
     */
    public ServiceSASSignatureValues withCacheControl(String cacheControl) {
        this.cacheControl = cacheControl;
        return this;
    }

    /**
     * The content-disposition header for the SAS.
     */
    public String contentDisposition() {
        return contentDisposition;
    }

    /**
     * The content-disposition header for the SAS.
     */
    public ServiceSASSignatureValues withContentDisposition(String contentDisposition) {
        this.contentDisposition = contentDisposition;
        return this;
    }

    /**
     * The content-encoding header for the SAS.
     */
    public String contentEncoding() {
        return contentEncoding;
    }

    /**
     * The content-encoding header for the SAS.
     */
    public ServiceSASSignatureValues withContentEncoding(String contentEncoding) {
        this.contentEncoding = contentEncoding;
        return this;
    }

    /**
     * The content-language header for the SAS.
     */
    public String contentLanguage() {
        return contentLanguage;
    }

    /**
     * The content-language header for the SAS.
     */
    public ServiceSASSignatureValues withContentLanguage(String contentLanguage) {
        this.contentLanguage = contentLanguage;
        return this;
    }

    /**
     * The content-type header for the SAS.
     */
    public String contentType() {
        return contentType;
    }

    /**
     * The content-type header for the SAS.
     */
    public ServiceSASSignatureValues withContentType(String contentType) {
        this.contentType = contentType;
        return this;
    }

    /**
     * Uses an account's shared key credential to sign these signature values to produce the proper SAS query
     * parameters.
     *
     * @param sharedKeyCredentials
     *         A {@link SharedKeyCredentials} object used to sign the SAS values.
     *
     * @return {@link SASQueryParameters}
     */
    public SASQueryParameters generateSASQueryParameters(SharedKeyCredentials sharedKeyCredentials) {
        Utility.assertNotNull("sharedKeyCredentials", sharedKeyCredentials);
        Utility.assertNotNull("version", this.version);
        Utility.assertNotNull("containerName", this.containerName);

        // "c" = container-level SAS; switched to "b" below when a blob name is present.
        String resource = "c";
        String verifiedPermissions = null;
        // Calling parse and toString guarantees the proper ordering and throws on invalid characters.
        if (Utility.isNullOrEmpty(this.blobName)) {
            if (this.permissions != null) {
                verifiedPermissions = ContainerSASPermission.parse(this.permissions).toString();
            }
        } else {
            if (this.permissions != null) {
                verifiedPermissions = BlobSASPermission.parse(this.permissions).toString();
            }
            resource = "b";
        }

        // Signature is generated on the un-url-encoded values.
        final String stringToSign = stringToSign(verifiedPermissions, sharedKeyCredentials);

        String signature = null;
        try {
            signature = sharedKeyCredentials.computeHmac256(stringToSign);
        } catch (InvalidKeyException e) {
            // NOTE(review): throwing java.lang.Error for an invalid key is unusual — a RuntimeException would be
            // more conventional; left as-is to preserve behavior. The key should have been validated by now.
            throw new Error(e); // The key should have been validated by now. If it is no longer valid here, we fail.
        }

        return new SASQueryParameters(this.version, null, null,
                this.protocol, this.startTime, this.expiryTime, this.ipRange, this.identifier, resource,
                this.permissions, signature, this.cacheControl, this.contentDisposition, this.contentEncoding,
                this.contentLanguage, this.contentType);
    }

    /**
     * Builds the canonical resource name the signature is computed over.
     */
    private String getCanonicalName(String accountName) {
        // Container: "/blob/account/containername"
        // Blob: "/blob/account/containername/blobname"
        StringBuilder canonicalName = new StringBuilder("/blob");
        canonicalName.append('/').append(accountName).append('/').append(this.containerName);

        if (!Utility.isNullOrEmpty(this.blobName)) {
            canonicalName.append("/").append(this.blobName);
        }

        return canonicalName.toString();
    }

    /**
     * Assembles the newline-delimited string-to-sign. The field ORDER here is part of the service's signing
     * contract — do not reorder.
     */
    private String stringToSign(final String verifiedPermissions,
            final SharedKeyCredentials sharedKeyCredentials) {
        return String.join("\n",
                verifiedPermissions == null ? "" : verifiedPermissions,
                this.startTime == null ? "" : Utility.ISO8601UTCDateFormatter.format(this.startTime),
                this.expiryTime == null ? "" : Utility.ISO8601UTCDateFormatter.format(this.expiryTime),
                getCanonicalName(sharedKeyCredentials.getAccountName()),
                this.identifier == null ? "" : this.identifier,
                this.ipRange == null ? IPRange.DEFAULT.toString() : this.ipRange.toString(),
                this.protocol == null ? "" : protocol.toString(),
                this.version,
                this.cacheControl == null ? "" : this.cacheControl,
                this.contentDisposition == null ? "" : this.contentDisposition,
                this.contentEncoding == null ? "" : this.contentEncoding,
                this.contentLanguage == null ? "" : this.contentLanguage,
                this.contentType == null ? "" : this.contentType
        );
    }
}
+ * @param pipeline + * A {@code HttpPipeline} which configures the behavior of HTTP exchanges. Please refer to + * {@link StorageURL#createPipeline(ICredentials, PipelineOptions)} for more information. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_url "Sample code for ServiceURL constructor")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public ServiceURL(URL url, HttpPipeline pipeline) { + super(url, pipeline); + } + + public ContainerURL createContainerURL(String containerName) { + try { + return new ContainerURL(StorageURL.appendToURLPath(new URL(super.storageClient.url()), containerName), + super.storageClient.httpPipeline()); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + } + + /** + * Creates a new {@link ServiceURL} with the given pipeline. + * + * @param pipeline + * An {@link HttpPipeline} object to set. + * + * @return A {@link ServiceURL} object with the given pipeline. + */ + public ServiceURL withPipeline(HttpPipeline pipeline) { + try { + return new ServiceURL(new URL(super.storageClient.url()), pipeline); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + } + + /** + * Returns a single segment of containers starting from the specified Marker. + * Use an empty marker to start enumeration from the beginning. Container names are returned in lexicographic order. + * After getting a segment, process it, and then call ListContainers again (passing the the previously-returned + * Marker) to get the next segment. For more information, see + * the Azure Docs. + * + * @param marker + * Identifies the portion of the list to be returned with the next list operation. 
+ * This value is returned in the response of a previous list operation as the + * ListContainersSegmentResponse.body().nextMarker(). Set to null to list the first segment. + * @param options + * A {@link ListContainersOptions} which specifies what data should be returned by the service. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_list "Sample code for ServiceURL.listContainersSegment")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_list_helper "Helper code for ServiceURL.listContainersSegment")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single listContainersSegment(String marker, + ListContainersOptions options) { + return this.listContainersSegment(marker, options, null); + } + + /** + * Returns a single segment of containers starting from the specified Marker. + * Use an empty marker to start enumeration from the beginning. Container names are returned in lexicographic order. + * After getting a segment, process it, and then call ListContainers again (passing the the previously-returned + * Marker) to get the next segment. For more information, see + * the Azure Docs. + * + * @param marker + * Identifies the portion of the list to be returned with the next list operation. + * This value is returned in the response of a previous list operation as the + * ListContainersSegmentResponse.body().nextMarker(). Set to null to list the first segment. + * @param options + * A {@link ListContainersOptions} which specifies what data should be returned by the service. 
+ * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_list "Sample code for ServiceURL.listContainersSegment")] \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_list_helper "Helper code for ServiceURL.listContainersSegment")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single listContainersSegment(String marker, + ListContainersOptions options, Context context) { + options = options == null ? ListContainersOptions.DEFAULT : options; + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle( + this.storageClient.generatedServices().listContainersSegmentWithRestResponseAsync(context, + options.prefix(), marker, options.maxResults(), options.details().toIncludeType(), null, null)); + } + + /** + * Gets the properties of a storage account’s Blob service. For more information, see the + * Azure Docs. + * + * @return Emits the successful response. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_getsetprops "Sample code for ServiceURL.getProperties")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single getProperties() { + return this.getProperties(null); + } + + /** + * Gets the properties of a storage account’s Blob service. For more information, see the + * Azure Docs. + * + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_getsetprops "Sample code for ServiceURL.getProperties")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single getProperties(Context context) { + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle( + this.storageClient.generatedServices().getPropertiesWithRestResponseAsync(context, null, null)); + } + + /** + * Sets properties for a storage account's Blob service endpoint. For more information, see the + * Azure Docs. 
+ * Note that setting the default service version has no effect when using this client because this client explicitly + * sets the version header on each request, overriding the default. + * + * @param properties + * Configures the service. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_getsetprops "Sample code for ServiceURL.setProperties")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single setProperties(StorageServiceProperties properties) { + return this.setProperties(properties, null); + } + + /** + * Sets properties for a storage account's Blob service endpoint. For more information, see the + * Azure Docs. + * Note that setting the default service version has no effect when using this client because this client explicitly + * sets the version header on each request, overriding the default. + * + * @param properties + * Configures the service. + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_getsetprops "Sample code for ServiceURL.setProperties")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single setProperties(StorageServiceProperties properties, Context context) { + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle( + this.storageClient.generatedServices().setPropertiesWithRestResponseAsync(context, properties, null, + null)); + } + + /** + * Retrieves statistics related to replication for the Blob service. It is only available on the secondary + * location endpoint when read-access geo-redundant replication is enabled for the storage account. For more + * information, see the + * Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_stats "Sample code for ServiceURL.getStats")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single getStatistics() { + return this.getStatistics(null); + } + + /** + * Retrieves statistics related to replication for the Blob service. It is only available on the secondary + * location endpoint when read-access geo-redundant replication is enabled for the storage account. For more + * information, see the + * Azure Docs. + * + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. 
Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_stats "Sample code for ServiceURL.getStats")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single getStatistics(Context context) { + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle( + this.storageClient.generatedServices().getStatisticsWithRestResponseAsync(context, null, null)); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the + * Azure Docs. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=account_info "Sample code for ServiceURL.getAccountInfo")] \n + * For more samples, please see the [Samples file] (https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single getAccountInfo() { + return this.getAccountInfo(null); + } + + /** + * Returns the sku name and account kind for the account. For more information, please see the + * Azure Docs. + * + * @param context + * {@code Context} offers a means of passing arbitrary data (key/value pairs) to an + * {@link com.microsoft.rest.v2.http.HttpPipeline}'s policy objects. 
Most applications do not need to pass + * arbitrary data to the pipeline and can pass {@code Context.NONE} or {@code null}. Each context object is + * immutable. The {@code withContext} with data method creates a new {@code Context} object that refers to + * its parent, forming a linked list. + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=account_info "Sample code for ServiceURL.getAccountInfo")] \n + * For more samples, please see the [Samples file] (https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public Single getAccountInfo(Context context) { + context = context == null ? Context.NONE : context; + + return addErrorWrappingToSingle( + this.storageClient.generatedServices().getAccountInfoWithRestResponseAsync(context)); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/SetResponseFieldFactory.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/SetResponseFieldFactory.java new file mode 100644 index 0000000000000..167f98d4d497f --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/SetResponseFieldFactory.java @@ -0,0 +1,60 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.microsoft.azure.storage.blob; + +import com.microsoft.rest.v2.http.HttpPipeline; +import com.microsoft.rest.v2.http.HttpRequest; +import com.microsoft.rest.v2.http.HttpResponse; +import com.microsoft.rest.v2.policy.RequestPolicy; +import com.microsoft.rest.v2.policy.RequestPolicyFactory; +import com.microsoft.rest.v2.policy.RequestPolicyOptions; +import io.reactivex.Single; + +/** + * This is a factory which creates policies in an {@link HttpPipeline} for setting the request property on the response + * object. This is necessary because of a bug in autorest which fails to set this property. In most cases, it is + * sufficient to allow the default pipeline to add this factory automatically and assume that it works. The factory and + * policy must only be used directly when creating a custom pipeline. + */ +final class SetResponseFieldFactory implements RequestPolicyFactory { + + @Override + public RequestPolicy create(RequestPolicy next, RequestPolicyOptions options) { + return new SetResponseFieldPolicy(next); + } + + private final class SetResponseFieldPolicy implements RequestPolicy { + private final RequestPolicy nextPolicy; + + private SetResponseFieldPolicy(RequestPolicy nextPolicy) { + this.nextPolicy = nextPolicy; + } + + /** + * Add the unique client request ID to the request. + * + * @param request + * the request to populate with the client request ID + * + * @return A {@link Single} representing the {@link HttpResponse} that will arrive asynchronously. 
+ */ + public Single sendAsync(HttpRequest request) { + return nextPolicy.sendAsync(request) + .map(response -> + response.withRequest(request)); + } + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/SharedKeyCredentials.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/SharedKeyCredentials.java new file mode 100644 index 0000000000000..65f1306432295 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/SharedKeyCredentials.java @@ -0,0 +1,286 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import com.microsoft.rest.v2.http.*; +import com.microsoft.rest.v2.policy.RequestPolicy; +import com.microsoft.rest.v2.policy.RequestPolicyOptions; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.QueryStringDecoder; +import io.reactivex.Single; + +import javax.crypto.Mac; +import javax.crypto.spec.SecretKeySpec; +import java.io.UnsupportedEncodingException; +import java.net.URL; +import java.security.InvalidKeyException; +import java.security.NoSuchAlgorithmException; +import java.time.OffsetDateTime; +import java.util.*; + +/** + * SharedKeyCredentials are a means of signing and authenticating storage requests. The key can be obtained from the + * Azure portal. 
This factory will create policies which take care of all the details of creating strings to sign, + * signing them, and setting the Authentication header. While this is a common way of authenticating with the service, + * recommended practice is using {@link TokenCredentials}. Pass this as the credentials in the construction of a new + * {@link HttpPipeline} via the {@link StorageURL} type. + */ +public final class SharedKeyCredentials implements ICredentials { + + private final String accountName; + + private final byte[] accountKey; + + /** + * Initializes a new instance of SharedKeyCredentials contains an account's name and its primary or secondary + * accountKey. + * + * @param accountName + * The account name associated with the request. + * @param accountKey + * The account access key used to authenticate the request. + * + * @throws InvalidKeyException + * Thrown when the accountKey is ill-formatted. + */ + public SharedKeyCredentials(String accountName, String accountKey) throws InvalidKeyException { + this.accountName = accountName; + this.accountKey = Base64.getDecoder().decode(accountKey); + } + + /** + * Gets the account name associated with the request. + * + * @return The account name. + */ + public String getAccountName() { + return accountName; + } + + @Override + public RequestPolicy create(RequestPolicy nextRequestPolicy, RequestPolicyOptions options) { + return new SharedKeyCredentialsPolicy(this, nextRequestPolicy, options); + } + + /** + * Constructs a canonicalized string for signing a request. + * + * @param request + * The request to canonicalize. + * + * @return A canonicalized string. + */ + private String buildStringToSign(final HttpRequest request) { + final HttpHeaders httpHeaders = request.headers(); + String contentLength = getStandardHeaderValue(httpHeaders, Constants.HeaderConstants.CONTENT_LENGTH); + contentLength = contentLength.equals("0") ? 
Constants.EMPTY_STRING : contentLength; + + return String.join("\n", + request.httpMethod().toString(), + getStandardHeaderValue(httpHeaders, Constants.HeaderConstants.CONTENT_ENCODING), + getStandardHeaderValue(httpHeaders, Constants.HeaderConstants.CONTENT_LANGUAGE), + contentLength, + getStandardHeaderValue(httpHeaders, Constants.HeaderConstants.CONTENT_MD5), + getStandardHeaderValue(httpHeaders, Constants.HeaderConstants.CONTENT_TYPE), + // x-ms-date header exists, so don't sign date header + Constants.EMPTY_STRING, + getStandardHeaderValue(httpHeaders, Constants.HeaderConstants.IF_MODIFIED_SINCE), + getStandardHeaderValue(httpHeaders, Constants.HeaderConstants.IF_MATCH), + getStandardHeaderValue(httpHeaders, Constants.HeaderConstants.IF_NONE_MATCH), + getStandardHeaderValue(httpHeaders, Constants.HeaderConstants.IF_UNMODIFIED_SINCE), + getStandardHeaderValue(httpHeaders, Constants.HeaderConstants.RANGE), + getAdditionalXmsHeaders(httpHeaders), + getCanonicalizedResource(request.url()) + ); + } + + private void appendCanonicalizedElement(final StringBuilder builder, final String element) { + builder.append("\n"); + builder.append(element); + } + + private String getAdditionalXmsHeaders(final HttpHeaders headers) { + // Add only headers that begin with 'x-ms-' + final ArrayList xmsHeaderNameArray = new ArrayList<>(); + for (HttpHeader header : headers) { + String lowerCaseHeader = header.name().toLowerCase(Locale.ROOT); + if (lowerCaseHeader.startsWith(Constants.PREFIX_FOR_STORAGE_HEADER)) { + xmsHeaderNameArray.add(lowerCaseHeader); + } + } + + if (xmsHeaderNameArray.isEmpty()) { + return Constants.EMPTY_STRING; + } + + Collections.sort(xmsHeaderNameArray); + + final StringBuilder canonicalizedHeaders = new StringBuilder(); + for (final String key : xmsHeaderNameArray) { + if (canonicalizedHeaders.length() > 0) { + canonicalizedHeaders.append('\n'); + } + + canonicalizedHeaders.append(key); + canonicalizedHeaders.append(':'); + 
canonicalizedHeaders.append(headers.value(key)); + } + + return canonicalizedHeaders.toString(); + } + + /** + * Canonicalized the resource to sign. + * + * @param requestURL + * A {@code java.net.URL} of the request. + * + * @return The canonicalized resource to sign. + */ + private String getCanonicalizedResource(URL requestURL) { + + // Resource path + final StringBuilder canonicalizedResource = new StringBuilder("/"); + canonicalizedResource.append(this.accountName); + + // Note that AbsolutePath starts with a '/'. + if (requestURL.getPath().length() > 0) { + canonicalizedResource.append(requestURL.getPath()); + } else { + canonicalizedResource.append('/'); + } + + // check for no query params and return + if (requestURL.getQuery() == null) { + return canonicalizedResource.toString(); + } + + // The URL object's query field doesn't include the '?'. The QueryStringDecoder expects it. + QueryStringDecoder queryDecoder = new QueryStringDecoder("?" + requestURL.getQuery()); + Map> queryParams = queryDecoder.parameters(); + + ArrayList queryParamNames = new ArrayList<>(queryParams.keySet()); + Collections.sort(queryParamNames); + + for (String queryParamName : queryParamNames) { + final List queryParamValues = queryParams.get(queryParamName); + Collections.sort(queryParamValues); + String queryParamValuesStr = String.join(",", queryParamValues.toArray(new String[]{})); + canonicalizedResource.append("\n").append(queryParamName.toLowerCase(Locale.ROOT)).append(":") + .append(queryParamValuesStr); + } + + // append to main string builder the join of completed params with new line + return canonicalizedResource.toString(); + } + + /** + * Returns the standard header value from the specified connection request, or an empty string if no header value + * has been specified for the request. + * + * @param httpHeaders + * A {@code HttpHeaders} object that represents the headers for the request. 
+ * @param headerName + * A {@code String} that represents the name of the header being requested. + * + * @return A {@code String} that represents the header value, or {@code null} if there is no corresponding + * header value for {@code headerName}. + */ + private String getStandardHeaderValue(final HttpHeaders httpHeaders, final String headerName) { + final String headerValue = httpHeaders.value(headerName); + + return headerValue == null ? Constants.EMPTY_STRING : headerValue; + } + + /** + * Computes a signature for the specified string using the HMAC-SHA256 algorithm. + * Package-private because it is used to generate SAS signatures. + * + * @param stringToSign + * The UTF-8-encoded string to sign. + * + * @return A {@code String} that contains the HMAC-SHA256-encoded signature. + * + * @throws InvalidKeyException + * If the accountKey is not a valid Base64-encoded string. + */ + String computeHmac256(final String stringToSign) throws InvalidKeyException { + try { + /* + We must get a new instance of the Mac calculator for each signature calculated because the instances are + not threadsafe and there is some suggestion online that they may not even be safe for reuse, so we use a + new one each time to be sure. 
+ */ + Mac hmacSha256 = Mac.getInstance("HmacSHA256"); + hmacSha256.init(new SecretKeySpec(this.accountKey, "HmacSHA256")); + byte[] utf8Bytes = stringToSign.getBytes(Constants.UTF8_CHARSET); + return Base64.getEncoder().encodeToString(hmacSha256.doFinal(utf8Bytes)); + } catch (final UnsupportedEncodingException | NoSuchAlgorithmException e) { + throw new Error(e); + } + } + + private final class SharedKeyCredentialsPolicy implements RequestPolicy { + + private final SharedKeyCredentials factory; + + private final RequestPolicy nextPolicy; + + private final RequestPolicyOptions options; + + SharedKeyCredentialsPolicy(SharedKeyCredentials factory, RequestPolicy nextPolicy, + RequestPolicyOptions options) { + this.factory = factory; + this.nextPolicy = nextPolicy; + this.options = options; + } + + /** + * Sign the request. + * + * @param request + * The request to sign. + * + * @return A {@link Single} representing the HTTP response that will arrive asynchronously. + */ + @Override + public Single sendAsync(final HttpRequest request) { + if (request.headers().value(Constants.HeaderConstants.DATE) == null) { + request.headers().set(Constants.HeaderConstants.DATE, + Utility.RFC1123GMTDateFormatter.format(OffsetDateTime.now())); + } + final String stringToSign = this.factory.buildStringToSign(request); + try { + final String computedBase64Signature = this.factory.computeHmac256(stringToSign); + request.headers().set(Constants.HeaderConstants.AUTHORIZATION, + "SharedKey " + this.factory.accountName + ":" + computedBase64Signature); + } catch (Exception e) { + return Single.error(e); + } + + Single response = nextPolicy.sendAsync(request); + return response.doOnSuccess(response1 -> { + if (response1.statusCode() == HttpResponseStatus.FORBIDDEN.code()) { + if (options.shouldLog(HttpPipelineLogLevel.ERROR)) { + options.log(HttpPipelineLogLevel.ERROR, + "===== HTTP Forbidden status, String-to-Sign:%n'%s'%n==================%n", + stringToSign); + } + } + }); + } + } +} + 
diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/StorageException.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/StorageException.java new file mode 100644 index 0000000000000..5dc55868ded70 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/StorageException.java @@ -0,0 +1,68 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.microsoft.azure.storage.blob; + +import com.microsoft.azure.storage.blob.models.StorageErrorCode; +import com.microsoft.azure.storage.blob.models.StorageErrorException; +import com.microsoft.rest.v2.RestException; + +/** + * A {@code StorageException} is thrown whenever Azure Storage successfully returns an error code that is not 200-level. + * Users can inspect the status code and error code to determine the cause of the error response. The exception message + * may also contain more detailed information depending on the type of error. The user may also inspect the raw HTTP + * response or call toString to get the full payload of the error response if present. + * Note that even some expected "errors" will be thrown as a {@code StorageException}. For example, some users may + * perform a getProperties request on an entity to determine whether it exists or not. 
If it does not exist, an
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import com.microsoft.azure.storage.GeneratedStorageClient; +import com.microsoft.rest.v2.http.HttpPipeline; +import com.microsoft.rest.v2.http.HttpPipelineOptions; +import com.microsoft.rest.v2.http.UrlBuilder; +import com.microsoft.rest.v2.policy.DecodingPolicyFactory; +import com.microsoft.rest.v2.policy.RequestPolicyFactory; + +import java.net.MalformedURLException; +import java.net.URL; +import java.util.ArrayList; + +/** + * Represents a URL to a Azure storage object. Typically this class is only needed to generate a new pipeline. In most + * cases, one of the other URL types will be more useful. + */ +public abstract class StorageURL { + + protected final GeneratedStorageClient storageClient; + + protected StorageURL(URL url, HttpPipeline pipeline) { + if (url == null) { + throw new IllegalArgumentException("url cannot be null."); + } + if (pipeline == null) { + throw new IllegalArgumentException("Pipeline cannot be null. Create a pipeline by calling" + + " StorageURL.createPipeline."); + } + + this.storageClient = new GeneratedStorageClient(pipeline) + .withVersion(Constants.HeaderConstants.TARGET_STORAGE_VERSION); + this.storageClient.withUrl(url.toString()); + } + + /** + * Appends a string to the end of a URL's path (prefixing the string with a '/' if required). + * + * @param baseURL + * The url to which the name should be appended. + * @param name + * The name to be appended. + * + * @return A url with the name appended. 
+ * + * @throws MalformedURLException + * Appending the specified name produced an invalid URL. + */ + protected static URL appendToURLPath(URL baseURL, String name) throws MalformedURLException { + UrlBuilder url = UrlBuilder.parse(baseURL.toString()); + if (url.path() == null) { + url.withPath("/"); // .path() will return null if it is empty, so we have to process separately from below. + } else if (url.path().charAt(url.path().length() - 1) != '/') { + url.withPath(url.path() + '/'); + } + url.withPath(url.path() + name); + return new URL(url.toString()); + } + + /** + * Creates an pipeline to process the HTTP requests and Responses. + * + * @apiNote + * ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_url "Sample code for StorageURL.createPipeline")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/New-Storage-SDK-V10-Preview/src/test/java/com/microsoft/azure/storage/Samples.java) + * + * @return The pipeline. + */ + public static HttpPipeline createPipeline() { + return createPipeline(new AnonymousCredentials(), new PipelineOptions()); + } + + /** + * Creates an pipeline to process the HTTP requests and Responses. + * + * @apiNote + * ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_url "Sample code for StorageURL.createPipeline")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/New-Storage-SDK-V10-Preview/src/test/java/com/microsoft/azure/storage/Samples.java) + * + * @param credentials + * The credentials the pipeline will use to authenticate the requests. + * + * @return The pipeline. 
+ */ + public static HttpPipeline createPipeline(ICredentials credentials) { + return createPipeline(credentials, new PipelineOptions()); + } + + /** + * Creates an pipeline to process the HTTP requests and Responses. + * + * @apiNote + * ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_url "Sample code for StorageURL.createPipeline")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/New-Storage-SDK-V10-Preview/src/test/java/com/microsoft/azure/storage/Samples.java) + * + * @param pipelineOptions + * Configurations for each policy in the pipeline. + * @return The pipeline. + */ + public static HttpPipeline createPipeline(PipelineOptions pipelineOptions) { + return createPipeline(new AnonymousCredentials(), pipelineOptions); + } + + /** + * Creates an pipeline to process the HTTP requests and Responses. + * + * @apiNote + * ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_url "Sample code for StorageURL.createPipeline")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/New-Storage-SDK-V10-Preview/src/test/java/com/microsoft/azure/storage/Samples.java) + * + * @param credentials + * The credentials the pipeline will use to authenticate the requests. + * @param pipelineOptions + * Configurations for each policy in the pipeline. + * + * @return The pipeline. 
+ * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=service_url "Sample code for StorageURL.createPipeline")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public static HttpPipeline createPipeline(ICredentials credentials, PipelineOptions pipelineOptions) { + /* + PipelineOptions is mutable, but its fields refer to immutable objects. This method can pass the fields to other + methods, but the PipelineOptions object itself can only be used for the duration of this call; it must not be + passed to anything with a longer lifetime. + */ + if (credentials == null) { + throw new IllegalArgumentException( + "Credentials cannot be null. For anonymous access use Anonymous Credentials."); + } + if (pipelineOptions == null) { + throw new IllegalArgumentException("pipelineOptions cannot be null. You must at least specify a client."); + } + + // Closest to API goes first, closest to wire goes last. + ArrayList factories = new ArrayList<>(); + factories.add(new TelemetryFactory(pipelineOptions.telemetryOptions())); + factories.add(new RequestIDFactory()); + factories.add(new RequestRetryFactory(pipelineOptions.requestRetryOptions())); + if (!(credentials instanceof AnonymousCredentials)) { + factories.add(credentials); + } + factories.add(new SetResponseFieldFactory()); + factories.add(new DecodingPolicyFactory()); + factories.add(new LoggingFactory(pipelineOptions.loggingOptions())); + + return HttpPipeline.build(new HttpPipelineOptions().withHttpClient(pipelineOptions.client()) + .withLogger(pipelineOptions.logger()), + factories.toArray(new RequestPolicyFactory[factories.size()])); + } + + @Override + public String toString() { + return this.storageClient.url(); + } + + /** + * @return The underlying url to the resource. 
+ */ + public URL toURL() { + try { + return new URL(this.storageClient.url()); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/TelemetryFactory.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/TelemetryFactory.java new file mode 100644 index 0000000000000..ad3dbed125dbd --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/TelemetryFactory.java @@ -0,0 +1,73 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import com.microsoft.rest.v2.http.HttpPipeline; +import com.microsoft.rest.v2.http.HttpRequest; +import com.microsoft.rest.v2.http.HttpResponse; +import com.microsoft.rest.v2.policy.RequestPolicy; +import com.microsoft.rest.v2.policy.RequestPolicyFactory; +import com.microsoft.rest.v2.policy.RequestPolicyOptions; +import io.reactivex.Single; + +import java.util.Locale; + +/** + * This is a factory which creates policies in an {@link HttpPipeline} for adding telemetry to a given HTTP request. In + * most cases, it is sufficient to configure a {@link TelemetryOptions} object and set those as a field on a + * {@link PipelineOptions} object to configure a default pipeline. The factory and policy must only be used directly + * when creating a custom pipeline. 
+ */ +public final class TelemetryFactory implements RequestPolicyFactory { + + private final String userAgent; + + /** + * Creates a factory that can create telemetry policy objects which add telemetry information to the outgoing + * HTTP requests. + * + * @param telemetryOptions + * {@link TelemetryOptions} + */ + public TelemetryFactory(TelemetryOptions telemetryOptions) { + telemetryOptions = telemetryOptions == null ? TelemetryOptions.DEFAULT : telemetryOptions; + String userAgentPrefix = telemetryOptions.userAgentPrefix() == null ? + Constants.EMPTY_STRING : telemetryOptions.userAgentPrefix(); + this.userAgent = userAgentPrefix + ' ' + + Constants.HeaderConstants.USER_AGENT_PREFIX + '/' + Constants.HeaderConstants.USER_AGENT_VERSION + + String.format(Locale.ROOT, " (JavaJRE %s; %s %s)", + System.getProperty("java.version"), + System.getProperty("os.name").replaceAll(" ", ""), + System.getProperty("os.version")); + } + + @Override + public RequestPolicy create(RequestPolicy next, RequestPolicyOptions options) { + return new TelemetryPolicy(next, options); + } + + private final class TelemetryPolicy implements RequestPolicy { + private final RequestPolicy nextPolicy; + + private TelemetryPolicy(RequestPolicy nextPolicy, RequestPolicyOptions options) { + this.nextPolicy = nextPolicy; + } + + public Single sendAsync(HttpRequest request) { + request.headers().set(Constants.HeaderConstants.USER_AGENT, userAgent); + return this.nextPolicy.sendAsync(request); + } + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/TelemetryOptions.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/TelemetryOptions.java new file mode 100644 index 0000000000000..0ec208e79bc96 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/TelemetryOptions.java @@ -0,0 +1,41 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use 
 this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +/** + * Options for configuring the {@link TelemetryFactory}. Please refer to the Factory for more information. + */ +public final class TelemetryOptions { + + public static final TelemetryOptions DEFAULT = new TelemetryOptions(Constants.EMPTY_STRING); + + private final String userAgentPrefix; + + /** + * @param userAgentPrefix + * A string prepended to each request's User-Agent and sent to the service. The service records + * the user-agent in logs for diagnostics and tracking of client requests. + */ + public TelemetryOptions(String userAgentPrefix) { + this.userAgentPrefix = userAgentPrefix; + } + + /** + * @return The user agent prefix. + */ + public String userAgentPrefix() { + return this.userAgentPrefix; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/TokenCredentials.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/TokenCredentials.java new file mode 100644 index 0000000000000..163a633fb38f0 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/TokenCredentials.java @@ -0,0 +1,92 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob; + +import com.microsoft.rest.v2.http.HttpRequest; +import com.microsoft.rest.v2.http.HttpResponse; +import com.microsoft.rest.v2.policy.RequestPolicy; +import com.microsoft.rest.v2.policy.RequestPolicyOptions; +import io.reactivex.Single; + +import java.util.concurrent.atomic.AtomicReference; + +/** + * TokenCredentials are a means of authenticating requests to Azure Storage via OAuth user tokens. This is the preferred + * way of authenticating with Azure Storage. + */ +public final class TokenCredentials implements ICredentials { + + /* + This is an atomic reference because it must be thread safe as all parts of the pipeline must be. It however cannot + be final as most factory fields are because in order to actually be useful, the token has to be renewed every few + hours, which requires updating the value here. + */ + private AtomicReference token; + + /** + * Creates a token credential for use with role-based access control (RBAC) access to Azure Storage resources. + * + * @param token + * A {@code String} of the token to use for authentication. + */ + public TokenCredentials(String token) { + this.token = new AtomicReference<>(token); + } + + /** + * Retrieve the value of the token used by this factory. + * + * @return A {@code String} with the token's value. + */ + public String getToken() { + return this.token.get(); + } + + /** + * Update the token to a new value. + * + * @param token + * A {@code String} containing the new token's value. 
+ */ + public void setToken(String token) { + this.token.set(token); + } + + @Override + public RequestPolicy create(RequestPolicy next, RequestPolicyOptions options) { + return new TokenCredentialsPolicy(this, next); + } + + private final class TokenCredentialsPolicy implements RequestPolicy { + + private final TokenCredentials factory; + + private final RequestPolicy nextPolicy; + + private TokenCredentialsPolicy(TokenCredentials factory, RequestPolicy nextPolicy) { + this.factory = factory; + this.nextPolicy = nextPolicy; + } + + public Single sendAsync(HttpRequest request) { + if (!request.url().getProtocol().equals(Constants.HTTPS)) { + throw new Error("Token credentials require a URL using the https protocol scheme"); + } + request.withHeader(Constants.HeaderConstants.AUTHORIZATION, + "Bearer " + this.factory.getToken()); + return this.nextPolicy.sendAsync(request); + } + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/TransferManager.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/TransferManager.java new file mode 100644 index 0000000000000..695a96c9cadf8 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/TransferManager.java @@ -0,0 +1,421 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.microsoft.azure.storage.blob; + +import com.microsoft.azure.storage.blob.models.BlobDownloadHeaders; +import com.microsoft.azure.storage.blob.models.BlockBlobCommitBlockListResponse; +import com.microsoft.azure.storage.blob.models.ModifiedAccessConditions; +import com.microsoft.rest.v2.util.FlowableUtil; +import io.reactivex.Flowable; +import io.reactivex.Observable; +import io.reactivex.Single; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.AsynchronousFileChannel; +import java.util.*; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +import static java.lang.StrictMath.toIntExact; + +/** + * This class contains a collection of methods (and structures associated with those methods) which perform higher-level + * operations. Whereas operations on the URL types guarantee a single REST request and make no assumptions on desired + * behavior, these methods will often compose several requests to provide a convenient way of performing more complex + * operations. Further, we will make our own assumptions and optimizations for common cases that may not be ideal for + * rarer cases. + */ +public final class TransferManager { + + /** + * The default size of a download chunk for downloading large blobs. + */ + public static final int BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE = 4 * Constants.MB; + + /** + * Uploads the contents of a file to a block blob in parallel, breaking it into block-size chunks if necessary. + * + * @param file + * The file to upload. + * @param blockBlobURL + * Points to the blob to which the data should be uploaded. + * @param blockLength + * If the data must be broken up into blocks, this value determines what size those blocks will be. This + * will affect the total number of service requests made as each REST request uploads exactly one block in + * full. 
This value will be ignored if the data can be uploaded in a single put-blob operation. Must be + * between 1 and {@link BlockBlobURL#MAX_STAGE_BLOCK_BYTES}. Note as well that + * {@code fileLength/blockLength} must be less than or equal to {@link BlockBlobURL#MAX_BLOCKS}. + * @param options + * {@link TransferManagerUploadToBlockBlobOptions} + * + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=tm_file "Sample code for TransferManager.uploadFileToBlockBlob")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public static Single uploadFileToBlockBlob( + final AsynchronousFileChannel file, final BlockBlobURL blockBlobURL, final int blockLength, + final TransferManagerUploadToBlockBlobOptions options) throws IOException { + Utility.assertNotNull("file", file); + Utility.assertNotNull("blockBlobURL", blockBlobURL); + Utility.assertInBounds("blockLength", blockLength, 1, BlockBlobURL.MAX_STAGE_BLOCK_BYTES); + TransferManagerUploadToBlockBlobOptions optionsReal = options == null ? + TransferManagerUploadToBlockBlobOptions.DEFAULT : options; + + // See ProgressReporter for an explanation on why this lock is necessary and why we use AtomicLong. + AtomicLong totalProgress = new AtomicLong(0); + Lock progressLock = new ReentrantLock(); + + // If the size of the file can fit in a single upload, do it this way. + if (file.size() < BlockBlobURL.MAX_UPLOAD_BLOB_BYTES) { + Flowable data = FlowableUtil.readFile(file); + + data = ProgressReporter.addProgressReporting(data, optionsReal.progressReceiver()); + + return blockBlobURL.upload(data, file.size(), optionsReal.httpHeaders(), + optionsReal.metadata(), optionsReal.accessConditions(), null) + // Transform the specific RestResponse into a CommonRestResponse. 
+ .map(CommonRestResponse::createFromPutBlobResponse); + } + + // Calculate and validate the number of blocks. + int numBlocks = calculateNumBlocks(file.size(), blockLength); + if (numBlocks > BlockBlobURL.MAX_BLOCKS) { + throw new IllegalArgumentException(SR.BLOB_OVER_MAX_BLOCK_LIMIT); + } + + return Observable.range(0, numBlocks) + /* + For each block, make a call to stageBlock as follows. concatMap ensures that the items emitted + by this Observable are in the same sequence as they are begun, which will be important for composing + the list of Ids later. Eager ensures parallelism but may require some internal buffering. + */ + .concatMapEager(i -> { + // The max number of bytes for a block is currently 100MB, so the final result must be an int. + int count = (int) Math.min((long)blockLength, (file.size() - i * (long)blockLength)); + // i * blockLength could be a long, so we need a cast to prevent overflow. + Flowable data = FlowableUtil.readFile(file, i * (long)blockLength, count); + + // Report progress as necessary. + data = ProgressReporter.addParallelProgressReporting(data, optionsReal.progressReceiver(), + progressLock, totalProgress); + + final String blockId = Base64.getEncoder().encodeToString( + UUID.randomUUID().toString().getBytes()); + + /* + Make a call to stageBlock. Instead of emitting the response, which we don't care about other + than that it was successful, emit the blockId for this request. These will be collected below. + Turn that into an Observable which emits one item to comply with the signature of + concatMapEager. + */ + return blockBlobURL.stageBlock(blockId, data, + count, optionsReal.accessConditions().leaseAccessConditions(), null) + .map(x -> blockId).toObservable(); + + /* + Specify the number of concurrent subscribers to this map. This determines how many concurrent + rest calls are made. This is so because maxConcurrency is the number of internal subscribers + available to subscribe to the Observables emitted by the source. 
A subscriber is not released + for a new subscription until its Observable calls onComplete, which here means that the call to + stageBlock is finished. Prefetch is a hint that each of the Observables emitted by the source + will emit only one value, which is true here because we have converted from a Single. + */ + }, optionsReal.parallelism(), 1) + /* + collectInto will gather each of the emitted blockIds into a list. Because we used concatMap, the Ids + will be emitted according to their block number, which means the list generated here will be + properly ordered. This also converts into a Single. + */ + .collectInto(new ArrayList(numBlocks), ArrayList::add) + /* + collectInto will not emit the list until its source calls onComplete. This means that by the time we + call stageBlock list, all of the stageBlock calls will have finished. By flatMapping the list, we + can "map" it into a call to commitBlockList. + */ + .flatMap(ids -> + blockBlobURL.commitBlockList(ids, optionsReal.httpHeaders(), optionsReal.metadata(), + optionsReal.accessConditions(), null)) + + // Finally, we must turn the specific response type into a CommonRestResponse by mapping. + .map(CommonRestResponse::createFromPutBlockListResponse); + } + + private static int calculateNumBlocks(long dataSize, long blockLength) { + // Can successfully cast to an int because MaxBlockSize is an int, which this expression must be less than. + int numBlocks = toIntExact(dataSize / blockLength); + // Include an extra block for trailing data. + if (dataSize % blockLength != 0) { + numBlocks++; + } + return numBlocks; + } + + /** + * Downloads a file directly into a file, splitting the download into chunks and parallelizing as necessary. + * + * @param file + * The destination file to which the blob will be written. + * @param blobURL + * The URL to the blob to download. 
+ * @param range + * {@link BlobRange} + * @param options + * {@link TransferManagerDownloadFromBlobOptions} + * + * @return A {@code Completable} that will signal when the download is complete. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=tm_file "Sample code for TransferManager.downloadBlobToFile")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public static Single downloadBlobToFile(AsynchronousFileChannel file, BlobURL blobURL, + BlobRange range, TransferManagerDownloadFromBlobOptions options) { + BlobRange rangeReal = range == null ? BlobRange.DEFAULT : range; + TransferManagerDownloadFromBlobOptions optionsReal = options == null ? + TransferManagerDownloadFromBlobOptions.DEFAULT : options; + Utility.assertNotNull("blobURL", blobURL); + Utility.assertNotNull("file", file); + + // See ProgressReporter for an explanation on why this lock is necessary and why we use AtomicLong. + Lock progressLock = new ReentrantLock(); + AtomicLong totalProgress = new AtomicLong(0); + + // Get the size of the data and etag if not specified by the user. + Single> setupSingle = getSetupSingle(blobURL, rangeReal, optionsReal); + + return setupSingle.flatMap(setupPair -> { + Long dataSize = (Long)setupPair.get(0); + BlobAccessConditions realConditions = (BlobAccessConditions)setupPair.get(1); + + int numChunks = calculateNumBlocks(dataSize, optionsReal.chunkSize()); + + // In case it is an empty blob, this ensures we still actually perform a download operation. + numChunks = numChunks == 0 ? 1 : numChunks; + + return Observable.range(0, numChunks) + .flatMap(chunkNum -> { + // Calculate whether we need a full chunk or something smaller because we are at the end. 
+ long chunkSizeActual = Math.min(optionsReal.chunkSize(), + dataSize - (chunkNum * optionsReal.chunkSize())); + BlobRange chunkRange = new BlobRange().withOffset( + rangeReal.offset() + (chunkNum * optionsReal.chunkSize())) + .withCount(chunkSizeActual); + + // Make the download call. + return blobURL.download(chunkRange, realConditions, false, null) + // Extract the body. + .flatMapObservable(response -> { + Flowable data = response.body( + optionsReal.reliableDownloadOptionsPerBlock()); + + // Report progress as necessary. + data = ProgressReporter.addParallelProgressReporting(data, + optionsReal.progressReceiver(), progressLock, totalProgress); + + // Write to the file. + return FlowableUtil.writeFile(data, file, + chunkNum * optionsReal.chunkSize()) + /* + Satisfy the return type. Observable required for flatmap to accept + maxConcurrency. We want to eventually give the user back the headers. + */ + .andThen(Single.just(response.headers())) + .toObservable(); + }); + }, optionsReal.parallelism()) + // All the headers will be the same, so we just pick the last one. + .lastOrError(); + }); + } + + private static Single> getSetupSingle(BlobURL blobURL, BlobRange r, + TransferManagerDownloadFromBlobOptions o) { + /* + Construct a Single which will emit the total count of bytes to be downloaded and retrieve an etag to lock on to + if one was not specified. We use a single for this because we may have to make a REST call to get the length to + calculate the count and we need to maintain asynchronicity. 
+ */ + if (r.count() == null || o.accessConditions().modifiedAccessConditions().ifMatch() == null) { + return blobURL.getProperties(o.accessConditions(), null) + .map(response -> { + BlobAccessConditions newConditions; + if (o.accessConditions().modifiedAccessConditions().ifMatch() == null) { + newConditions = new BlobAccessConditions() + .withModifiedAccessConditions(new ModifiedAccessConditions() + .withIfModifiedSince( + o.accessConditions().modifiedAccessConditions().ifModifiedSince()) + .withIfUnmodifiedSince( + o.accessConditions().modifiedAccessConditions().ifUnmodifiedSince()) + .withIfMatch(response.headers().eTag()) + .withIfNoneMatch( + o.accessConditions().modifiedAccessConditions().ifNoneMatch())) + .withLeaseAccessConditions(o.accessConditions().leaseAccessConditions()); + } else { + newConditions = o.accessConditions(); + } + long newCount; + /* + If the user either didn't specify a count or they specified a count greater than the size of the + remaining data, take the size of the remaining data. This is to prevent the case where the count + is much much larger than the size of the blob and we could try to download at an invalid offset. + */ + if (r.count() == null || r.count() > response.headers().contentLength() - r.offset()) { + newCount = response.headers().contentLength() - r.offset(); + } else { + newCount = r.count(); + } + return Arrays.asList(newCount, newConditions); + }); + } else { + return Single.just(Arrays.asList(r.count(), o.accessConditions())); + } + } + + /** + * Uploads the contents of an arbitrary {@code Flowable} to a block blob. This Flowable need not be replayable and + * therefore it may have as its source a network stream or any other data for which the replay behavior is unknown + * (non-replayable meaning the Flowable may not return the exact same data on each subscription). 
+ * + * To eliminate the need for replayability on the source, the client must perform some buffering in order to ensure + * the actual data passed to the network is replayable. This is important in order to support retries, which are + * crucial for reliable data transfer. Typically, the greater the number of buffers used, the greater the possible + * parallelism. Larger buffers means we will have to stage fewer blocks. The tradeoffs between these values are + * context-dependent, so some experimentation may be required to optimize inputs for a given scenario. + * + * Note that buffering must be strictly sequential. Only the upload portion of this operation may be parallelized; + * the reads cannot be. Therefore, this method is not as optimal as + * {@link #uploadFileToBlockBlob(AsynchronousFileChannel, BlockBlobURL, int, TransferManagerUploadToBlockBlobOptions)} + * and if the source is known to be a file, that method should be preferred. + * + * @param source + * Contains the data to upload. Unlike other upload methods in this library, this method does not require + * that the Flowable be replayable. + * @param blockBlobURL + * Points to the blob to which the data should be uploaded. + * @param blockSize + * The size of each block that will be staged. This value also determines the size that each buffer used by + * this method will be and determines the number of requests that need to be made. The amount of memory + * consumed by this method may be up to blockSize * numBuffers. If block size is large, this method will + * make fewer network calls, but each individual call will send more data and will therefore take longer. + * @param numBuffers + * The maximum number of buffers this method should allocate. Must be at least two. Generally this value + * should have some relationship to the value for parallelism passed via the options. 
If the number of + * available buffers is smaller than the level of parallelism, then this method will not be able to make + * full use of the available parallelism. It is unlikely that the value need be more than two times the + * level of parallelism as such a value means that (assuming buffering is fast enough) there are enough + * available buffers to have both one occupied for each worker and one ready for all workers should they + * all complete the current request at approximately the same time. The amount of memory consumed by this + * method may be up to blockSize * numBuffers. + * @param options + * {@link TransferManagerUploadToBlockBlobOptions} + * @return Emits the successful response. + * + * @apiNote ## Sample Code \n + * [!code-java[Sample_Code](../azure-storage-java/src/test/java/com/microsoft/azure/storage/Samples.java?name=tm_nrf "Sample code for TransferManager.uploadFromNonReplayableFlowable")] \n + * For more samples, please see the [Samples file](%https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java) + */ + public static Single uploadFromNonReplayableFlowable( + final Flowable source, final BlockBlobURL blockBlobURL, final int blockSize, + final int numBuffers, final TransferManagerUploadToBlockBlobOptions options) { + Utility.assertNotNull("source", source); + Utility.assertNotNull("blockBlobURL", blockBlobURL); + + TransferManagerUploadToBlockBlobOptions optionsReal = options == null ? + TransferManagerUploadToBlockBlobOptions.DEFAULT : options; + + // See ProgressReporter for an explanation on why this lock is necessary and why we use AtomicLong. + AtomicLong totalProgress = new AtomicLong(0); + Lock progressLock = new ReentrantLock(); + + // Validation done in the constructor. + UploadFromNRFBufferPool pool = new UploadFromNRFBufferPool(numBuffers, blockSize); + + /* + Break the source flowable into chunks that are <= chunk size. 
This makes filling the pooled buffers much easier + as we can guarantee we only need at most two buffers for any call to write (two in the case of one pool buffer + filling up with more data to write) + */ + Flowable chunkedSource = source.flatMap(buffer -> { + if (buffer.remaining() <= blockSize) { + return Flowable.just(buffer); + } + List smallerChunks = new ArrayList<>(); + for (int i=0; i < Math.ceil(buffer.remaining() / (double)blockSize); i++) { + // Note that duplicate does not duplicate data. It simply creates a duplicate view of the data. + ByteBuffer duplicate = buffer.duplicate(); + duplicate.position(i * blockSize); + duplicate.limit(Math.min(duplicate.limit(), (i+1) * blockSize)); + smallerChunks.add(duplicate); + } + return Flowable.fromIterable(smallerChunks); + }, false, 1); + + /* + Write each buffer from the chunkedSource to the pool and call flush at the end to get the last bits. + */ + return chunkedSource.flatMap(pool::write, false, 1) + .concatWith(Flowable.defer(pool::flush)) + .concatMapEager(buffer -> { + // Report progress as necessary. + Flowable data = ProgressReporter.addParallelProgressReporting(Flowable.just(buffer), + optionsReal.progressReceiver(), progressLock, totalProgress); + + final String blockId = Base64.getEncoder().encodeToString( + UUID.randomUUID().toString().getBytes()); + + /* + Make a call to stageBlock. Instead of emitting the response, which we don't care about other + than that it was successful, emit the blockId for this request. These will be collected below. + Turn that into an Observable which emits one item to comply with the signature of + concatMapEager. + */ + return blockBlobURL.stageBlock(blockId, data, + buffer.remaining(), optionsReal.accessConditions().leaseAccessConditions(), null) + .map(x -> { + pool.returnBuffer(buffer); + return blockId; + }).toFlowable(); + + /* + Specify the number of concurrent subscribers to this map. This determines how many concurrent + rest calls are made. 
This is so because maxConcurrency is the number of internal subscribers + available to subscribe to the Observables emitted by the source. A subscriber is not released + for a new subscription until its Observable calls onComplete, which here means that the call to + stageBlock is finished. Prefetch is a hint that each of the Observables emitted by the source + will emit only one value, which is true here because we have converted from a Single. + */ + }, optionsReal.parallelism(), 1) + /* + collectInto will gather each of the emitted blockIds into a list. Because we used concatMap, the Ids + will be emitted according to their block number, which means the list generated here will be + properly ordered. This also converts into a Single. + */ + .collectInto(new ArrayList(), ArrayList::add) + /* + collectInto will not emit the list until its source calls onComplete. This means that by the time we + call stageBlock list, all of the stageBlock calls will have finished. By flatMapping the list, we + can "map" it into a call to commitBlockList. + */ + .flatMap(ids -> + blockBlobURL.commitBlockList(ids, optionsReal.httpHeaders(), optionsReal.metadata(), + optionsReal.accessConditions(), null)); + + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/TransferManagerDownloadFromBlobOptions.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/TransferManagerDownloadFromBlobOptions.java new file mode 100644 index 0000000000000..84c0bf1ccbcb7 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/TransferManagerDownloadFromBlobOptions.java @@ -0,0 +1,119 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.microsoft.azure.storage.blob; + +/** + * Configures the parallel download behavior for methods on the {@link TransferManager}. + */ +public final class TransferManagerDownloadFromBlobOptions { + + /** + * The default download options. + */ + public static final TransferManagerDownloadFromBlobOptions DEFAULT = + new TransferManagerDownloadFromBlobOptions(null, null, null, null, null); + + private final long chunkSize; + + private final IProgressReceiver progressReceiver; + + private final int parallelism; + + private final ReliableDownloadOptions reliableDownloadOptionsPerBlock; + + // Cannot be final because we may have to set this property in order to lock on the etag. + private BlobAccessConditions accessConditions; + + /** + * Returns an object that configures the parallel download behavior for methods on the {@link TransferManager}. + * + * @param chunkSize + * The size of the chunk into which large download operations will be broken into. Note that if the + * chunkSize is large, fewer but larger requests will be made as each REST request will download a + * single chunk in full. For larger chunk sizes, it may be helpful to configure the + * {@code reliableDownloadOptions} to allow more retries. + * @param progressReceiver + * {@link IProgressReceiver} + * @param accessConditions + * {@link BlobAccessConditions} + * @param reliableDownloadOptions + * {@link ReliableDownloadOptions} + * @param parallelism + * A {@code int} that indicates the maximum number of chunks to download in parallel. Must be greater + * than 0. 
May be null to accept default behavior. + */ + public TransferManagerDownloadFromBlobOptions(Long chunkSize, IProgressReceiver progressReceiver, + BlobAccessConditions accessConditions, ReliableDownloadOptions reliableDownloadOptions, + Integer parallelism) { + this.progressReceiver = progressReceiver; + + if (chunkSize != null) { + Utility.assertInBounds("chunkSize", chunkSize, 1, Long.MAX_VALUE); + this.chunkSize = chunkSize; + } else { + this.chunkSize = TransferManager.BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE; + } + + if (parallelism != null) { + Utility.assertInBounds("parallelism", parallelism, 1, Integer.MAX_VALUE); + this.parallelism = parallelism; + } else { + this.parallelism = Constants.TRANSFER_MANAGER_DEFAULT_PARALLELISM; + } + + this.accessConditions = accessConditions == null ? BlobAccessConditions.NONE : accessConditions; + this.reliableDownloadOptionsPerBlock = reliableDownloadOptions == null ? + new ReliableDownloadOptions() : reliableDownloadOptions; + } + + /** + * The size of the chunk into which large download operations will be broken into. Note that if the chunkSize is + * large, fewer but larger requests will be made as each REST request will download a single chunk in full. For + * larger chunk sizes, it may be helpful to configure the{@code reliableDownloadOptions} to allow more retries. + */ + public long chunkSize() { + return chunkSize; + } + + /** + * {@link IProgressReceiver} + */ + public IProgressReceiver progressReceiver() { + return progressReceiver; + } + + /** + * A {@code int} that indicates the maximum number of chunks to download in parallel. Must be greater than 0. May be + * null to accept default behavior. 
+ */ + public int parallelism() { + return parallelism; + } + + /** + * {@link ReliableDownloadOptions} + */ + public ReliableDownloadOptions reliableDownloadOptionsPerBlock() { + return reliableDownloadOptionsPerBlock; + } + + /** + * {@link BlobAccessConditions} + */ + public BlobAccessConditions accessConditions() { + return accessConditions; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/TransferManagerUploadToBlockBlobOptions.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/TransferManagerUploadToBlockBlobOptions.java new file mode 100644 index 0000000000000..62cf49577bc86 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/TransferManagerUploadToBlockBlobOptions.java @@ -0,0 +1,110 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.microsoft.azure.storage.blob; + +import com.microsoft.azure.storage.blob.models.BlobHTTPHeaders; + +/** + * Configures the parallel upload behavior for methods on the {@link TransferManager}. + */ +public class TransferManagerUploadToBlockBlobOptions { + + /** + * An object which represents the default parallel upload options. 
+ */ + public static final TransferManagerUploadToBlockBlobOptions DEFAULT = new TransferManagerUploadToBlockBlobOptions( + null, null, null, null, null); + + private final IProgressReceiver progressReceiver; + + private final BlobHTTPHeaders httpHeaders; + + private final Metadata metadata; + + private final BlobAccessConditions accessConditions; + + private final int parallelism; + + /** + * Creates a new object that configures the parallel upload behavior. Null may be passed to accept the default + * behavior. + * + * @param progressReceiver + * {@link IProgressReceiver} + * @param httpHeaders + * Most often used when creating a blob or setting its properties, this class contains fields for typical + * HTTP properties, which, if specified, will be attached to the target blob. Null may be passed to any API. + * @param metadata + * {@link Metadata} + * @param accessConditions + * {@link BlobAccessConditions} + * @param parallelism + * A {@code int} that indicates the maximum number of blocks to upload in parallel. Must be greater than 0. + * May be null to accept default behavior. + */ + public TransferManagerUploadToBlockBlobOptions(IProgressReceiver progressReceiver, BlobHTTPHeaders httpHeaders, + Metadata metadata, BlobAccessConditions accessConditions, Integer parallelism) { + this.progressReceiver = progressReceiver; + if (parallelism == null) { + this.parallelism = Constants.TRANSFER_MANAGER_DEFAULT_PARALLELISM; + } else if (parallelism <= 0) { + throw new IllegalArgumentException("Parallelism must be > 0"); + } else { + this.parallelism = parallelism; + } + + this.httpHeaders = httpHeaders; + this.metadata = metadata; + this.accessConditions = accessConditions == null ? 
BlobAccessConditions.NONE : accessConditions; + } + + /** + * {@link IProgressReceiver} + */ + public IProgressReceiver progressReceiver() { + return progressReceiver; + } + + /** + * Most often used when creating a blob or setting its properties, this class contains fields for typical HTTP + * properties, which, if specified, will be attached to the target blob. Null may be passed to any API. + */ + public BlobHTTPHeaders httpHeaders() { + return httpHeaders; + } + + /** + * {@link Metadata} + */ + public Metadata metadata() { + return metadata; + } + + /** + * {@link BlobAccessConditions} + */ + public BlobAccessConditions accessConditions() { + return accessConditions; + } + + /** + * A {@code int} that indicates the maximum number of blocks to upload in parallel. Must be greater than 0. May be + * null to accept default behavior. + */ + public int parallelism() { + return parallelism; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/URLParser.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/URLParser.java new file mode 100644 index 0000000000000..f43ab7fb1e3fb --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/URLParser.java @@ -0,0 +1,141 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
package com.microsoft.azure.storage.blob;

import java.net.URL;
import java.net.UnknownHostException;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Locale;
import java.util.Map;
import java.util.TreeMap;

/**
 * A class used to conveniently parse URLs into {@link BlobURLParts} to modify the components of the URL.
 */
public final class URLParser {

    /**
     * URLParser parses a URL initializing BlobURLParts' fields including any SAS-related and snapshot query
     * parameters. Any other query parameters remain in the UnparsedParams field. This method overwrites all fields
     * in the BlobURLParts object.
     *
     * @param url
     *         The {@code URL} to be parsed.
     *
     * @return A {@link BlobURLParts} object containing all the components of a BlobURL.
     *
     * @throws UnknownHostException
     *         If the url contains an improperly formatted ipaddress or unknown host address.
     */
    public static BlobURLParts parse(URL url) throws UnknownHostException {

        final String scheme = url.getProtocol();
        final String host = url.getHost();

        String containerName = null;
        String blobName = null;

        // Find the container & blob names (if any).
        String path = url.getPath();
        if (!Utility.isNullOrEmpty(path)) {
            // If the path starts with a slash, remove it.
            if (path.charAt(0) == '/') {
                path = path.substring(1);
            }

            int containerEndIndex = path.indexOf('/');
            if (containerEndIndex == -1) {
                // Path contains only a container name and no blob name.
                containerName = path;
            } else {
                // Container name runs up to the slash; blob name is everything after it.
                containerName = path.substring(0, containerEndIndex);
                blobName = path.substring(containerEndIndex + 1);
            }
        }
        Map<String, String[]> queryParamsMap = parseQueryString(url.getQuery());

        // "snapshot" is promoted to its own field; remove it so it is not treated as an unparsed param.
        String snapshot = null;
        String[] snapshotArray = queryParamsMap.get("snapshot");
        if (snapshotArray != null) {
            snapshot = snapshotArray[0];
            queryParamsMap.remove("snapshot");
        }

        // SAS-related params are consumed by this constructor and removed from the map.
        SASQueryParameters sasQueryParameters = new SASQueryParameters(queryParamsMap, true);

        return new BlobURLParts()
                .withScheme(scheme)
                .withHost(host)
                .withContainerName(containerName)
                .withBlobName(blobName)
                .withSnapshot(snapshot)
                .withSasQueryParameters(sasQueryParameters)
                .withUnparsedParameters(queryParamsMap);
    }

    /**
     * Parses a query string into a one-to-many map (sorted by key, natural string order).
     *
     * @param queryParams
     *         The string of query params to parse.
     *
     * @return A {@code TreeMap} from lower-cased, URL-decoded key to the array of its decoded values.
     */
    private static TreeMap<String, String[]> parseQueryString(String queryParams) {

        // Natural string ordering — identical to the previous explicit compareTo comparator.
        final TreeMap<String, String[]> retVals = new TreeMap<>(Comparator.naturalOrder());

        if (Utility.isNullOrEmpty(queryParams)) {
            return retVals;
        }

        // Split name/value pairs on the '&' character.
        final String[] valuePairs = queryParams.split("&");

        for (final String pair : valuePairs) {
            final int equalDex = pair.indexOf("=");

            final String key;
            final String value;
            if (equalDex < 0) {
                // Robustness fix: a parameter with no '=' previously caused a
                // StringIndexOutOfBoundsException; treat it as a key with an empty value instead.
                key = Utility.safeURLDecode(pair).toLowerCase(Locale.ROOT);
                value = "";
            } else {
                key = Utility.safeURLDecode(pair.substring(0, equalDex)).toLowerCase(Locale.ROOT);
                value = Utility.safeURLDecode(pair.substring(equalDex + 1));
            }

            final String[] existing = retVals.get(key);
            if (existing == null) {
                // First occurrence of this key.
                retVals.put(key, new String[]{value});
            } else {
                // BUG FIX: the appended array must be stored back into the map. Previously the
                // extended array was built but never put(), so every repeated query parameter
                // after the first was silently dropped.
                final String[] appended = Arrays.copyOf(existing, existing.length + 1);
                appended[appended.length - 1] = value;
                retVals.put(key, appended);
            }
        }

        return retVals;
    }
}
package com.microsoft.azure.storage.blob;

import io.reactivex.Flowable;

import java.nio.ByteBuffer;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
// NOTE(review): the Lock/ReentrantLock imports below appear unused in this file — confirm before removing.
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

/**
 * This type is to support the implementation of uploadFromNonReplaybleFlowable only. It is mandatory that the caller
 * has broken the source into ByteBuffers that are no greater than the size of a chunk and therefore a buffer in the
 * pool. This is necessary because it upper bounds the number of buffers we need for a given call to write() to 2. If
 * the size of ByteBuffer passed into write() were unbounded, the pool could stall as it would run out of buffers
 * before it is able to return a result, and if it is unable to return, no data can be uploaded and therefore no
 * pools returned.
 *
 * It is incumbent upon the caller to return the buffers after an upload is completed. It is also the caller's
 * responsibility to signal to the pool when the stream is empty and call flush to return any data still sitting in
 * the pool.
 *
 * Broadly, the workflow of this operation is to chunk the source into reasonable sized pieces. On each piece, one
 * thread will call write on the pool. The pool will grab a buffer from the queue to write to, possibly waiting for
 * one to be available, and either store the incomplete buffer to be filled on the next write or return the buffer to
 * be sent. Filled buffers can be uploaded in parallel and should return buffers to the pool after the upload
 * completes. Once the source terminates, it should call flush.
 *
 * NOTE(review): this class is not documented as thread-safe beyond the workflow above — write()/flush() appear to
 * assume a single producer thread; confirm with the caller in TransferManager.
 */
final class UploadFromNRFBufferPool {

    // Pool of reusable buffers; take() blocks until an in-flight upload returns one via returnBuffer().
    private final BlockingQueue<ByteBuffer> buffers;

    // Hard cap on the number of buffers this pool will ever allocate.
    private final int maxBuffs;

    // Number of buffers allocated so far; starts at 2 and grows lazily up to maxBuffs.
    private int numBuffs = 0;

    // Capacity of each pooled buffer; bounded above by the maximum stage-block size.
    private final int buffSize;

    // Partially filled buffer carried over between calls to write(); null when no data is pending.
    private ByteBuffer currentBuf;

    /**
     * Creates the pool, pre-allocating two buffers (the minimum needed for a single write that overflows).
     *
     * @param numBuffs maximum number of buffers the pool may allocate; must be at least 2.
     * @param buffSize capacity of each buffer; must be in (0, BlockBlobURL.MAX_STAGE_BLOCK_BYTES].
     */
    UploadFromNRFBufferPool(final int numBuffs, final int buffSize) {
        /*
        We require at least two buffers because it is possible that a given write will spill over into a second
        buffer. We only need one overflow buffer because the max size of a ByteBuffer is assumed to be the size as a
        buffer in the pool.
        */
        Utility.assertInBounds("numBuffs", numBuffs, 2, Integer.MAX_VALUE);
        this.maxBuffs = numBuffs;
        buffers = new LinkedBlockingQueue<>(numBuffs);

        //These buffers will be used in calls to stageBlock, so they must be no greater than block size.
        Utility.assertInBounds("buffSize", buffSize, 1, BlockBlobURL.MAX_STAGE_BLOCK_BYTES);
        this.buffSize = buffSize;

        //We prep the queue with two buffers in case there is overflow.
        buffers.add(ByteBuffer.allocate(this.buffSize));
        buffers.add(ByteBuffer.allocate(this.buffSize));
        this.numBuffs = 2;
    }

    /**
     * Accepts incoming data and emits a full pool buffer when one is filled; otherwise emits nothing and keeps
     * accumulating. {@code buf} must be no larger than one pool buffer (see class comment) so at most one overflow
     * buffer is needed.
     *
     * @param buf the caller's data; fully consumed by this call.
     * @return a Flowable emitting zero or one filled buffer ready for upload.
     */
    public Flowable<ByteBuffer> write(ByteBuffer buf) {
        // Check if there's a buffer holding any data from a previous call to write. If not, get a new one.
        if (this.currentBuf == null) {
            this.currentBuf = this.getBuffer();
        }

        Flowable<ByteBuffer> result;
        // We can fit this whole write in the buffer we currently have.
        if (this.currentBuf.remaining() >= buf.remaining()) {
            this.currentBuf.put(buf);
            if (this.currentBuf.remaining() == 0) {
                // Reset the position so that we can read the whole thing then return this buffer.
                this.currentBuf.position(0);
                result = Flowable.just(this.currentBuf);
                // This will force us to get a new buffer next time we try to write.
                this.currentBuf = null;
            } else {
                /*
                We are still filling the current buffer, so we have no data to return. We will return the buffer
                once it is filled.
                */
                result = Flowable.empty();
            }
        }
        // We will overflow the current buffer and require another one.
        else {
            // Adjust the window of buf so that we fill up currentBuf without going out of bounds.
            int oldLimit = buf.limit();
            buf.limit(buf.position() + this.currentBuf.remaining());
            this.currentBuf.put(buf);
            // Set the old limit so we can read to the end in the next buffer.
            buf.limit(oldLimit);

            // Reset the position so we can read the buffer.
            this.currentBuf.position(0);
            result = Flowable.just(this.currentBuf);

            /*
            Get a new buffer and fill it with whatever is left from buf. Note that this relies on the assumption
            that the source Flowable has been split up into buffers that are no bigger than chunk size. This
            assumption means we'll only have to over flow once, and the buffer we overflow into will not be filled.
            This is the buffer we will write to on the next call to write().
            */
            this.currentBuf = this.getBuffer();
            this.currentBuf.put(buf);
        }
        return result;
    }

    /**
     * Obtains a buffer: allocates a fresh one while under the cap, otherwise blocks until an upload returns one.
     */
    private ByteBuffer getBuffer() {
        ByteBuffer result;
        // There are no buffers in the queue and we have space to allocate one.
        if (this.buffers.isEmpty() && this.numBuffs < this.maxBuffs) {
            result = ByteBuffer.allocate(this.buffSize);
            this.numBuffs++;
        } else {
            try {
                // If empty, this will wait for an upload to finish and return a buffer.
                result = this.buffers.take();

            } catch (InterruptedException e) {
                // NOTE(review): the interrupt flag is not re-asserted here before rethrowing — confirm intent.
                throw new IllegalStateException("UploadFromStream thread interrupted." + " Thread:"
                        + Thread.currentThread().getId());
            }
        }
        return result;
    }

    /**
     * Emits any partially filled data still held by the pool; call exactly once after the source completes.
     * Safe against an accidental second call (returns an empty Flowable).
     */
    Flowable<ByteBuffer> flush() {
        /*
        Prep and return any data left in the pool. It is important to set the limit so that we don't read beyond the
        actual data as this buffer may have been used before and therefore may have some garbage at the end.
        */
        if (this.currentBuf != null) {
            this.currentBuf.flip();
            ByteBuffer last = this.currentBuf;
            // If there is an accidental duplicate call to flush, this prevents sending the last buffer twice.
            this.currentBuf = null;
            return Flowable.just(last);
        }
        return Flowable.empty();
    }

    /**
     * Returns a buffer to the pool after its upload completes; the buffer is reset to full capacity for reuse.
     */
    void returnBuffer(ByteBuffer b) {
        // Reset the buffer.
        b.position(0);
        b.limit(b.capacity());

        try {
            this.buffers.put(b);
        } catch (InterruptedException e) {
            // NOTE(review): interrupt flag not re-asserted here either — confirm intent.
            throw new IllegalStateException("UploadFromStream thread interrupted.");
        }
    }
}
package com.microsoft.azure.storage.blob;

import com.microsoft.azure.storage.blob.models.StorageErrorException;
import io.reactivex.Single;

import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.time.LocalDateTime;
import java.time.OffsetDateTime;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.util.Locale;

/**
 * Package-private grab bag of helpers: argument assertions, URL-safe encode/decode, service date parsing, and
 * error-wrapping for reactive pipelines.
 */
final class Utility {

    // RFC 1123 date format (e.g. "Tue, 01 Jan 2019 00:00:00 GMT") used in HTTP headers.
    static final DateTimeFormatter RFC1123GMTDateFormatter =
            DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss z", Locale.ROOT).withZone(ZoneId.of("GMT"));

    // ISO 8601 UTC format used in service XML payloads and SAS strings.
    static final DateTimeFormatter ISO8601UTCDateFormatter =
            DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss'Z'", Locale.ROOT).withZone(ZoneId.of("UTC"));
    /**
     * Stores a reference to the UTC time zone.
     */
    static final ZoneId UTC_ZONE = ZoneId.of("UTC");
    /**
     * Stores a reference to the date/time pattern with the greatest precision Java.util.Date is capable of
     * expressing.
     */
    private static final String MAX_PRECISION_PATTERN = "yyyy-MM-dd'T'HH:mm:ss.SSS";
    /**
     * Stores a reference to the ISO8601 date/time pattern (with seconds).
     */
    private static final String ISO8601_PATTERN = "yyyy-MM-dd'T'HH:mm:ss'Z'";
    /**
     * Stores a reference to the ISO8601 date/time pattern without seconds.
     */
    private static final String ISO8601_PATTERN_NO_SECONDS = "yyyy-MM-dd'T'HH:mm'Z'";
    /**
     * The length of a datestring that matches the MAX_PRECISION_PATTERN (quotes stripped), i.e. 23.
     */
    private static final int MAX_PRECISION_DATESTRING_LENGTH = MAX_PRECISION_PATTERN.replaceAll("'", "").length();

    /**
     * Asserts that a value is not null.
     *
     * @param param
     *         A {@code String} that represents the name of the parameter, which becomes the exception message
     *         text if the value parameter is null.
     * @param value
     *         An Object object that represents the value of the specified parameter. This is the value
     *         being asserted as not null.
     * @throws IllegalArgumentException if {@code value} is null.
     */
    static void assertNotNull(final String param, final Object value) {
        if (value == null) {
            throw new IllegalArgumentException(String.format(Locale.ROOT, SR.ARGUMENT_NULL_OR_EMPTY, param));
        }
    }

    /**
     * Returns a value that indicates whether the specified string is null or empty.
     *
     * @param value
     *         A {@code String} being examined for null or empty.
     *
     * @return true if the specified value is null or empty; otherwise, false
     */
    static boolean isNullOrEmpty(final String value) {
        return value == null || value.length() == 0;
    }

    /**
     * Performs safe decoding of the specified string, taking care to preserve each + character, rather
     * than replacing it with a space character (URLDecoder alone would decode '+' as ' ').
     *
     * @param stringToDecode
     *         A {@code String} that represents the string to decode.
     *
     * @return A {@code String} that represents the decoded string.
     */
    static String safeURLDecode(final String stringToDecode) {
        if (stringToDecode.length() == 0) {
            return Constants.EMPTY_STRING;
        }

        // '+' are decoded as ' ' so preserve before decoding
        if (stringToDecode.contains("+")) {
            final StringBuilder outBuilder = new StringBuilder();

            // Decode each '+'-free segment, re-appending literal '+' between them.
            int startDex = 0;
            for (int m = 0; m < stringToDecode.length(); m++) {
                if (stringToDecode.charAt(m) == '+') {
                    if (m > startDex) {
                        try {
                            outBuilder.append(URLDecoder.decode(stringToDecode.substring(startDex, m),
                                    Constants.UTF8_CHARSET));
                        } catch (UnsupportedEncodingException e) {
                            // UTF-8 is guaranteed by the JVM; its absence is unrecoverable.
                            throw new Error(e);
                        }
                    }

                    outBuilder.append("+");
                    startDex = m + 1;
                }
            }

            // Decode the trailing segment after the last '+', if any.
            if (startDex != stringToDecode.length()) {
                try {
                    outBuilder.append(URLDecoder.decode(stringToDecode.substring(startDex, stringToDecode.length()),
                            Constants.UTF8_CHARSET));
                } catch (UnsupportedEncodingException e) {
                    throw new Error(e);
                }
            }

            return outBuilder.toString();
        } else {
            try {
                return URLDecoder.decode(stringToDecode, Constants.UTF8_CHARSET);
            } catch (UnsupportedEncodingException e) {
                throw new Error(e);
            }
        }
    }

    /**
     * Given a String representing a date in a form of the ISO8601 pattern, generates a Date representing it
     * with up to millisecond precision. Fractional digits beyond milliseconds are truncated.
     *
     * @param dateString
     *         the {@code String} to be interpreted as a Date
     *
     * @return the corresponding Date object
     * @throws IllegalArgumentException if the string length matches none of the supported ISO 8601 shapes.
     */
    public static OffsetDateTime parseDate(String dateString) {
        String pattern = MAX_PRECISION_PATTERN;
        // The string's length identifies which ISO 8601 variant it is; normalize everything to
        // millisecond precision (the MAX_PRECISION_PATTERN) or pick the matching seconds/no-seconds pattern.
        switch (dateString.length()) {
            case 28: // "yyyy-MM-dd'T'HH:mm:ss.SSSSSSS'Z'"-> [2012-01-04T23:21:59.1234567Z] length = 28
            case 27: // "yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"-> [2012-01-04T23:21:59.123456Z] length = 27
            case 26: // "yyyy-MM-dd'T'HH:mm:ss.SSSSS'Z'"-> [2012-01-04T23:21:59.12345Z] length = 26
            case 25: // "yyyy-MM-dd'T'HH:mm:ss.SSSS'Z'"-> [2012-01-04T23:21:59.1234Z] length = 25
            case 24: // "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"-> [2012-01-04T23:21:59.123Z] length = 24
                // Truncate to exactly millisecond precision (23 chars), dropping the trailing digits and 'Z'.
                dateString = dateString.substring(0, MAX_PRECISION_DATESTRING_LENGTH);
                break;
            case 23: // "yyyy-MM-dd'T'HH:mm:ss.SS'Z'"-> [2012-01-04T23:21:59.12Z] length = 23
                // SS is assumed to be milliseconds, so a trailing 0 is necessary
                dateString = dateString.replace("Z", "0");
                break;
            case 22: // "yyyy-MM-dd'T'HH:mm:ss.S'Z'"-> [2012-01-04T23:21:59.1Z] length = 22
                // S is assumed to be milliseconds, so trailing 0's are necessary
                dateString = dateString.replace("Z", "00");
                break;
            case 20: // "yyyy-MM-dd'T'HH:mm:ss'Z'"-> [2012-01-04T23:21:59Z] length = 20
                pattern = Utility.ISO8601_PATTERN;
                break;
            case 17: // "yyyy-MM-dd'T'HH:mm'Z'"-> [2012-01-04T23:21Z] length = 17
                pattern = Utility.ISO8601_PATTERN_NO_SECONDS;
                break;
            default:
                throw new IllegalArgumentException(String.format(Locale.ROOT, SR.INVALID_DATE_STRING, dateString));
        }

        DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT);
        return LocalDateTime.parse(dateString, formatter).atZone(UTC_ZONE).toOffsetDateTime();
    }

    /**
     * Asserts that the specified integer is in the valid range.
     *
     * @param param
     *         A String that represents the name of the parameter, which becomes the exception message
     *         text if the value parameter is out of bounds.
     * @param value
     *         The value of the specified parameter.
     * @param min
     *         The minimum value for the specified parameter.
     * @param max
     *         The maximum value for the specified parameter.
     * @throws IllegalArgumentException if {@code value} is outside [min, max].
     */
    public static void assertInBounds(final String param, final long value, final long min, final long max) {
        if (value < min || value > max) {
            throw new IllegalArgumentException(String.format(Locale.ROOT, SR.PARAMETER_NOT_IN_RANGE, param, min, max));
        }
    }

    /**
     * Performs safe encoding of the specified string, taking care to insert %20 for each space character,
     * instead of inserting the + character (which URLEncoder would produce).
     */
    static String safeURLEncode(final String stringToEncode) {
        if (stringToEncode == null) {
            return null;
        }
        if (stringToEncode.length() == 0) {
            return Constants.EMPTY_STRING;
        }

        try {
            final String tString = URLEncoder.encode(stringToEncode, Constants.UTF8_CHARSET);

            if (stringToEncode.contains(" ")) {
                final StringBuilder outBuilder = new StringBuilder();

                // Encode each space-free segment, inserting "%20" where the spaces were.
                int startDex = 0;
                for (int m = 0; m < stringToEncode.length(); m++) {
                    if (stringToEncode.charAt(m) == ' ') {
                        if (m > startDex) {
                            outBuilder.append(URLEncoder.encode(stringToEncode.substring(startDex, m),
                                    Constants.UTF8_CHARSET));
                        }

                        outBuilder.append("%20");
                        startDex = m + 1;
                    }
                }

                // Encode the trailing segment after the final space, if any.
                if (startDex != stringToEncode.length()) {
                    outBuilder.append(URLEncoder.encode(stringToEncode.substring(startDex, stringToEncode.length()),
                            Constants.UTF8_CHARSET));
                }

                return outBuilder.toString();
            } else {
                return tString;
            }

        } catch (final UnsupportedEncodingException e) {
            throw new Error(e); // If we can't encode UTF-8, we fail.
        }
    }

    /**
     * Maps generated-client {@link StorageErrorException} failures onto the public {@link StorageException} type;
     * all other errors pass through unchanged.
     */
    static <T> Single<T> addErrorWrappingToSingle(Single<T> s) {
        return s.onErrorResumeNext(e -> {
            if (e instanceof StorageErrorException) {
                return Single.error(new StorageException((StorageErrorException) e));
            }
            return Single.error(e);
        });
    }
}
// --- next file in patch: com/microsoft/azure/storage/blob/implementation/ListBlobsIncludeItemWrapper.java ---
/**
 * Copyright (c) Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See License.txt in the project root for
 * license information.
 *
 * Code generated by Microsoft (R) AutoRest Code Generator.
 * Changes may cause incorrect behavior and will be lost if the code is
 * regenerated.
 */

package com.microsoft.azure.storage.blob.implementation;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty;
import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement;
import com.microsoft.azure.storage.blob.models.ListBlobsIncludeItem;
import java.util.ArrayList;
import java.util.List;

/**
 * A wrapper around List&lt;ListBlobsIncludeItem&gt; which provides top-level metadata for serialization.
 * (Generated code — do not hand-edit; changes are lost on regeneration.)
 */
@JacksonXmlRootElement(localName = "ListBlobsIncludeItem")
public final class ListBlobsIncludeItemWrapper {
    @JacksonXmlProperty(localName = "ListBlobsIncludeItem")
    private final List<ListBlobsIncludeItem> listBlobsIncludeItem;

    /**
     * Creates an instance of ListBlobsIncludeItemWrapper.
     *
     * @param listBlobsIncludeItem the list.
     */
    @JsonCreator
    public ListBlobsIncludeItemWrapper(
            @JsonProperty("ListBlobsIncludeItem") List<ListBlobsIncludeItem> listBlobsIncludeItem) {
        this.listBlobsIncludeItem = listBlobsIncludeItem;
    }

    /**
     * Get the List&lt;ListBlobsIncludeItem&gt; contained in this wrapper.
     *
     * @return the List&lt;ListBlobsIncludeItem&gt;.
     */
    public List<ListBlobsIncludeItem> items() {
        return listBlobsIncludeItem;
    }
}
/**
 * Copyright (c) Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See License.txt in the project root for
 * license information.
 *
 * Code generated by Microsoft (R) AutoRest Code Generator.
 * Changes may cause incorrect behavior and will be lost if the code is
 * regenerated.
 */

package com.microsoft.azure.storage.blob.implementation;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty;
import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement;
import com.microsoft.azure.storage.blob.models.SignedIdentifier;
// NOTE(review): ArrayList import appears unused — generated code, left as-is.
import java.util.ArrayList;
import java.util.List;

/**
 * A wrapper around List&lt;SignedIdentifier&gt; which provides top-level metadata for serialization.
 * (Generated code — do not hand-edit; changes are lost on regeneration.)
 */
@JacksonXmlRootElement(localName = "SignedIdentifier")
public final class SignedIdentifierWrapper {
    @JacksonXmlProperty(localName = "SignedIdentifier")
    private final List<SignedIdentifier> signedIdentifier;

    /**
     * Creates an instance of SignedIdentifierWrapper.
     *
     * @param signedIdentifier the list.
     */
    @JsonCreator
    public SignedIdentifierWrapper(@JsonProperty("SignedIdentifier") List<SignedIdentifier> signedIdentifier) {
        this.signedIdentifier = signedIdentifier;
    }

    /**
     * Get the List&lt;SignedIdentifier&gt; contained in this wrapper.
     *
     * @return the List&lt;SignedIdentifier&gt;.
     */
    public List<SignedIdentifier> items() {
        return signedIdentifier;
    }
}
// --- next file in patch: com/microsoft/azure/storage/blob/implementation/SignedIdentifiersWrapper.java ---
/**
 * Copyright (c) Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See License.txt in the project root for
 * license information.
 *
 * Code generated by Microsoft (R) AutoRest Code Generator.
 * Changes may cause incorrect behavior and will be lost if the code is
 * regenerated.
 */

package com.microsoft.azure.storage.blob.implementation;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty;
import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement;
import com.microsoft.azure.storage.blob.models.SignedIdentifier;
// NOTE(review): ArrayList import appears unused — generated code, left as-is.
import java.util.ArrayList;
import java.util.List;

/**
 * A wrapper around List&lt;SignedIdentifier&gt; which provides top-level metadata for serialization.
 * Distinct from SignedIdentifierWrapper: the XML root element here is plural ("SignedIdentifiers").
 * (Generated code — do not hand-edit; changes are lost on regeneration.)
 */
@JacksonXmlRootElement(localName = "SignedIdentifiers")
public final class SignedIdentifiersWrapper {
    @JacksonXmlProperty(localName = "SignedIdentifier")
    private final List<SignedIdentifier> signedIdentifiers;

    /**
     * Creates an instance of SignedIdentifiersWrapper.
     *
     * @param signedIdentifiers the list.
     */
    @JsonCreator
    public SignedIdentifiersWrapper(@JsonProperty("SignedIdentifier") List<SignedIdentifier> signedIdentifiers) {
        this.signedIdentifiers = signedIdentifiers;
    }

    /**
     * Get the List&lt;SignedIdentifier&gt; contained in this wrapper.
     *
     * @return the List&lt;SignedIdentifier&gt;.
     */
    public List<SignedIdentifier> items() {
        return signedIdentifiers;
    }
}
// --- next file in patch: com/microsoft/azure/storage/blob/implementation/package-info.java ---
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for
// license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.

/**
 * This package contains the blob.implementation classes for StorageClient.
 * Storage Client.
 */
package com.microsoft.azure.storage.blob.implementation;
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import java.time.OffsetDateTime; + +/** + * An Access policy. + */ +@JacksonXmlRootElement(localName = "AccessPolicy") +public final class AccessPolicy { + /** + * the date-time the policy is active. + */ + @JsonProperty(value = "Start", required = true) + private OffsetDateTime start; + + /** + * the date-time the policy expires. + */ + @JsonProperty(value = "Expiry", required = true) + private OffsetDateTime expiry; + + /** + * the permissions for the acl policy. + */ + @JsonProperty(value = "Permission", required = true) + private String permission; + + /** + * Get the start value. + * + * @return the start value. + */ + public OffsetDateTime start() { + return this.start; + } + + /** + * Set the start value. + * + * @param start the start value to set. + * @return the AccessPolicy object itself. + */ + public AccessPolicy withStart(OffsetDateTime start) { + this.start = start; + return this; + } + + /** + * Get the expiry value. + * + * @return the expiry value. + */ + public OffsetDateTime expiry() { + return this.expiry; + } + + /** + * Set the expiry value. + * + * @param expiry the expiry value to set. + * @return the AccessPolicy object itself. + */ + public AccessPolicy withExpiry(OffsetDateTime expiry) { + this.expiry = expiry; + return this; + } + + /** + * Get the permission value. + * + * @return the permission value. + */ + public String permission() { + return this.permission; + } + + /** + * Set the permission value. + * + * @param permission the permission value to set. + * @return the AccessPolicy object itself. 
+ */ + public AccessPolicy withPermission(String permission) { + this.permission = permission; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/AccessTier.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/AccessTier.java new file mode 100644 index 0000000000000..52b6a041c8d0d --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/AccessTier.java @@ -0,0 +1,88 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.microsoft.rest.v2.ExpandableStringEnum; +import java.util.Collection; + +/** + * Defines values for AccessTier. + */ +public final class AccessTier extends ExpandableStringEnum { + /** + * Static value P4 for AccessTier. + */ + public static final AccessTier P4 = fromString("P4"); + + /** + * Static value P6 for AccessTier. + */ + public static final AccessTier P6 = fromString("P6"); + + /** + * Static value P10 for AccessTier. + */ + public static final AccessTier P10 = fromString("P10"); + + /** + * Static value P20 for AccessTier. + */ + public static final AccessTier P20 = fromString("P20"); + + /** + * Static value P30 for AccessTier. + */ + public static final AccessTier P30 = fromString("P30"); + + /** + * Static value P40 for AccessTier. + */ + public static final AccessTier P40 = fromString("P40"); + + /** + * Static value P50 for AccessTier. + */ + public static final AccessTier P50 = fromString("P50"); + + /** + * Static value Hot for AccessTier. 
+ */ + public static final AccessTier HOT = fromString("Hot"); + + /** + * Static value Cool for AccessTier. + */ + public static final AccessTier COOL = fromString("Cool"); + + /** + * Static value Archive for AccessTier. + */ + public static final AccessTier ARCHIVE = fromString("Archive"); + + /** + * Creates or finds a AccessTier from its string representation. + * + * @param name a name to look for. + * @return the corresponding AccessTier. + */ + @JsonCreator + public static AccessTier fromString(String name) { + return fromString(name, AccessTier.class); + } + + /** + * @return known AccessTier values. + */ + public static Collection values() { + return values(AccessTier.class); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/AccountKind.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/AccountKind.java new file mode 100644 index 0000000000000..c6bdc109cace1 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/AccountKind.java @@ -0,0 +1,66 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; + +/** + * Defines values for AccountKind. + */ +public enum AccountKind { + /** + * Enum value Storage. + */ + STORAGE("Storage"), + + /** + * Enum value BlobStorage. + */ + BLOB_STORAGE("BlobStorage"), + + /** + * Enum value StorageV2. + */ + STORAGE_V2("StorageV2"); + + /** + * The actual serialized value for a AccountKind instance. 
+ */ + private final String value; + + private AccountKind(String value) { + this.value = value; + } + + /** + * Parses a serialized value to a AccountKind instance. + * + * @param value the serialized value to parse. + * @return the parsed AccountKind object, or null if unable to parse. + */ + @JsonCreator + public static AccountKind fromString(String value) { + AccountKind[] items = AccountKind.values(); + for (AccountKind item : items) { + if (item.toString().equalsIgnoreCase(value)) { + return item; + } + } + return null; + } + + @JsonValue + @Override + public String toString() { + return this.value; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/AppendBlobAppendBlockHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/AppendBlobAppendBlockHeaders.java new file mode 100644 index 0000000000000..1a5fb28a1c0ab --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/AppendBlobAppendBlockHeaders.java @@ -0,0 +1,257 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for AppendBlock operation. + */ +@JacksonXmlRootElement(localName = "AppendBlob-AppendBlock-Headers") +public final class AppendBlobAppendBlockHeaders { + /** + * The ETag contains a value that you can use to perform operations + * conditionally. 
If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the container was last modified. Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * If the blob has an MD5 hash and this operation is to read the full blob, + * this response header is returned so that the client can check for + * message content integrity. + */ + @JsonProperty(value = "Content-MD5") + private byte[] contentMD5; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * This response header is returned only for append operations. It returns + * the offset at which the block was committed, in bytes. + */ + @JsonProperty(value = "x-ms-blob-append-offset") + private String blobAppendOffset; + + /** + * The number of committed blocks present in the blob. This header is + * returned only for append blobs. + */ + @JsonProperty(value = "x-ms-blob-committed-block-count") + private Integer blobCommittedBlockCount; + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. 
+ * @return the AppendBlobAppendBlockHeaders object itself. + */ + public AppendBlobAppendBlockHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the AppendBlobAppendBlockHeaders object itself. + */ + public AppendBlobAppendBlockHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the contentMD5 value. + * + * @return the contentMD5 value. + */ + public byte[] contentMD5() { + return this.contentMD5; + } + + /** + * Set the contentMD5 value. + * + * @param contentMD5 the contentMD5 value to set. + * @return the AppendBlobAppendBlockHeaders object itself. + */ + public AppendBlobAppendBlockHeaders withContentMD5(byte[] contentMD5) { + this.contentMD5 = contentMD5; + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the AppendBlobAppendBlockHeaders object itself. + */ + public AppendBlobAppendBlockHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the AppendBlobAppendBlockHeaders object itself. 
+ */ + public AppendBlobAppendBlockHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the AppendBlobAppendBlockHeaders object itself. + */ + public AppendBlobAppendBlockHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } + + /** + * Get the blobAppendOffset value. + * + * @return the blobAppendOffset value. + */ + public String blobAppendOffset() { + return this.blobAppendOffset; + } + + /** + * Set the blobAppendOffset value. + * + * @param blobAppendOffset the blobAppendOffset value to set. + * @return the AppendBlobAppendBlockHeaders object itself. + */ + public AppendBlobAppendBlockHeaders withBlobAppendOffset(String blobAppendOffset) { + this.blobAppendOffset = blobAppendOffset; + return this; + } + + /** + * Get the blobCommittedBlockCount value. + * + * @return the blobCommittedBlockCount value. + */ + public Integer blobCommittedBlockCount() { + return this.blobCommittedBlockCount; + } + + /** + * Set the blobCommittedBlockCount value. + * + * @param blobCommittedBlockCount the blobCommittedBlockCount value to set. + * @return the AppendBlobAppendBlockHeaders object itself. 
+ */ + public AppendBlobAppendBlockHeaders withBlobCommittedBlockCount(Integer blobCommittedBlockCount) { + this.blobCommittedBlockCount = blobCommittedBlockCount; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/AppendBlobAppendBlockResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/AppendBlobAppendBlockResponse.java new file mode 100644 index 0000000000000..f612a47ecaa53 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/AppendBlobAppendBlockResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the appendBlock operation. + */ +public final class AppendBlobAppendBlockResponse extends RestResponse { + /** + * Creates an instance of AppendBlobAppendBlockResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public AppendBlobAppendBlockResponse(HttpRequest request, int statusCode, AppendBlobAppendBlockHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. 
+ */ + @Override + public AppendBlobAppendBlockHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/AppendBlobCreateHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/AppendBlobCreateHeaders.java new file mode 100644 index 0000000000000..128a14de454b7 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/AppendBlobCreateHeaders.java @@ -0,0 +1,231 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for Create operation. + */ +@JacksonXmlRootElement(localName = "AppendBlob-Create-Headers") +public final class AppendBlobCreateHeaders { + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the container was last modified. Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. 
+ */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * If the blob has an MD5 hash and this operation is to read the full blob, + * this response header is returned so that the client can check for + * message content integrity. + */ + @JsonProperty(value = "Content-MD5") + private byte[] contentMD5; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * The value of this header is set to true if the contents of the request + * are successfully encrypted using the specified algorithm, and false + * otherwise. + */ + @JsonProperty(value = "x-ms-request-server-encrypted") + private Boolean isServerEncrypted; + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the AppendBlobCreateHeaders object itself. + */ + public AppendBlobCreateHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the AppendBlobCreateHeaders object itself. 
+ */ + public AppendBlobCreateHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the contentMD5 value. + * + * @return the contentMD5 value. + */ + public byte[] contentMD5() { + return this.contentMD5; + } + + /** + * Set the contentMD5 value. + * + * @param contentMD5 the contentMD5 value to set. + * @return the AppendBlobCreateHeaders object itself. + */ + public AppendBlobCreateHeaders withContentMD5(byte[] contentMD5) { + this.contentMD5 = contentMD5; + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the AppendBlobCreateHeaders object itself. + */ + public AppendBlobCreateHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the AppendBlobCreateHeaders object itself. + */ + public AppendBlobCreateHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the AppendBlobCreateHeaders object itself. + */ + public AppendBlobCreateHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } + + /** + * Get the isServerEncrypted value. 
+ * + * @return the isServerEncrypted value. + */ + public Boolean isServerEncrypted() { + return this.isServerEncrypted; + } + + /** + * Set the isServerEncrypted value. + * + * @param isServerEncrypted the isServerEncrypted value to set. + * @return the AppendBlobCreateHeaders object itself. + */ + public AppendBlobCreateHeaders withIsServerEncrypted(Boolean isServerEncrypted) { + this.isServerEncrypted = isServerEncrypted; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/AppendBlobCreateResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/AppendBlobCreateResponse.java new file mode 100644 index 0000000000000..41b2b3b6df994 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/AppendBlobCreateResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the create operation. + */ +public final class AppendBlobCreateResponse extends RestResponse { + /** + * Creates an instance of AppendBlobCreateResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. 
+ */ + public AppendBlobCreateResponse(HttpRequest request, int statusCode, AppendBlobCreateHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public AppendBlobCreateHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/AppendPositionAccessConditions.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/AppendPositionAccessConditions.java new file mode 100644 index 0000000000000..761b7fe5e63a7 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/AppendPositionAccessConditions.java @@ -0,0 +1,82 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; + +/** + * Additional parameters for appendBlock operation. + */ +@JacksonXmlRootElement(localName = "append-position-access-conditions") +public final class AppendPositionAccessConditions { + /** + * Optional conditional header. The max length in bytes permitted for the + * append blob. If the Append Block operation would cause the blob to + * exceed that limit or if the blob size is already greater than the value + * specified in this header, the request will fail with + * MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition + * Failed). 
+ */ + @JsonProperty(value = "MaxSize") + private Long maxSize; + + /** + * Optional conditional header, used only for the Append Block operation. A + * number indicating the byte offset to compare. Append Block will succeed + * only if the append position is equal to this number. If it is not, the + * request will fail with the AppendPositionConditionNotMet error (HTTP + * status code 412 - Precondition Failed). + */ + @JsonProperty(value = "AppendPosition") + private Long appendPosition; + + /** + * Get the maxSize value. + * + * @return the maxSize value. + */ + public Long maxSize() { + return this.maxSize; + } + + /** + * Set the maxSize value. + * + * @param maxSize the maxSize value to set. + * @return the AppendPositionAccessConditions object itself. + */ + public AppendPositionAccessConditions withMaxSize(Long maxSize) { + this.maxSize = maxSize; + return this; + } + + /** + * Get the appendPosition value. + * + * @return the appendPosition value. + */ + public Long appendPosition() { + return this.appendPosition; + } + + /** + * Set the appendPosition value. + * + * @param appendPosition the appendPosition value to set. + * @return the AppendPositionAccessConditions object itself. + */ + public AppendPositionAccessConditions withAppendPosition(Long appendPosition) { + this.appendPosition = appendPosition; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ArchiveStatus.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ArchiveStatus.java new file mode 100644 index 0000000000000..3da5ba56ed35e --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ArchiveStatus.java @@ -0,0 +1,48 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. 
+ * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.microsoft.rest.v2.ExpandableStringEnum; +import java.util.Collection; + +/** + * Defines values for ArchiveStatus. + */ +public final class ArchiveStatus extends ExpandableStringEnum { + /** + * Static value rehydrate-pending-to-hot for ArchiveStatus. + */ + public static final ArchiveStatus REHYDRATE_PENDING_TO_HOT = fromString("rehydrate-pending-to-hot"); + + /** + * Static value rehydrate-pending-to-cool for ArchiveStatus. + */ + public static final ArchiveStatus REHYDRATE_PENDING_TO_COOL = fromString("rehydrate-pending-to-cool"); + + /** + * Creates or finds a ArchiveStatus from its string representation. + * + * @param name a name to look for. + * @return the corresponding ArchiveStatus. + */ + @JsonCreator + public static ArchiveStatus fromString(String name) { + return fromString(name, ArchiveStatus.class); + } + + /** + * @return known ArchiveStatus values. + */ + public static Collection values() { + return values(ArchiveStatus.class); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobAbortCopyFromURLHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobAbortCopyFromURLHeaders.java new file mode 100644 index 0000000000000..a1380974f0a23 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobAbortCopyFromURLHeaders.java @@ -0,0 +1,112 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for AbortCopyFromURL operation. + */ +@JacksonXmlRootElement(localName = "Blob-AbortCopyFromURL-Headers") +public final class BlobAbortCopyFromURLHeaders { + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the BlobAbortCopyFromURLHeaders object itself. + */ + public BlobAbortCopyFromURLHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the BlobAbortCopyFromURLHeaders object itself. + */ + public BlobAbortCopyFromURLHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. 
+ * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the BlobAbortCopyFromURLHeaders object itself. + */ + public BlobAbortCopyFromURLHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobAbortCopyFromURLResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobAbortCopyFromURLResponse.java new file mode 100644 index 0000000000000..09f57267623a7 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobAbortCopyFromURLResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the abortCopyFromURL operation. + */ +public final class BlobAbortCopyFromURLResponse extends RestResponse { + /** + * Creates an instance of BlobAbortCopyFromURLResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. 
+ */ + public BlobAbortCopyFromURLResponse(HttpRequest request, int statusCode, BlobAbortCopyFromURLHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public BlobAbortCopyFromURLHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobAcquireLeaseHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobAcquireLeaseHeaders.java new file mode 100644 index 0000000000000..b3fa9ebc3b991 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobAcquireLeaseHeaders.java @@ -0,0 +1,201 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for AcquireLease operation. + */ +@JacksonXmlRootElement(localName = "Blob-AcquireLease-Headers") +public final class BlobAcquireLeaseHeaders { + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the blob was last modified. 
Any operation that + * modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * Uniquely identifies a blobs's lease. + */ + @JsonProperty(value = "x-ms-lease-id") + private String leaseId; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the BlobAcquireLeaseHeaders object itself. + */ + public BlobAcquireLeaseHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the BlobAcquireLeaseHeaders object itself. + */ + public BlobAcquireLeaseHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the leaseId value. 
+ * + * @return the leaseId value. + */ + public String leaseId() { + return this.leaseId; + } + + /** + * Set the leaseId value. + * + * @param leaseId the leaseId value to set. + * @return the BlobAcquireLeaseHeaders object itself. + */ + public BlobAcquireLeaseHeaders withLeaseId(String leaseId) { + this.leaseId = leaseId; + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the BlobAcquireLeaseHeaders object itself. + */ + public BlobAcquireLeaseHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the BlobAcquireLeaseHeaders object itself. + */ + public BlobAcquireLeaseHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the BlobAcquireLeaseHeaders object itself. 
+ */ + public BlobAcquireLeaseHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobAcquireLeaseResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobAcquireLeaseResponse.java new file mode 100644 index 0000000000000..aa08a418244d6 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobAcquireLeaseResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the acquireLease operation. + */ +public final class BlobAcquireLeaseResponse extends RestResponse { + /** + * Creates an instance of BlobAcquireLeaseResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public BlobAcquireLeaseResponse(HttpRequest request, int statusCode, BlobAcquireLeaseHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. 
+ */ + @Override + public BlobAcquireLeaseHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobBreakLeaseHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobBreakLeaseHeaders.java new file mode 100644 index 0000000000000..0c0bbcec7f955 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobBreakLeaseHeaders.java @@ -0,0 +1,201 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for BreakLease operation. + */ +@JacksonXmlRootElement(localName = "Blob-BreakLease-Headers") +public final class BlobBreakLeaseHeaders { + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the blob was last modified. Any operation that + * modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * Approximate time remaining in the lease period, in seconds. 
+ */ + @JsonProperty(value = "x-ms-lease-time") + private Integer leaseTime; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the BlobBreakLeaseHeaders object itself. + */ + public BlobBreakLeaseHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the BlobBreakLeaseHeaders object itself. + */ + public BlobBreakLeaseHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the leaseTime value. + * + * @return the leaseTime value. + */ + public Integer leaseTime() { + return this.leaseTime; + } + + /** + * Set the leaseTime value. + * + * @param leaseTime the leaseTime value to set. + * @return the BlobBreakLeaseHeaders object itself. 
+ */ + public BlobBreakLeaseHeaders withLeaseTime(Integer leaseTime) { + this.leaseTime = leaseTime; + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the BlobBreakLeaseHeaders object itself. + */ + public BlobBreakLeaseHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the BlobBreakLeaseHeaders object itself. + */ + public BlobBreakLeaseHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the BlobBreakLeaseHeaders object itself. + */ + public BlobBreakLeaseHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobBreakLeaseResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobBreakLeaseResponse.java new file mode 100644 index 0000000000000..53c9e84960edb --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobBreakLeaseResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. 
+ * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the breakLease operation. + */ +public final class BlobBreakLeaseResponse extends RestResponse { + /** + * Creates an instance of BlobBreakLeaseResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public BlobBreakLeaseResponse(HttpRequest request, int statusCode, BlobBreakLeaseHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public BlobBreakLeaseHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobChangeLeaseHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobChangeLeaseHeaders.java new file mode 100644 index 0000000000000..55fdf6cf92e34 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobChangeLeaseHeaders.java @@ -0,0 +1,201 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for ChangeLease operation. + */ +@JacksonXmlRootElement(localName = "Blob-ChangeLease-Headers") +public final class BlobChangeLeaseHeaders { + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the blob was last modified. Any operation that + * modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Uniquely identifies a blobs's lease. + */ + @JsonProperty(value = "x-ms-lease-id") + private String leaseId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. 
+ * + * @param eTag the eTag value to set. + * @return the BlobChangeLeaseHeaders object itself. + */ + public BlobChangeLeaseHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the BlobChangeLeaseHeaders object itself. + */ + public BlobChangeLeaseHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the BlobChangeLeaseHeaders object itself. + */ + public BlobChangeLeaseHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the leaseId value. + * + * @return the leaseId value. + */ + public String leaseId() { + return this.leaseId; + } + + /** + * Set the leaseId value. + * + * @param leaseId the leaseId value to set. + * @return the BlobChangeLeaseHeaders object itself. + */ + public BlobChangeLeaseHeaders withLeaseId(String leaseId) { + this.leaseId = leaseId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the BlobChangeLeaseHeaders object itself. 
+ */ + public BlobChangeLeaseHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the BlobChangeLeaseHeaders object itself. + */ + public BlobChangeLeaseHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobChangeLeaseResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobChangeLeaseResponse.java new file mode 100644 index 0000000000000..91ce29d40c36a --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobChangeLeaseResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the changeLease operation. + */ +public final class BlobChangeLeaseResponse extends RestResponse { + /** + * Creates an instance of BlobChangeLeaseResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. 
+ * @param body the deserialized body of the HTTP response. + */ + public BlobChangeLeaseResponse(HttpRequest request, int statusCode, BlobChangeLeaseHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public BlobChangeLeaseHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobCopyFromURLHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobCopyFromURLHeaders.java new file mode 100644 index 0000000000000..94c4adbcd5a92 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobCopyFromURLHeaders.java @@ -0,0 +1,228 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for CopyFromURL operation. + */ +@JacksonXmlRootElement(localName = "Blob-CopyFromURL-Headers") +public final class BlobCopyFromURLHeaders { + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the container was last modified. 
Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * String identifier for this copy operation. + */ + @JsonProperty(value = "x-ms-copy-id") + private String copyId; + + /** + * State of the copy operation identified by x-ms-copy-id. Possible values + * include: 'success'. + */ + @JsonProperty(value = "x-ms-copy-status") + private SyncCopyStatusType copyStatus; + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the BlobCopyFromURLHeaders object itself. + */ + public BlobCopyFromURLHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the BlobCopyFromURLHeaders object itself. 
+ */ + public BlobCopyFromURLHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the BlobCopyFromURLHeaders object itself. + */ + public BlobCopyFromURLHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the BlobCopyFromURLHeaders object itself. + */ + public BlobCopyFromURLHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the BlobCopyFromURLHeaders object itself. + */ + public BlobCopyFromURLHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } + + /** + * Get the copyId value. + * + * @return the copyId value. + */ + public String copyId() { + return this.copyId; + } + + /** + * Set the copyId value. + * + * @param copyId the copyId value to set. + * @return the BlobCopyFromURLHeaders object itself. + */ + public BlobCopyFromURLHeaders withCopyId(String copyId) { + this.copyId = copyId; + return this; + } + + /** + * Get the copyStatus value. + * + * @return the copyStatus value. 
+ */ + public SyncCopyStatusType copyStatus() { + return this.copyStatus; + } + + /** + * Set the copyStatus value. + * + * @param copyStatus the copyStatus value to set. + * @return the BlobCopyFromURLHeaders object itself. + */ + public BlobCopyFromURLHeaders withCopyStatus(SyncCopyStatusType copyStatus) { + this.copyStatus = copyStatus; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobCopyFromURLResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobCopyFromURLResponse.java new file mode 100644 index 0000000000000..6f9be366745ed --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobCopyFromURLResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the copyFromURL operation. + */ +public final class BlobCopyFromURLResponse extends RestResponse { + /** + * Creates an instance of BlobCopyFromURLResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. 
+ */ + public BlobCopyFromURLResponse(HttpRequest request, int statusCode, BlobCopyFromURLHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public BlobCopyFromURLHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobCreateSnapshotHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobCreateSnapshotHeaders.java new file mode 100644 index 0000000000000..10a8abc8463b1 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobCreateSnapshotHeaders.java @@ -0,0 +1,202 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for CreateSnapshot operation. + */ +@JacksonXmlRootElement(localName = "Blob-CreateSnapshot-Headers") +public final class BlobCreateSnapshotHeaders { + /** + * Uniquely identifies the snapshot and indicates the snapshot version. It + * may be used in subsequent requests to access the snapshot. + */ + @JsonProperty(value = "x-ms-snapshot") + private String snapshot; + + /** + * The ETag contains a value that you can use to perform operations + * conditionally. 
If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the container was last modified. Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the snapshot value. + * + * @return the snapshot value. + */ + public String snapshot() { + return this.snapshot; + } + + /** + * Set the snapshot value. + * + * @param snapshot the snapshot value to set. + * @return the BlobCreateSnapshotHeaders object itself. + */ + public BlobCreateSnapshotHeaders withSnapshot(String snapshot) { + this.snapshot = snapshot; + return this; + } + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the BlobCreateSnapshotHeaders object itself. + */ + public BlobCreateSnapshotHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. 
+ */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the BlobCreateSnapshotHeaders object itself. + */ + public BlobCreateSnapshotHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the BlobCreateSnapshotHeaders object itself. + */ + public BlobCreateSnapshotHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the BlobCreateSnapshotHeaders object itself. + */ + public BlobCreateSnapshotHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the BlobCreateSnapshotHeaders object itself. 
+ */ + public BlobCreateSnapshotHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobCreateSnapshotResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobCreateSnapshotResponse.java new file mode 100644 index 0000000000000..d4e3ece3cc7d6 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobCreateSnapshotResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the createSnapshot operation. + */ +public final class BlobCreateSnapshotResponse extends RestResponse { + /** + * Creates an instance of BlobCreateSnapshotResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public BlobCreateSnapshotResponse(HttpRequest request, int statusCode, BlobCreateSnapshotHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. 
+ */ + @Override + public BlobCreateSnapshotHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobDeleteHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobDeleteHeaders.java new file mode 100644 index 0000000000000..b0af90dc32eb0 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobDeleteHeaders.java @@ -0,0 +1,112 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for Delete operation. + */ +@JacksonXmlRootElement(localName = "Blob-Delete-Headers") +public final class BlobDeleteHeaders { + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the requestId value. 
+ * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the BlobDeleteHeaders object itself. + */ + public BlobDeleteHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the BlobDeleteHeaders object itself. + */ + public BlobDeleteHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the BlobDeleteHeaders object itself. + */ + public BlobDeleteHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobDeleteResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobDeleteResponse.java new file mode 100644 index 0000000000000..dd452f42b26d3 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobDeleteResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the delete operation. + */ +public final class BlobDeleteResponse extends RestResponse { + /** + * Creates an instance of BlobDeleteResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public BlobDeleteResponse(HttpRequest request, int statusCode, BlobDeleteHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public BlobDeleteHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobDownloadHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobDownloadHeaders.java new file mode 100644 index 0000000000000..9f3c69a29b906 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobDownloadHeaders.java @@ -0,0 +1,857 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import com.microsoft.rest.v2.annotations.HeaderCollection; +import java.time.OffsetDateTime; +import java.util.Map; + +/** + * Defines headers for Download operation. + */ +@JacksonXmlRootElement(localName = "Blob-Download-Headers") +public final class BlobDownloadHeaders { + /** + * Returns the date and time the container was last modified. Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * The metadata property. + */ + @HeaderCollection("x-ms-meta-") + private Map metadata; + + /** + * The number of bytes present in the response body. + */ + @JsonProperty(value = "Content-Length") + private Long contentLength; + + /** + * The media type of the body of the response. For Download Blob this is + * 'application/octet-stream'. + */ + @JsonProperty(value = "Content-Type") + private String contentType; + + /** + * Indicates the range of bytes returned in the event that the client + * requested a subset of the blob by setting the 'Range' request header. + */ + @JsonProperty(value = "Content-Range") + private String contentRange; + + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * If the blob has an MD5 hash and this operation is to read the full blob, + * this response header is returned so that the client can check for + * message content integrity. 
+ */ + @JsonProperty(value = "Content-MD5") + private byte[] contentMD5; + + /** + * This header returns the value that was specified for the + * Content-Encoding request header. + */ + @JsonProperty(value = "Content-Encoding") + private String contentEncoding; + + /** + * This header is returned if it was previously specified for the blob. + */ + @JsonProperty(value = "Cache-Control") + private String cacheControl; + + /** + * This header returns the value that was specified for the + * 'x-ms-blob-content-disposition' header. The Content-Disposition response + * header field conveys additional information about how to process the + * response payload, and also can be used to attach additional metadata. + * For example, if set to attachment, it indicates that the user-agent + * should not display the response, but instead show a Save As dialog with + * a filename other than the blob name specified. + */ + @JsonProperty(value = "Content-Disposition") + private String contentDisposition; + + /** + * This header returns the value that was specified for the + * Content-Language request header. + */ + @JsonProperty(value = "Content-Language") + private String contentLanguage; + + /** + * The current sequence number for a page blob. This header is not returned + * for block blobs or append blobs. + */ + @JsonProperty(value = "x-ms-blob-sequence-number") + private Long blobSequenceNumber; + + /** + * The blob's type. Possible values include: 'BlockBlob', 'PageBlob', + * 'AppendBlob'. + */ + @JsonProperty(value = "x-ms-blob-type") + private BlobType blobType; + + /** + * Conclusion time of the last attempted Copy Blob operation where this + * blob was the destination blob. This value can specify the time of a + * completed, aborted, or failed copy attempt. 
This header does not appear + * if a copy is pending, if this blob has never been the destination in a + * Copy Blob operation, or if this blob has been modified after a concluded + * Copy Blob operation using Set Blob Properties, Put Blob, or Put Block + * List. + */ + @JsonProperty(value = "x-ms-copy-completion-time") + private DateTimeRfc1123 copyCompletionTime; + + /** + * Only appears when x-ms-copy-status is failed or pending. Describes the + * cause of the last fatal or non-fatal copy operation failure. This header + * does not appear if this blob has never been the destination in a Copy + * Blob operation, or if this blob has been modified after a concluded Copy + * Blob operation using Set Blob Properties, Put Blob, or Put Block List. + */ + @JsonProperty(value = "x-ms-copy-status-description") + private String copyStatusDescription; + + /** + * String identifier for this copy operation. Use with Get Blob Properties + * to check the status of this copy operation, or pass to Abort Copy Blob + * to abort a pending copy. + */ + @JsonProperty(value = "x-ms-copy-id") + private String copyId; + + /** + * Contains the number of bytes copied and the total bytes in the source in + * the last attempted Copy Blob operation where this blob was the + * destination blob. Can show between 0 and Content-Length bytes copied. + * This header does not appear if this blob has never been the destination + * in a Copy Blob operation, or if this blob has been modified after a + * concluded Copy Blob operation using Set Blob Properties, Put Blob, or + * Put Block List. + */ + @JsonProperty(value = "x-ms-copy-progress") + private String copyProgress; + + /** + * URL up to 2 KB in length that specifies the source blob or file used in + * the last attempted Copy Blob operation where this blob was the + * destination blob. 
This header does not appear if this blob has never + * been the destination in a Copy Blob operation, or if this blob has been + * modified after a concluded Copy Blob operation using Set Blob + * Properties, Put Blob, or Put Block List. + */ + @JsonProperty(value = "x-ms-copy-source") + private String copySource; + + /** + * State of the copy operation identified by x-ms-copy-id. Possible values + * include: 'pending', 'success', 'aborted', 'failed'. + */ + @JsonProperty(value = "x-ms-copy-status") + private CopyStatusType copyStatus; + + /** + * When a blob is leased, specifies whether the lease is of infinite or + * fixed duration. Possible values include: 'infinite', 'fixed'. + */ + @JsonProperty(value = "x-ms-lease-duration") + private LeaseDurationType leaseDuration; + + /** + * Lease state of the blob. Possible values include: 'available', 'leased', + * 'expired', 'breaking', 'broken'. + */ + @JsonProperty(value = "x-ms-lease-state") + private LeaseStateType leaseState; + + /** + * The current lease status of the blob. Possible values include: 'locked', + * 'unlocked'. + */ + @JsonProperty(value = "x-ms-lease-status") + private LeaseStatusType leaseStatus; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * Indicates that the service supports requests for partial blob content. + */ + @JsonProperty(value = "Accept-Ranges") + private String acceptRanges; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. 
+ */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * The number of committed blocks present in the blob. This header is + * returned only for append blobs. + */ + @JsonProperty(value = "x-ms-blob-committed-block-count") + private Integer blobCommittedBlockCount; + + /** + * The value of this header is set to true if the blob data and application + * metadata are completely encrypted using the specified algorithm. + * Otherwise, the value is set to false (when the blob is unencrypted, or + * if only parts of the blob/application metadata are encrypted). + */ + @JsonProperty(value = "x-ms-server-encrypted") + private Boolean isServerEncrypted; + + /** + * If the blob has a MD5 hash, and if request contains range header (Range + * or x-ms-range), this response header is returned with the value of the + * whole blob's MD5 value. This value may or may not be equal to the value + * returned in Content-MD5 header, with the latter calculated from the + * requested range. + */ + @JsonProperty(value = "x-ms-blob-content-md5") + private byte[] blobContentMD5; + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the BlobDownloadHeaders object itself. + */ + public BlobDownloadHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the metadata value. + * + * @return the metadata value. + */ + public Map metadata() { + return this.metadata; + } + + /** + * Set the metadata value. + * + * @param metadata the metadata value to set. + * @return the BlobDownloadHeaders object itself. 
+ */ + public BlobDownloadHeaders withMetadata(Map metadata) { + this.metadata = metadata; + return this; + } + + /** + * Get the contentLength value. + * + * @return the contentLength value. + */ + public Long contentLength() { + return this.contentLength; + } + + /** + * Set the contentLength value. + * + * @param contentLength the contentLength value to set. + * @return the BlobDownloadHeaders object itself. + */ + public BlobDownloadHeaders withContentLength(Long contentLength) { + this.contentLength = contentLength; + return this; + } + + /** + * Get the contentType value. + * + * @return the contentType value. + */ + public String contentType() { + return this.contentType; + } + + /** + * Set the contentType value. + * + * @param contentType the contentType value to set. + * @return the BlobDownloadHeaders object itself. + */ + public BlobDownloadHeaders withContentType(String contentType) { + this.contentType = contentType; + return this; + } + + /** + * Get the contentRange value. + * + * @return the contentRange value. + */ + public String contentRange() { + return this.contentRange; + } + + /** + * Set the contentRange value. + * + * @param contentRange the contentRange value to set. + * @return the BlobDownloadHeaders object itself. + */ + public BlobDownloadHeaders withContentRange(String contentRange) { + this.contentRange = contentRange; + return this; + } + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the BlobDownloadHeaders object itself. + */ + public BlobDownloadHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the contentMD5 value. + * + * @return the contentMD5 value. + */ + public byte[] contentMD5() { + return this.contentMD5; + } + + /** + * Set the contentMD5 value. + * + * @param contentMD5 the contentMD5 value to set. 
+ * @return the BlobDownloadHeaders object itself. + */ + public BlobDownloadHeaders withContentMD5(byte[] contentMD5) { + this.contentMD5 = contentMD5; + return this; + } + + /** + * Get the contentEncoding value. + * + * @return the contentEncoding value. + */ + public String contentEncoding() { + return this.contentEncoding; + } + + /** + * Set the contentEncoding value. + * + * @param contentEncoding the contentEncoding value to set. + * @return the BlobDownloadHeaders object itself. + */ + public BlobDownloadHeaders withContentEncoding(String contentEncoding) { + this.contentEncoding = contentEncoding; + return this; + } + + /** + * Get the cacheControl value. + * + * @return the cacheControl value. + */ + public String cacheControl() { + return this.cacheControl; + } + + /** + * Set the cacheControl value. + * + * @param cacheControl the cacheControl value to set. + * @return the BlobDownloadHeaders object itself. + */ + public BlobDownloadHeaders withCacheControl(String cacheControl) { + this.cacheControl = cacheControl; + return this; + } + + /** + * Get the contentDisposition value. + * + * @return the contentDisposition value. + */ + public String contentDisposition() { + return this.contentDisposition; + } + + /** + * Set the contentDisposition value. + * + * @param contentDisposition the contentDisposition value to set. + * @return the BlobDownloadHeaders object itself. + */ + public BlobDownloadHeaders withContentDisposition(String contentDisposition) { + this.contentDisposition = contentDisposition; + return this; + } + + /** + * Get the contentLanguage value. + * + * @return the contentLanguage value. + */ + public String contentLanguage() { + return this.contentLanguage; + } + + /** + * Set the contentLanguage value. + * + * @param contentLanguage the contentLanguage value to set. + * @return the BlobDownloadHeaders object itself. 
+ */ + public BlobDownloadHeaders withContentLanguage(String contentLanguage) { + this.contentLanguage = contentLanguage; + return this; + } + + /** + * Get the blobSequenceNumber value. + * + * @return the blobSequenceNumber value. + */ + public Long blobSequenceNumber() { + return this.blobSequenceNumber; + } + + /** + * Set the blobSequenceNumber value. + * + * @param blobSequenceNumber the blobSequenceNumber value to set. + * @return the BlobDownloadHeaders object itself. + */ + public BlobDownloadHeaders withBlobSequenceNumber(Long blobSequenceNumber) { + this.blobSequenceNumber = blobSequenceNumber; + return this; + } + + /** + * Get the blobType value. + * + * @return the blobType value. + */ + public BlobType blobType() { + return this.blobType; + } + + /** + * Set the blobType value. + * + * @param blobType the blobType value to set. + * @return the BlobDownloadHeaders object itself. + */ + public BlobDownloadHeaders withBlobType(BlobType blobType) { + this.blobType = blobType; + return this; + } + + /** + * Get the copyCompletionTime value. + * + * @return the copyCompletionTime value. + */ + public OffsetDateTime copyCompletionTime() { + if (this.copyCompletionTime == null) { + return null; + } + return this.copyCompletionTime.dateTime(); + } + + /** + * Set the copyCompletionTime value. + * + * @param copyCompletionTime the copyCompletionTime value to set. + * @return the BlobDownloadHeaders object itself. + */ + public BlobDownloadHeaders withCopyCompletionTime(OffsetDateTime copyCompletionTime) { + if (copyCompletionTime == null) { + this.copyCompletionTime = null; + } else { + this.copyCompletionTime = new DateTimeRfc1123(copyCompletionTime); + } + return this; + } + + /** + * Get the copyStatusDescription value. + * + * @return the copyStatusDescription value. + */ + public String copyStatusDescription() { + return this.copyStatusDescription; + } + + /** + * Set the copyStatusDescription value. 
+ * + * @param copyStatusDescription the copyStatusDescription value to set. + * @return the BlobDownloadHeaders object itself. + */ + public BlobDownloadHeaders withCopyStatusDescription(String copyStatusDescription) { + this.copyStatusDescription = copyStatusDescription; + return this; + } + + /** + * Get the copyId value. + * + * @return the copyId value. + */ + public String copyId() { + return this.copyId; + } + + /** + * Set the copyId value. + * + * @param copyId the copyId value to set. + * @return the BlobDownloadHeaders object itself. + */ + public BlobDownloadHeaders withCopyId(String copyId) { + this.copyId = copyId; + return this; + } + + /** + * Get the copyProgress value. + * + * @return the copyProgress value. + */ + public String copyProgress() { + return this.copyProgress; + } + + /** + * Set the copyProgress value. + * + * @param copyProgress the copyProgress value to set. + * @return the BlobDownloadHeaders object itself. + */ + public BlobDownloadHeaders withCopyProgress(String copyProgress) { + this.copyProgress = copyProgress; + return this; + } + + /** + * Get the copySource value. + * + * @return the copySource value. + */ + public String copySource() { + return this.copySource; + } + + /** + * Set the copySource value. + * + * @param copySource the copySource value to set. + * @return the BlobDownloadHeaders object itself. + */ + public BlobDownloadHeaders withCopySource(String copySource) { + this.copySource = copySource; + return this; + } + + /** + * Get the copyStatus value. + * + * @return the copyStatus value. + */ + public CopyStatusType copyStatus() { + return this.copyStatus; + } + + /** + * Set the copyStatus value. + * + * @param copyStatus the copyStatus value to set. + * @return the BlobDownloadHeaders object itself. + */ + public BlobDownloadHeaders withCopyStatus(CopyStatusType copyStatus) { + this.copyStatus = copyStatus; + return this; + } + + /** + * Get the leaseDuration value. + * + * @return the leaseDuration value. 
+ */ + public LeaseDurationType leaseDuration() { + return this.leaseDuration; + } + + /** + * Set the leaseDuration value. + * + * @param leaseDuration the leaseDuration value to set. + * @return the BlobDownloadHeaders object itself. + */ + public BlobDownloadHeaders withLeaseDuration(LeaseDurationType leaseDuration) { + this.leaseDuration = leaseDuration; + return this; + } + + /** + * Get the leaseState value. + * + * @return the leaseState value. + */ + public LeaseStateType leaseState() { + return this.leaseState; + } + + /** + * Set the leaseState value. + * + * @param leaseState the leaseState value to set. + * @return the BlobDownloadHeaders object itself. + */ + public BlobDownloadHeaders withLeaseState(LeaseStateType leaseState) { + this.leaseState = leaseState; + return this; + } + + /** + * Get the leaseStatus value. + * + * @return the leaseStatus value. + */ + public LeaseStatusType leaseStatus() { + return this.leaseStatus; + } + + /** + * Set the leaseStatus value. + * + * @param leaseStatus the leaseStatus value to set. + * @return the BlobDownloadHeaders object itself. + */ + public BlobDownloadHeaders withLeaseStatus(LeaseStatusType leaseStatus) { + this.leaseStatus = leaseStatus; + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the BlobDownloadHeaders object itself. + */ + public BlobDownloadHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the BlobDownloadHeaders object itself. 
+ */ + public BlobDownloadHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the acceptRanges value. + * + * @return the acceptRanges value. + */ + public String acceptRanges() { + return this.acceptRanges; + } + + /** + * Set the acceptRanges value. + * + * @param acceptRanges the acceptRanges value to set. + * @return the BlobDownloadHeaders object itself. + */ + public BlobDownloadHeaders withAcceptRanges(String acceptRanges) { + this.acceptRanges = acceptRanges; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the BlobDownloadHeaders object itself. + */ + public BlobDownloadHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } + + /** + * Get the blobCommittedBlockCount value. + * + * @return the blobCommittedBlockCount value. + */ + public Integer blobCommittedBlockCount() { + return this.blobCommittedBlockCount; + } + + /** + * Set the blobCommittedBlockCount value. + * + * @param blobCommittedBlockCount the blobCommittedBlockCount value to set. + * @return the BlobDownloadHeaders object itself. + */ + public BlobDownloadHeaders withBlobCommittedBlockCount(Integer blobCommittedBlockCount) { + this.blobCommittedBlockCount = blobCommittedBlockCount; + return this; + } + + /** + * Get the isServerEncrypted value. + * + * @return the isServerEncrypted value. + */ + public Boolean isServerEncrypted() { + return this.isServerEncrypted; + } + + /** + * Set the isServerEncrypted value. + * + * @param isServerEncrypted the isServerEncrypted value to set. + * @return the BlobDownloadHeaders object itself. 
+ */ + public BlobDownloadHeaders withIsServerEncrypted(Boolean isServerEncrypted) { + this.isServerEncrypted = isServerEncrypted; + return this; + } + + /** + * Get the blobContentMD5 value. + * + * @return the blobContentMD5 value. + */ + public byte[] blobContentMD5() { + return this.blobContentMD5; + } + + /** + * Set the blobContentMD5 value. + * + * @param blobContentMD5 the blobContentMD5 value to set. + * @return the BlobDownloadHeaders object itself. + */ + public BlobDownloadHeaders withBlobContentMD5(byte[] blobContentMD5) { + this.blobContentMD5 = blobContentMD5; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobDownloadResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobDownloadResponse.java new file mode 100644 index 0000000000000..227b178df4202 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobDownloadResponse.java @@ -0,0 +1,61 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import io.reactivex.Flowable; +import io.reactivex.internal.functions.Functions; +import java.io.Closeable; +import java.nio.ByteBuffer; +import java.util.Map; + +/** + * Contains all response data for the download operation. + */ +public final class BlobDownloadResponse extends RestResponse> implements Closeable { + /** + * Creates an instance of BlobDownloadResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. 
+ * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the body content stream. + */ + public BlobDownloadResponse(HttpRequest request, int statusCode, BlobDownloadHeaders headers, Map rawHeaders, Flowable body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public BlobDownloadHeaders headers() { + return super.headers(); + } + + /** + * @return the response content stream. + */ + @Override + public Flowable body() { + return super.body(); + } + + /** + * Disposes of the connection associated with this stream response. + */ + @Override + public void close() { + body().subscribe(Functions.emptyConsumer(), Functions.emptyConsumer()).dispose(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobFlatListSegment.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobFlatListSegment.java new file mode 100644 index 0000000000000..8aa2b7bf370a2 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobFlatListSegment.java @@ -0,0 +1,49 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import java.util.ArrayList; +import java.util.List; + +/** + * The BlobFlatListSegment model. 
+ */ +@JacksonXmlRootElement(localName = "Blobs") +public final class BlobFlatListSegment { + /** + * The blobItems property. + */ + @JsonProperty("Blob") + private List blobItems = new ArrayList<>(); + + /** + * Get the blobItems value. + * + * @return the blobItems value. + */ + public List blobItems() { + return this.blobItems; + } + + /** + * Set the blobItems value. + * + * @param blobItems the blobItems value to set. + * @return the BlobFlatListSegment object itself. + */ + public BlobFlatListSegment withBlobItems(List blobItems) { + this.blobItems = blobItems; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobGetAccountInfoHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobGetAccountInfoHeaders.java new file mode 100644 index 0000000000000..87da30dcb6dd8 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobGetAccountInfoHeaders.java @@ -0,0 +1,167 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for GetAccountInfo operation. + */ +@JacksonXmlRootElement(localName = "Blob-GetAccountInfo-Headers") +public final class BlobGetAccountInfoHeaders { + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. 
+ */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Identifies the sku name of the account. Possible values include: + * 'Standard_LRS', 'Standard_GRS', 'Standard_RAGRS', 'Standard_ZRS', + * 'Premium_LRS'. + */ + @JsonProperty(value = "x-ms-sku-name") + private SkuName skuName; + + /** + * Identifies the account kind. Possible values include: 'Storage', + * 'BlobStorage', 'StorageV2'. + */ + @JsonProperty(value = "x-ms-account-kind") + private AccountKind accountKind; + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the BlobGetAccountInfoHeaders object itself. + */ + public BlobGetAccountInfoHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the BlobGetAccountInfoHeaders object itself. + */ + public BlobGetAccountInfoHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. 
+ * @return the BlobGetAccountInfoHeaders object itself. + */ + public BlobGetAccountInfoHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } + + /** + * Get the skuName value. + * + * @return the skuName value. + */ + public SkuName skuName() { + return this.skuName; + } + + /** + * Set the skuName value. + * + * @param skuName the skuName value to set. + * @return the BlobGetAccountInfoHeaders object itself. + */ + public BlobGetAccountInfoHeaders withSkuName(SkuName skuName) { + this.skuName = skuName; + return this; + } + + /** + * Get the accountKind value. + * + * @return the accountKind value. + */ + public AccountKind accountKind() { + return this.accountKind; + } + + /** + * Set the accountKind value. + * + * @param accountKind the accountKind value to set. + * @return the BlobGetAccountInfoHeaders object itself. + */ + public BlobGetAccountInfoHeaders withAccountKind(AccountKind accountKind) { + this.accountKind = accountKind; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobGetAccountInfoResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobGetAccountInfoResponse.java new file mode 100644 index 0000000000000..d774469ca0547 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobGetAccountInfoResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the getAccountInfo operation. + */ +public final class BlobGetAccountInfoResponse extends RestResponse { + /** + * Creates an instance of BlobGetAccountInfoResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public BlobGetAccountInfoResponse(HttpRequest request, int statusCode, BlobGetAccountInfoHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public BlobGetAccountInfoHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobGetPropertiesHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobGetPropertiesHeaders.java new file mode 100644 index 0000000000000..d787264eddbec --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobGetPropertiesHeaders.java @@ -0,0 +1,1008 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import com.microsoft.rest.v2.annotations.HeaderCollection; +import java.time.OffsetDateTime; +import java.util.Map; + +/** + * Defines headers for GetProperties operation. + */ +@JacksonXmlRootElement(localName = "Blob-GetProperties-Headers") +public final class BlobGetPropertiesHeaders { + /** + * Returns the date and time the blob was last modified. Any operation that + * modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * Returns the date and time the blob was created. + */ + @JsonProperty(value = "x-ms-creation-time") + private DateTimeRfc1123 creationTime; + + /** + * The metadata property. + */ + @HeaderCollection("x-ms-meta-") + private Map metadata; + + /** + * The blob's type. Possible values include: 'BlockBlob', 'PageBlob', + * 'AppendBlob'. + */ + @JsonProperty(value = "x-ms-blob-type") + private BlobType blobType; + + /** + * Conclusion time of the last attempted Copy Blob operation where this + * blob was the destination blob. This value can specify the time of a + * completed, aborted, or failed copy attempt. This header does not appear + * if a copy is pending, if this blob has never been the destination in a + * Copy Blob operation, or if this blob has been modified after a concluded + * Copy Blob operation using Set Blob Properties, Put Blob, or Put Block + * List. + */ + @JsonProperty(value = "x-ms-copy-completion-time") + private DateTimeRfc1123 copyCompletionTime; + + /** + * Only appears when x-ms-copy-status is failed or pending. 
Describes the + * cause of the last fatal or non-fatal copy operation failure. This header + * does not appear if this blob has never been the destination in a Copy + * Blob operation, or if this blob has been modified after a concluded Copy + * Blob operation using Set Blob Properties, Put Blob, or Put Block List. + */ + @JsonProperty(value = "x-ms-copy-status-description") + private String copyStatusDescription; + + /** + * String identifier for this copy operation. Use with Get Blob Properties + * to check the status of this copy operation, or pass to Abort Copy Blob + * to abort a pending copy. + */ + @JsonProperty(value = "x-ms-copy-id") + private String copyId; + + /** + * Contains the number of bytes copied and the total bytes in the source in + * the last attempted Copy Blob operation where this blob was the + * destination blob. Can show between 0 and Content-Length bytes copied. + * This header does not appear if this blob has never been the destination + * in a Copy Blob operation, or if this blob has been modified after a + * concluded Copy Blob operation using Set Blob Properties, Put Blob, or + * Put Block List. + */ + @JsonProperty(value = "x-ms-copy-progress") + private String copyProgress; + + /** + * URL up to 2 KB in length that specifies the source blob or file used in + * the last attempted Copy Blob operation where this blob was the + * destination blob. This header does not appear if this blob has never + * been the destination in a Copy Blob operation, or if this blob has been + * modified after a concluded Copy Blob operation using Set Blob + * Properties, Put Blob, or Put Block List. + */ + @JsonProperty(value = "x-ms-copy-source") + private String copySource; + + /** + * State of the copy operation identified by x-ms-copy-id. Possible values + * include: 'pending', 'success', 'aborted', 'failed'. + */ + @JsonProperty(value = "x-ms-copy-status") + private CopyStatusType copyStatus; + + /** + * Included if the blob is incremental copy blob. 
+ */ + @JsonProperty(value = "x-ms-incremental-copy") + private Boolean isIncrementalCopy; + + /** + * Included if the blob is incremental copy blob or incremental copy + * snapshot, if x-ms-copy-status is success. Snapshot time of the last + * successful incremental copy snapshot for this blob. + */ + @JsonProperty(value = "x-ms-copy-destination-snapshot") + private String destinationSnapshot; + + /** + * When a blob is leased, specifies whether the lease is of infinite or + * fixed duration. Possible values include: 'infinite', 'fixed'. + */ + @JsonProperty(value = "x-ms-lease-duration") + private LeaseDurationType leaseDuration; + + /** + * Lease state of the blob. Possible values include: 'available', 'leased', + * 'expired', 'breaking', 'broken'. + */ + @JsonProperty(value = "x-ms-lease-state") + private LeaseStateType leaseState; + + /** + * The current lease status of the blob. Possible values include: 'locked', + * 'unlocked'. + */ + @JsonProperty(value = "x-ms-lease-status") + private LeaseStatusType leaseStatus; + + /** + * The number of bytes present in the response body. + */ + @JsonProperty(value = "Content-Length") + private Long contentLength; + + /** + * The content type specified for the blob. The default content type is + * 'application/octet-stream'. + */ + @JsonProperty(value = "Content-Type") + private String contentType; + + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * If the blob has an MD5 hash and this operation is to read the full blob, + * this response header is returned so that the client can check for + * message content integrity. + */ + @JsonProperty(value = "Content-MD5") + private byte[] contentMD5; + + /** + * This header returns the value that was specified for the + * Content-Encoding request header. 
+ */ + @JsonProperty(value = "Content-Encoding") + private String contentEncoding; + + /** + * This header returns the value that was specified for the + * 'x-ms-blob-content-disposition' header. The Content-Disposition response + * header field conveys additional information about how to process the + * response payload, and also can be used to attach additional metadata. + * For example, if set to attachment, it indicates that the user-agent + * should not display the response, but instead show a Save As dialog with + * a filename other than the blob name specified. + */ + @JsonProperty(value = "Content-Disposition") + private String contentDisposition; + + /** + * This header returns the value that was specified for the + * Content-Language request header. + */ + @JsonProperty(value = "Content-Language") + private String contentLanguage; + + /** + * This header is returned if it was previously specified for the blob. + */ + @JsonProperty(value = "Cache-Control") + private String cacheControl; + + /** + * The current sequence number for a page blob. This header is not returned + * for block blobs or append blobs. + */ + @JsonProperty(value = "x-ms-blob-sequence-number") + private Long blobSequenceNumber; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Indicates that the service supports requests for partial blob content. 
+ */ + @JsonProperty(value = "Accept-Ranges") + private String acceptRanges; + + /** + * The number of committed blocks present in the blob. This header is + * returned only for append blobs. + */ + @JsonProperty(value = "x-ms-blob-committed-block-count") + private Integer blobCommittedBlockCount; + + /** + * The value of this header is set to true if the blob data and application + * metadata are completely encrypted using the specified algorithm. + * Otherwise, the value is set to false (when the blob is unencrypted, or + * if only parts of the blob/application metadata are encrypted). + */ + @JsonProperty(value = "x-ms-server-encrypted") + private Boolean isServerEncrypted; + + /** + * The tier of page blob on a premium storage account or tier of block blob + * on blob storage LRS accounts. For a list of allowed premium page blob + * tiers, see + * https://docs.microsoft.com/en-us/azure/virtual-machines/windows/premium-storage#features. + * For blob storage LRS accounts, valid values are Hot/Cool/Archive. + */ + @JsonProperty(value = "x-ms-access-tier") + private String accessTier; + + /** + * For page blobs on a premium storage account only. If the access tier is + * not explicitly set on the blob, the tier is inferred based on its + * content length and this header will be returned with true value. + */ + @JsonProperty(value = "x-ms-access-tier-inferred") + private Boolean accessTierInferred; + + /** + * For blob storage LRS accounts, valid values are + * rehydrate-pending-to-hot/rehydrate-pending-to-cool. If the blob is being + * rehydrated and is not complete then this header is returned indicating + * that rehydrate is pending and also tells the destination tier. + */ + @JsonProperty(value = "x-ms-archive-status") + private String archiveStatus; + + /** + * The time the tier was changed on the object. This is only returned if + * the tier on the block blob was ever set. 
+ */ + @JsonProperty(value = "x-ms-access-tier-change-time") + private DateTimeRfc1123 accessTierChangeTime; + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the creationTime value. + * + * @return the creationTime value. + */ + public OffsetDateTime creationTime() { + if (this.creationTime == null) { + return null; + } + return this.creationTime.dateTime(); + } + + /** + * Set the creationTime value. + * + * @param creationTime the creationTime value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withCreationTime(OffsetDateTime creationTime) { + if (creationTime == null) { + this.creationTime = null; + } else { + this.creationTime = new DateTimeRfc1123(creationTime); + } + return this; + } + + /** + * Get the metadata value. + * + * @return the metadata value. + */ + public Map metadata() { + return this.metadata; + } + + /** + * Set the metadata value. + * + * @param metadata the metadata value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withMetadata(Map metadata) { + this.metadata = metadata; + return this; + } + + /** + * Get the blobType value. + * + * @return the blobType value. + */ + public BlobType blobType() { + return this.blobType; + } + + /** + * Set the blobType value. + * + * @param blobType the blobType value to set. 
+ * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withBlobType(BlobType blobType) { + this.blobType = blobType; + return this; + } + + /** + * Get the copyCompletionTime value. + * + * @return the copyCompletionTime value. + */ + public OffsetDateTime copyCompletionTime() { + if (this.copyCompletionTime == null) { + return null; + } + return this.copyCompletionTime.dateTime(); + } + + /** + * Set the copyCompletionTime value. + * + * @param copyCompletionTime the copyCompletionTime value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withCopyCompletionTime(OffsetDateTime copyCompletionTime) { + if (copyCompletionTime == null) { + this.copyCompletionTime = null; + } else { + this.copyCompletionTime = new DateTimeRfc1123(copyCompletionTime); + } + return this; + } + + /** + * Get the copyStatusDescription value. + * + * @return the copyStatusDescription value. + */ + public String copyStatusDescription() { + return this.copyStatusDescription; + } + + /** + * Set the copyStatusDescription value. + * + * @param copyStatusDescription the copyStatusDescription value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withCopyStatusDescription(String copyStatusDescription) { + this.copyStatusDescription = copyStatusDescription; + return this; + } + + /** + * Get the copyId value. + * + * @return the copyId value. + */ + public String copyId() { + return this.copyId; + } + + /** + * Set the copyId value. + * + * @param copyId the copyId value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withCopyId(String copyId) { + this.copyId = copyId; + return this; + } + + /** + * Get the copyProgress value. + * + * @return the copyProgress value. + */ + public String copyProgress() { + return this.copyProgress; + } + + /** + * Set the copyProgress value. 
+ * + * @param copyProgress the copyProgress value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withCopyProgress(String copyProgress) { + this.copyProgress = copyProgress; + return this; + } + + /** + * Get the copySource value. + * + * @return the copySource value. + */ + public String copySource() { + return this.copySource; + } + + /** + * Set the copySource value. + * + * @param copySource the copySource value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withCopySource(String copySource) { + this.copySource = copySource; + return this; + } + + /** + * Get the copyStatus value. + * + * @return the copyStatus value. + */ + public CopyStatusType copyStatus() { + return this.copyStatus; + } + + /** + * Set the copyStatus value. + * + * @param copyStatus the copyStatus value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withCopyStatus(CopyStatusType copyStatus) { + this.copyStatus = copyStatus; + return this; + } + + /** + * Get the isIncrementalCopy value. + * + * @return the isIncrementalCopy value. + */ + public Boolean isIncrementalCopy() { + return this.isIncrementalCopy; + } + + /** + * Set the isIncrementalCopy value. + * + * @param isIncrementalCopy the isIncrementalCopy value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withIsIncrementalCopy(Boolean isIncrementalCopy) { + this.isIncrementalCopy = isIncrementalCopy; + return this; + } + + /** + * Get the destinationSnapshot value. + * + * @return the destinationSnapshot value. + */ + public String destinationSnapshot() { + return this.destinationSnapshot; + } + + /** + * Set the destinationSnapshot value. + * + * @param destinationSnapshot the destinationSnapshot value to set. + * @return the BlobGetPropertiesHeaders object itself. 
+ */ + public BlobGetPropertiesHeaders withDestinationSnapshot(String destinationSnapshot) { + this.destinationSnapshot = destinationSnapshot; + return this; + } + + /** + * Get the leaseDuration value. + * + * @return the leaseDuration value. + */ + public LeaseDurationType leaseDuration() { + return this.leaseDuration; + } + + /** + * Set the leaseDuration value. + * + * @param leaseDuration the leaseDuration value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withLeaseDuration(LeaseDurationType leaseDuration) { + this.leaseDuration = leaseDuration; + return this; + } + + /** + * Get the leaseState value. + * + * @return the leaseState value. + */ + public LeaseStateType leaseState() { + return this.leaseState; + } + + /** + * Set the leaseState value. + * + * @param leaseState the leaseState value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withLeaseState(LeaseStateType leaseState) { + this.leaseState = leaseState; + return this; + } + + /** + * Get the leaseStatus value. + * + * @return the leaseStatus value. + */ + public LeaseStatusType leaseStatus() { + return this.leaseStatus; + } + + /** + * Set the leaseStatus value. + * + * @param leaseStatus the leaseStatus value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withLeaseStatus(LeaseStatusType leaseStatus) { + this.leaseStatus = leaseStatus; + return this; + } + + /** + * Get the contentLength value. + * + * @return the contentLength value. + */ + public Long contentLength() { + return this.contentLength; + } + + /** + * Set the contentLength value. + * + * @param contentLength the contentLength value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withContentLength(Long contentLength) { + this.contentLength = contentLength; + return this; + } + + /** + * Get the contentType value. 
+ * + * @return the contentType value. + */ + public String contentType() { + return this.contentType; + } + + /** + * Set the contentType value. + * + * @param contentType the contentType value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withContentType(String contentType) { + this.contentType = contentType; + return this; + } + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the contentMD5 value. + * + * @return the contentMD5 value. + */ + public byte[] contentMD5() { + return this.contentMD5; + } + + /** + * Set the contentMD5 value. + * + * @param contentMD5 the contentMD5 value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withContentMD5(byte[] contentMD5) { + this.contentMD5 = contentMD5; + return this; + } + + /** + * Get the contentEncoding value. + * + * @return the contentEncoding value. + */ + public String contentEncoding() { + return this.contentEncoding; + } + + /** + * Set the contentEncoding value. + * + * @param contentEncoding the contentEncoding value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withContentEncoding(String contentEncoding) { + this.contentEncoding = contentEncoding; + return this; + } + + /** + * Get the contentDisposition value. + * + * @return the contentDisposition value. + */ + public String contentDisposition() { + return this.contentDisposition; + } + + /** + * Set the contentDisposition value. + * + * @param contentDisposition the contentDisposition value to set. + * @return the BlobGetPropertiesHeaders object itself. 
+ */ + public BlobGetPropertiesHeaders withContentDisposition(String contentDisposition) { + this.contentDisposition = contentDisposition; + return this; + } + + /** + * Get the contentLanguage value. + * + * @return the contentLanguage value. + */ + public String contentLanguage() { + return this.contentLanguage; + } + + /** + * Set the contentLanguage value. + * + * @param contentLanguage the contentLanguage value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withContentLanguage(String contentLanguage) { + this.contentLanguage = contentLanguage; + return this; + } + + /** + * Get the cacheControl value. + * + * @return the cacheControl value. + */ + public String cacheControl() { + return this.cacheControl; + } + + /** + * Set the cacheControl value. + * + * @param cacheControl the cacheControl value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withCacheControl(String cacheControl) { + this.cacheControl = cacheControl; + return this; + } + + /** + * Get the blobSequenceNumber value. + * + * @return the blobSequenceNumber value. + */ + public Long blobSequenceNumber() { + return this.blobSequenceNumber; + } + + /** + * Set the blobSequenceNumber value. + * + * @param blobSequenceNumber the blobSequenceNumber value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withBlobSequenceNumber(Long blobSequenceNumber) { + this.blobSequenceNumber = blobSequenceNumber; + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the BlobGetPropertiesHeaders object itself. 
+ */ + public BlobGetPropertiesHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } + + /** + * Get the acceptRanges value. + * + * @return the acceptRanges value. + */ + public String acceptRanges() { + return this.acceptRanges; + } + + /** + * Set the acceptRanges value. + * + * @param acceptRanges the acceptRanges value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withAcceptRanges(String acceptRanges) { + this.acceptRanges = acceptRanges; + return this; + } + + /** + * Get the blobCommittedBlockCount value. + * + * @return the blobCommittedBlockCount value. + */ + public Integer blobCommittedBlockCount() { + return this.blobCommittedBlockCount; + } + + /** + * Set the blobCommittedBlockCount value. + * + * @param blobCommittedBlockCount the blobCommittedBlockCount value to set. + * @return the BlobGetPropertiesHeaders object itself. 
+ */ + public BlobGetPropertiesHeaders withBlobCommittedBlockCount(Integer blobCommittedBlockCount) { + this.blobCommittedBlockCount = blobCommittedBlockCount; + return this; + } + + /** + * Get the isServerEncrypted value. + * + * @return the isServerEncrypted value. + */ + public Boolean isServerEncrypted() { + return this.isServerEncrypted; + } + + /** + * Set the isServerEncrypted value. + * + * @param isServerEncrypted the isServerEncrypted value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withIsServerEncrypted(Boolean isServerEncrypted) { + this.isServerEncrypted = isServerEncrypted; + return this; + } + + /** + * Get the accessTier value. + * + * @return the accessTier value. + */ + public String accessTier() { + return this.accessTier; + } + + /** + * Set the accessTier value. + * + * @param accessTier the accessTier value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withAccessTier(String accessTier) { + this.accessTier = accessTier; + return this; + } + + /** + * Get the accessTierInferred value. + * + * @return the accessTierInferred value. + */ + public Boolean accessTierInferred() { + return this.accessTierInferred; + } + + /** + * Set the accessTierInferred value. + * + * @param accessTierInferred the accessTierInferred value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withAccessTierInferred(Boolean accessTierInferred) { + this.accessTierInferred = accessTierInferred; + return this; + } + + /** + * Get the archiveStatus value. + * + * @return the archiveStatus value. + */ + public String archiveStatus() { + return this.archiveStatus; + } + + /** + * Set the archiveStatus value. + * + * @param archiveStatus the archiveStatus value to set. + * @return the BlobGetPropertiesHeaders object itself. 
+ */ + public BlobGetPropertiesHeaders withArchiveStatus(String archiveStatus) { + this.archiveStatus = archiveStatus; + return this; + } + + /** + * Get the accessTierChangeTime value. + * + * @return the accessTierChangeTime value. + */ + public OffsetDateTime accessTierChangeTime() { + if (this.accessTierChangeTime == null) { + return null; + } + return this.accessTierChangeTime.dateTime(); + } + + /** + * Set the accessTierChangeTime value. + * + * @param accessTierChangeTime the accessTierChangeTime value to set. + * @return the BlobGetPropertiesHeaders object itself. + */ + public BlobGetPropertiesHeaders withAccessTierChangeTime(OffsetDateTime accessTierChangeTime) { + if (accessTierChangeTime == null) { + this.accessTierChangeTime = null; + } else { + this.accessTierChangeTime = new DateTimeRfc1123(accessTierChangeTime); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobGetPropertiesResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobGetPropertiesResponse.java new file mode 100644 index 0000000000000..49a7631cd7c90 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobGetPropertiesResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the getProperties operation. + */ +public final class BlobGetPropertiesResponse extends RestResponse { + /** + * Creates an instance of BlobGetPropertiesResponse. 
+ * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public BlobGetPropertiesResponse(HttpRequest request, int statusCode, BlobGetPropertiesHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public BlobGetPropertiesHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobHTTPHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobHTTPHeaders.java new file mode 100644 index 0000000000000..477dcd80edaf5 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobHTTPHeaders.java @@ -0,0 +1,183 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; + +/** + * Additional parameters for a set of operations. + */ +@JacksonXmlRootElement(localName = "blob-HTTP-headers") +public final class BlobHTTPHeaders { + /** + * Optional. Sets the blob's cache control. If specified, this property is + * stored with the blob and returned with a read request. 
+ */ + @JsonProperty(value = "BlobCacheControl") + private String blobCacheControl; + + /** + * Optional. Sets the blob's content type. If specified, this property is + * stored with the blob and returned with a read request. + */ + @JsonProperty(value = "BlobContentType") + private String blobContentType; + + /** + * Optional. An MD5 hash of the blob content. Note that this hash is not + * validated, as the hashes for the individual blocks were validated when + * each was uploaded. + */ + @JsonProperty(value = "BlobContentMD5") + private byte[] blobContentMD5; + + /** + * Optional. Sets the blob's content encoding. If specified, this property + * is stored with the blob and returned with a read request. + */ + @JsonProperty(value = "BlobContentEncoding") + private String blobContentEncoding; + + /** + * Optional. Set the blob's content language. If specified, this property + * is stored with the blob and returned with a read request. + */ + @JsonProperty(value = "BlobContentLanguage") + private String blobContentLanguage; + + /** + * Optional. Sets the blob's Content-Disposition header. + */ + @JsonProperty(value = "BlobContentDisposition") + private String blobContentDisposition; + + /** + * Get the blobCacheControl value. + * + * @return the blobCacheControl value. + */ + public String blobCacheControl() { + return this.blobCacheControl; + } + + /** + * Set the blobCacheControl value. + * + * @param blobCacheControl the blobCacheControl value to set. + * @return the BlobHTTPHeaders object itself. + */ + public BlobHTTPHeaders withBlobCacheControl(String blobCacheControl) { + this.blobCacheControl = blobCacheControl; + return this; + } + + /** + * Get the blobContentType value. + * + * @return the blobContentType value. + */ + public String blobContentType() { + return this.blobContentType; + } + + /** + * Set the blobContentType value. + * + * @param blobContentType the blobContentType value to set. + * @return the BlobHTTPHeaders object itself. 
+ */ + public BlobHTTPHeaders withBlobContentType(String blobContentType) { + this.blobContentType = blobContentType; + return this; + } + + /** + * Get the blobContentMD5 value. + * + * @return the blobContentMD5 value. + */ + public byte[] blobContentMD5() { + return this.blobContentMD5; + } + + /** + * Set the blobContentMD5 value. + * + * @param blobContentMD5 the blobContentMD5 value to set. + * @return the BlobHTTPHeaders object itself. + */ + public BlobHTTPHeaders withBlobContentMD5(byte[] blobContentMD5) { + this.blobContentMD5 = blobContentMD5; + return this; + } + + /** + * Get the blobContentEncoding value. + * + * @return the blobContentEncoding value. + */ + public String blobContentEncoding() { + return this.blobContentEncoding; + } + + /** + * Set the blobContentEncoding value. + * + * @param blobContentEncoding the blobContentEncoding value to set. + * @return the BlobHTTPHeaders object itself. + */ + public BlobHTTPHeaders withBlobContentEncoding(String blobContentEncoding) { + this.blobContentEncoding = blobContentEncoding; + return this; + } + + /** + * Get the blobContentLanguage value. + * + * @return the blobContentLanguage value. + */ + public String blobContentLanguage() { + return this.blobContentLanguage; + } + + /** + * Set the blobContentLanguage value. + * + * @param blobContentLanguage the blobContentLanguage value to set. + * @return the BlobHTTPHeaders object itself. + */ + public BlobHTTPHeaders withBlobContentLanguage(String blobContentLanguage) { + this.blobContentLanguage = blobContentLanguage; + return this; + } + + /** + * Get the blobContentDisposition value. + * + * @return the blobContentDisposition value. + */ + public String blobContentDisposition() { + return this.blobContentDisposition; + } + + /** + * Set the blobContentDisposition value. + * + * @param blobContentDisposition the blobContentDisposition value to set. + * @return the BlobHTTPHeaders object itself. 
+ */ + public BlobHTTPHeaders withBlobContentDisposition(String blobContentDisposition) { + this.blobContentDisposition = blobContentDisposition; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobHierarchyListSegment.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobHierarchyListSegment.java new file mode 100644 index 0000000000000..892c55ce54ed2 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobHierarchyListSegment.java @@ -0,0 +1,76 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import java.util.ArrayList; +import java.util.List; + +/** + * The BlobHierarchyListSegment model. + */ +@JacksonXmlRootElement(localName = "Blobs") +@JsonDeserialize(using = CustomHierarchicalListingDeserializer.class) +public final class BlobHierarchyListSegment { + /** + * The blobPrefixes property. + */ + @JsonProperty("BlobPrefix") + private List blobPrefixes = new ArrayList<>(); + + /** + * The blobItems property. + */ + @JsonProperty("Blob") + private List blobItems = new ArrayList<>(); + + /** + * Get the blobPrefixes value. + * + * @return the blobPrefixes value. + */ + public List blobPrefixes() { + return this.blobPrefixes; + } + + /** + * Set the blobPrefixes value. + * + * @param blobPrefixes the blobPrefixes value to set. + * @return the BlobHierarchyListSegment object itself. 
+ */ + public BlobHierarchyListSegment withBlobPrefixes(List blobPrefixes) { + this.blobPrefixes = blobPrefixes; + return this; + } + + /** + * Get the blobItems value. + * + * @return the blobItems value. + */ + public List blobItems() { + return this.blobItems; + } + + /** + * Set the blobItems value. + * + * @param blobItems the blobItems value to set. + * @return the BlobHierarchyListSegment object itself. + */ + public BlobHierarchyListSegment withBlobItems(List blobItems) { + this.blobItems = blobItems; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobItem.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobItem.java new file mode 100644 index 0000000000000..f244ca4ead84a --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobItem.java @@ -0,0 +1,152 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import java.util.Map; + +/** + * An Azure Storage blob. + */ +@JacksonXmlRootElement(localName = "Blob") +public final class BlobItem { + /** + * The name property. + */ + @JsonProperty(value = "Name", required = true) + private String name; + + /** + * The deleted property. + */ + @JsonProperty(value = "Deleted", required = true) + private boolean deleted; + + /** + * The snapshot property. 
+ */ + @JsonProperty(value = "Snapshot", required = true) + private String snapshot; + + /** + * The properties property. + */ + @JsonProperty(value = "Properties", required = true) + private BlobProperties properties; + + /** + * The metadata property. + */ + @JsonProperty(value = "Metadata") + private Map metadata; + + /** + * Get the name value. + * + * @return the name value. + */ + public String name() { + return this.name; + } + + /** + * Set the name value. + * + * @param name the name value to set. + * @return the BlobItem object itself. + */ + public BlobItem withName(String name) { + this.name = name; + return this; + } + + /** + * Get the deleted value. + * + * @return the deleted value. + */ + public boolean deleted() { + return this.deleted; + } + + /** + * Set the deleted value. + * + * @param deleted the deleted value to set. + * @return the BlobItem object itself. + */ + public BlobItem withDeleted(boolean deleted) { + this.deleted = deleted; + return this; + } + + /** + * Get the snapshot value. + * + * @return the snapshot value. + */ + public String snapshot() { + return this.snapshot; + } + + /** + * Set the snapshot value. + * + * @param snapshot the snapshot value to set. + * @return the BlobItem object itself. + */ + public BlobItem withSnapshot(String snapshot) { + this.snapshot = snapshot; + return this; + } + + /** + * Get the properties value. + * + * @return the properties value. + */ + public BlobProperties properties() { + return this.properties; + } + + /** + * Set the properties value. + * + * @param properties the properties value to set. + * @return the BlobItem object itself. + */ + public BlobItem withProperties(BlobProperties properties) { + this.properties = properties; + return this; + } + + /** + * Get the metadata value. + * + * @return the metadata value. + */ + public Map metadata() { + return this.metadata; + } + + /** + * Set the metadata value. + * + * @param metadata the metadata value to set. 
+ * @return the BlobItem object itself. + */ + public BlobItem withMetadata(Map metadata) { + this.metadata = metadata; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobPrefix.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobPrefix.java new file mode 100644 index 0000000000000..4d7283ee47537 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobPrefix.java @@ -0,0 +1,47 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; + +/** + * The BlobPrefix model. + */ +@JacksonXmlRootElement(localName = "BlobPrefix") +public final class BlobPrefix { + /** + * The name property. + */ + @JsonProperty(value = "Name", required = true) + private String name; + + /** + * Get the name value. + * + * @return the name value. + */ + public String name() { + return this.name; + } + + /** + * Set the name value. + * + * @param name the name value to set. + * @return the BlobPrefix object itself. 
+ */ + public BlobPrefix withName(String name) { + this.name = name; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobProperties.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobProperties.java new file mode 100644 index 0000000000000..46d179a4411dc --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobProperties.java @@ -0,0 +1,841 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Properties of a blob. + */ +@JacksonXmlRootElement(localName = "Properties") +public final class BlobProperties { + /** + * The creationTime property. + */ + @JsonProperty(value = "Creation-Time") + private DateTimeRfc1123 creationTime; + + /** + * The lastModified property. + */ + @JsonProperty(value = "Last-Modified", required = true) + private DateTimeRfc1123 lastModified; + + /** + * The etag property. + */ + @JsonProperty(value = "Etag", required = true) + private String etag; + + /** + * Size in bytes. + */ + @JsonProperty(value = "Content-Length") + private Long contentLength; + + /** + * The contentType property. + */ + @JsonProperty(value = "Content-Type") + private String contentType; + + /** + * The contentEncoding property. 
+ */ + @JsonProperty(value = "Content-Encoding") + private String contentEncoding; + + /** + * The contentLanguage property. + */ + @JsonProperty(value = "Content-Language") + private String contentLanguage; + + /** + * The contentMD5 property. + */ + @JsonProperty(value = "Content-MD5") + private byte[] contentMD5; + + /** + * The contentDisposition property. + */ + @JsonProperty(value = "Content-Disposition") + private String contentDisposition; + + /** + * The cacheControl property. + */ + @JsonProperty(value = "Cache-Control") + private String cacheControl; + + /** + * The blobSequenceNumber property. + */ + @JsonProperty(value = "x-ms-blob-sequence-number") + private Long blobSequenceNumber; + + /** + * Possible values include: 'BlockBlob', 'PageBlob', 'AppendBlob'. + */ + @JsonProperty(value = "BlobType") + private BlobType blobType; + + /** + * Possible values include: 'locked', 'unlocked'. + */ + @JsonProperty(value = "LeaseStatus") + private LeaseStatusType leaseStatus; + + /** + * Possible values include: 'available', 'leased', 'expired', 'breaking', + * 'broken'. + */ + @JsonProperty(value = "LeaseState") + private LeaseStateType leaseState; + + /** + * Possible values include: 'infinite', 'fixed'. + */ + @JsonProperty(value = "LeaseDuration") + private LeaseDurationType leaseDuration; + + /** + * The copyId property. + */ + @JsonProperty(value = "CopyId") + private String copyId; + + /** + * Possible values include: 'pending', 'success', 'aborted', 'failed'. + */ + @JsonProperty(value = "CopyStatus") + private CopyStatusType copyStatus; + + /** + * The copySource property. + */ + @JsonProperty(value = "CopySource") + private String copySource; + + /** + * The copyProgress property. + */ + @JsonProperty(value = "CopyProgress") + private String copyProgress; + + /** + * The copyCompletionTime property. + */ + @JsonProperty(value = "CopyCompletionTime") + private DateTimeRfc1123 copyCompletionTime; + + /** + * The copyStatusDescription property. 
+ */ + @JsonProperty(value = "CopyStatusDescription") + private String copyStatusDescription; + + /** + * The serverEncrypted property. + */ + @JsonProperty(value = "ServerEncrypted") + private Boolean serverEncrypted; + + /** + * The incrementalCopy property. + */ + @JsonProperty(value = "IncrementalCopy") + private Boolean incrementalCopy; + + /** + * The destinationSnapshot property. + */ + @JsonProperty(value = "DestinationSnapshot") + private String destinationSnapshot; + + /** + * The deletedTime property. + */ + @JsonProperty(value = "DeletedTime") + private DateTimeRfc1123 deletedTime; + + /** + * The remainingRetentionDays property. + */ + @JsonProperty(value = "RemainingRetentionDays") + private Integer remainingRetentionDays; + + /** + * Possible values include: 'P4', 'P6', 'P10', 'P20', 'P30', 'P40', 'P50', + * 'Hot', 'Cool', 'Archive'. + */ + @JsonProperty(value = "AccessTier") + private AccessTier accessTier; + + /** + * The accessTierInferred property. + */ + @JsonProperty(value = "AccessTierInferred") + private Boolean accessTierInferred; + + /** + * Possible values include: 'rehydrate-pending-to-hot', + * 'rehydrate-pending-to-cool'. + */ + @JsonProperty(value = "ArchiveStatus") + private ArchiveStatus archiveStatus; + + /** + * The accessTierChangeTime property. + */ + @JsonProperty(value = "AccessTierChangeTime") + private DateTimeRfc1123 accessTierChangeTime; + + /** + * Get the creationTime value. + * + * @return the creationTime value. + */ + public OffsetDateTime creationTime() { + if (this.creationTime == null) { + return null; + } + return this.creationTime.dateTime(); + } + + /** + * Set the creationTime value. + * + * @param creationTime the creationTime value to set. + * @return the BlobProperties object itself. 
+ */ + public BlobProperties withCreationTime(OffsetDateTime creationTime) { + if (creationTime == null) { + this.creationTime = null; + } else { + this.creationTime = new DateTimeRfc1123(creationTime); + } + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the BlobProperties object itself. + */ + public BlobProperties withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the etag value. + * + * @return the etag value. + */ + public String etag() { + return this.etag; + } + + /** + * Set the etag value. + * + * @param etag the etag value to set. + * @return the BlobProperties object itself. + */ + public BlobProperties withEtag(String etag) { + this.etag = etag; + return this; + } + + /** + * Get the contentLength value. + * + * @return the contentLength value. + */ + public Long contentLength() { + return this.contentLength; + } + + /** + * Set the contentLength value. + * + * @param contentLength the contentLength value to set. + * @return the BlobProperties object itself. + */ + public BlobProperties withContentLength(Long contentLength) { + this.contentLength = contentLength; + return this; + } + + /** + * Get the contentType value. + * + * @return the contentType value. + */ + public String contentType() { + return this.contentType; + } + + /** + * Set the contentType value. + * + * @param contentType the contentType value to set. + * @return the BlobProperties object itself. 
+ */ + public BlobProperties withContentType(String contentType) { + this.contentType = contentType; + return this; + } + + /** + * Get the contentEncoding value. + * + * @return the contentEncoding value. + */ + public String contentEncoding() { + return this.contentEncoding; + } + + /** + * Set the contentEncoding value. + * + * @param contentEncoding the contentEncoding value to set. + * @return the BlobProperties object itself. + */ + public BlobProperties withContentEncoding(String contentEncoding) { + this.contentEncoding = contentEncoding; + return this; + } + + /** + * Get the contentLanguage value. + * + * @return the contentLanguage value. + */ + public String contentLanguage() { + return this.contentLanguage; + } + + /** + * Set the contentLanguage value. + * + * @param contentLanguage the contentLanguage value to set. + * @return the BlobProperties object itself. + */ + public BlobProperties withContentLanguage(String contentLanguage) { + this.contentLanguage = contentLanguage; + return this; + } + + /** + * Get the contentMD5 value. + * + * @return the contentMD5 value. + */ + public byte[] contentMD5() { + return this.contentMD5; + } + + /** + * Set the contentMD5 value. + * + * @param contentMD5 the contentMD5 value to set. + * @return the BlobProperties object itself. + */ + public BlobProperties withContentMD5(byte[] contentMD5) { + this.contentMD5 = contentMD5; + return this; + } + + /** + * Get the contentDisposition value. + * + * @return the contentDisposition value. + */ + public String contentDisposition() { + return this.contentDisposition; + } + + /** + * Set the contentDisposition value. + * + * @param contentDisposition the contentDisposition value to set. + * @return the BlobProperties object itself. + */ + public BlobProperties withContentDisposition(String contentDisposition) { + this.contentDisposition = contentDisposition; + return this; + } + + /** + * Get the cacheControl value. + * + * @return the cacheControl value. 
+ */ + public String cacheControl() { + return this.cacheControl; + } + + /** + * Set the cacheControl value. + * + * @param cacheControl the cacheControl value to set. + * @return the BlobProperties object itself. + */ + public BlobProperties withCacheControl(String cacheControl) { + this.cacheControl = cacheControl; + return this; + } + + /** + * Get the blobSequenceNumber value. + * + * @return the blobSequenceNumber value. + */ + public Long blobSequenceNumber() { + return this.blobSequenceNumber; + } + + /** + * Set the blobSequenceNumber value. + * + * @param blobSequenceNumber the blobSequenceNumber value to set. + * @return the BlobProperties object itself. + */ + public BlobProperties withBlobSequenceNumber(Long blobSequenceNumber) { + this.blobSequenceNumber = blobSequenceNumber; + return this; + } + + /** + * Get the blobType value. + * + * @return the blobType value. + */ + public BlobType blobType() { + return this.blobType; + } + + /** + * Set the blobType value. + * + * @param blobType the blobType value to set. + * @return the BlobProperties object itself. + */ + public BlobProperties withBlobType(BlobType blobType) { + this.blobType = blobType; + return this; + } + + /** + * Get the leaseStatus value. + * + * @return the leaseStatus value. + */ + public LeaseStatusType leaseStatus() { + return this.leaseStatus; + } + + /** + * Set the leaseStatus value. + * + * @param leaseStatus the leaseStatus value to set. + * @return the BlobProperties object itself. + */ + public BlobProperties withLeaseStatus(LeaseStatusType leaseStatus) { + this.leaseStatus = leaseStatus; + return this; + } + + /** + * Get the leaseState value. + * + * @return the leaseState value. + */ + public LeaseStateType leaseState() { + return this.leaseState; + } + + /** + * Set the leaseState value. + * + * @param leaseState the leaseState value to set. + * @return the BlobProperties object itself. 
+ */ + public BlobProperties withLeaseState(LeaseStateType leaseState) { + this.leaseState = leaseState; + return this; + } + + /** + * Get the leaseDuration value. + * + * @return the leaseDuration value. + */ + public LeaseDurationType leaseDuration() { + return this.leaseDuration; + } + + /** + * Set the leaseDuration value. + * + * @param leaseDuration the leaseDuration value to set. + * @return the BlobProperties object itself. + */ + public BlobProperties withLeaseDuration(LeaseDurationType leaseDuration) { + this.leaseDuration = leaseDuration; + return this; + } + + /** + * Get the copyId value. + * + * @return the copyId value. + */ + public String copyId() { + return this.copyId; + } + + /** + * Set the copyId value. + * + * @param copyId the copyId value to set. + * @return the BlobProperties object itself. + */ + public BlobProperties withCopyId(String copyId) { + this.copyId = copyId; + return this; + } + + /** + * Get the copyStatus value. + * + * @return the copyStatus value. + */ + public CopyStatusType copyStatus() { + return this.copyStatus; + } + + /** + * Set the copyStatus value. + * + * @param copyStatus the copyStatus value to set. + * @return the BlobProperties object itself. + */ + public BlobProperties withCopyStatus(CopyStatusType copyStatus) { + this.copyStatus = copyStatus; + return this; + } + + /** + * Get the copySource value. + * + * @return the copySource value. + */ + public String copySource() { + return this.copySource; + } + + /** + * Set the copySource value. + * + * @param copySource the copySource value to set. + * @return the BlobProperties object itself. + */ + public BlobProperties withCopySource(String copySource) { + this.copySource = copySource; + return this; + } + + /** + * Get the copyProgress value. + * + * @return the copyProgress value. + */ + public String copyProgress() { + return this.copyProgress; + } + + /** + * Set the copyProgress value. + * + * @param copyProgress the copyProgress value to set. 
+ * @return the BlobProperties object itself. + */ + public BlobProperties withCopyProgress(String copyProgress) { + this.copyProgress = copyProgress; + return this; + } + + /** + * Get the copyCompletionTime value. + * + * @return the copyCompletionTime value. + */ + public OffsetDateTime copyCompletionTime() { + if (this.copyCompletionTime == null) { + return null; + } + return this.copyCompletionTime.dateTime(); + } + + /** + * Set the copyCompletionTime value. + * + * @param copyCompletionTime the copyCompletionTime value to set. + * @return the BlobProperties object itself. + */ + public BlobProperties withCopyCompletionTime(OffsetDateTime copyCompletionTime) { + if (copyCompletionTime == null) { + this.copyCompletionTime = null; + } else { + this.copyCompletionTime = new DateTimeRfc1123(copyCompletionTime); + } + return this; + } + + /** + * Get the copyStatusDescription value. + * + * @return the copyStatusDescription value. + */ + public String copyStatusDescription() { + return this.copyStatusDescription; + } + + /** + * Set the copyStatusDescription value. + * + * @param copyStatusDescription the copyStatusDescription value to set. + * @return the BlobProperties object itself. + */ + public BlobProperties withCopyStatusDescription(String copyStatusDescription) { + this.copyStatusDescription = copyStatusDescription; + return this; + } + + /** + * Get the serverEncrypted value. + * + * @return the serverEncrypted value. + */ + public Boolean serverEncrypted() { + return this.serverEncrypted; + } + + /** + * Set the serverEncrypted value. + * + * @param serverEncrypted the serverEncrypted value to set. + * @return the BlobProperties object itself. + */ + public BlobProperties withServerEncrypted(Boolean serverEncrypted) { + this.serverEncrypted = serverEncrypted; + return this; + } + + /** + * Get the incrementalCopy value. + * + * @return the incrementalCopy value. 
+ */ + public Boolean incrementalCopy() { + return this.incrementalCopy; + } + + /** + * Set the incrementalCopy value. + * + * @param incrementalCopy the incrementalCopy value to set. + * @return the BlobProperties object itself. + */ + public BlobProperties withIncrementalCopy(Boolean incrementalCopy) { + this.incrementalCopy = incrementalCopy; + return this; + } + + /** + * Get the destinationSnapshot value. + * + * @return the destinationSnapshot value. + */ + public String destinationSnapshot() { + return this.destinationSnapshot; + } + + /** + * Set the destinationSnapshot value. + * + * @param destinationSnapshot the destinationSnapshot value to set. + * @return the BlobProperties object itself. + */ + public BlobProperties withDestinationSnapshot(String destinationSnapshot) { + this.destinationSnapshot = destinationSnapshot; + return this; + } + + /** + * Get the deletedTime value. + * + * @return the deletedTime value. + */ + public OffsetDateTime deletedTime() { + if (this.deletedTime == null) { + return null; + } + return this.deletedTime.dateTime(); + } + + /** + * Set the deletedTime value. + * + * @param deletedTime the deletedTime value to set. + * @return the BlobProperties object itself. + */ + public BlobProperties withDeletedTime(OffsetDateTime deletedTime) { + if (deletedTime == null) { + this.deletedTime = null; + } else { + this.deletedTime = new DateTimeRfc1123(deletedTime); + } + return this; + } + + /** + * Get the remainingRetentionDays value. + * + * @return the remainingRetentionDays value. + */ + public Integer remainingRetentionDays() { + return this.remainingRetentionDays; + } + + /** + * Set the remainingRetentionDays value. + * + * @param remainingRetentionDays the remainingRetentionDays value to set. + * @return the BlobProperties object itself. 
+ */ + public BlobProperties withRemainingRetentionDays(Integer remainingRetentionDays) { + this.remainingRetentionDays = remainingRetentionDays; + return this; + } + + /** + * Get the accessTier value. + * + * @return the accessTier value. + */ + public AccessTier accessTier() { + return this.accessTier; + } + + /** + * Set the accessTier value. + * + * @param accessTier the accessTier value to set. + * @return the BlobProperties object itself. + */ + public BlobProperties withAccessTier(AccessTier accessTier) { + this.accessTier = accessTier; + return this; + } + + /** + * Get the accessTierInferred value. + * + * @return the accessTierInferred value. + */ + public Boolean accessTierInferred() { + return this.accessTierInferred; + } + + /** + * Set the accessTierInferred value. + * + * @param accessTierInferred the accessTierInferred value to set. + * @return the BlobProperties object itself. + */ + public BlobProperties withAccessTierInferred(Boolean accessTierInferred) { + this.accessTierInferred = accessTierInferred; + return this; + } + + /** + * Get the archiveStatus value. + * + * @return the archiveStatus value. + */ + public ArchiveStatus archiveStatus() { + return this.archiveStatus; + } + + /** + * Set the archiveStatus value. + * + * @param archiveStatus the archiveStatus value to set. + * @return the BlobProperties object itself. + */ + public BlobProperties withArchiveStatus(ArchiveStatus archiveStatus) { + this.archiveStatus = archiveStatus; + return this; + } + + /** + * Get the accessTierChangeTime value. + * + * @return the accessTierChangeTime value. + */ + public OffsetDateTime accessTierChangeTime() { + if (this.accessTierChangeTime == null) { + return null; + } + return this.accessTierChangeTime.dateTime(); + } + + /** + * Set the accessTierChangeTime value. + * + * @param accessTierChangeTime the accessTierChangeTime value to set. + * @return the BlobProperties object itself. 
+ */ + public BlobProperties withAccessTierChangeTime(OffsetDateTime accessTierChangeTime) { + if (accessTierChangeTime == null) { + this.accessTierChangeTime = null; + } else { + this.accessTierChangeTime = new DateTimeRfc1123(accessTierChangeTime); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobReleaseLeaseHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobReleaseLeaseHeaders.java new file mode 100644 index 0000000000000..4635bc05e4cab --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobReleaseLeaseHeaders.java @@ -0,0 +1,175 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for ReleaseLease operation. + */ +@JacksonXmlRootElement(localName = "Blob-ReleaseLease-Headers") +public final class BlobReleaseLeaseHeaders { + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the blob was last modified. Any operation that + * modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. 
+ */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the BlobReleaseLeaseHeaders object itself. + */ + public BlobReleaseLeaseHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the BlobReleaseLeaseHeaders object itself. + */ + public BlobReleaseLeaseHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the BlobReleaseLeaseHeaders object itself. 
+ */ + public BlobReleaseLeaseHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the BlobReleaseLeaseHeaders object itself. + */ + public BlobReleaseLeaseHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the BlobReleaseLeaseHeaders object itself. + */ + public BlobReleaseLeaseHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobReleaseLeaseResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobReleaseLeaseResponse.java new file mode 100644 index 0000000000000..aeae2c5e6d526 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobReleaseLeaseResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the releaseLease operation. 
+ */ +public final class BlobReleaseLeaseResponse extends RestResponse { + /** + * Creates an instance of BlobReleaseLeaseResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public BlobReleaseLeaseResponse(HttpRequest request, int statusCode, BlobReleaseLeaseHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public BlobReleaseLeaseHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobRenewLeaseHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobRenewLeaseHeaders.java new file mode 100644 index 0000000000000..c531d64a53116 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobRenewLeaseHeaders.java @@ -0,0 +1,201 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for RenewLease operation. 
+ */ +@JacksonXmlRootElement(localName = "Blob-RenewLease-Headers") +public final class BlobRenewLeaseHeaders { + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the blob was last modified. Any operation that + * modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * Uniquely identifies a blobs's lease. + */ + @JsonProperty(value = "x-ms-lease-id") + private String leaseId; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the BlobRenewLeaseHeaders object itself. + */ + public BlobRenewLeaseHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. 
+ */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the BlobRenewLeaseHeaders object itself. + */ + public BlobRenewLeaseHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the leaseId value. + * + * @return the leaseId value. + */ + public String leaseId() { + return this.leaseId; + } + + /** + * Set the leaseId value. + * + * @param leaseId the leaseId value to set. + * @return the BlobRenewLeaseHeaders object itself. + */ + public BlobRenewLeaseHeaders withLeaseId(String leaseId) { + this.leaseId = leaseId; + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the BlobRenewLeaseHeaders object itself. + */ + public BlobRenewLeaseHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the BlobRenewLeaseHeaders object itself. + */ + public BlobRenewLeaseHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the BlobRenewLeaseHeaders object itself. 
+ */ + public BlobRenewLeaseHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobRenewLeaseResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobRenewLeaseResponse.java new file mode 100644 index 0000000000000..2bd6354d6ef20 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobRenewLeaseResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the renewLease operation. + */ +public final class BlobRenewLeaseResponse extends RestResponse { + /** + * Creates an instance of BlobRenewLeaseResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public BlobRenewLeaseResponse(HttpRequest request, int statusCode, BlobRenewLeaseHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. 
+ */ + @Override + public BlobRenewLeaseHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobSetHTTPHeadersHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobSetHTTPHeadersHeaders.java new file mode 100644 index 0000000000000..745f62a7f74a4 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobSetHTTPHeadersHeaders.java @@ -0,0 +1,202 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for SetHTTPHeaders operation. + */ +@JacksonXmlRootElement(localName = "Blob-SetHTTPHeaders-Headers") +public final class BlobSetHTTPHeadersHeaders { + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the container was last modified. Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * The current sequence number for a page blob. 
This header is not returned + * for block blobs or append blobs. + */ + @JsonProperty(value = "x-ms-blob-sequence-number") + private Long blobSequenceNumber; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the BlobSetHTTPHeadersHeaders object itself. + */ + public BlobSetHTTPHeadersHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the BlobSetHTTPHeadersHeaders object itself. + */ + public BlobSetHTTPHeadersHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the blobSequenceNumber value. + * + * @return the blobSequenceNumber value. + */ + public Long blobSequenceNumber() { + return this.blobSequenceNumber; + } + + /** + * Set the blobSequenceNumber value. 
+ * + * @param blobSequenceNumber the blobSequenceNumber value to set. + * @return the BlobSetHTTPHeadersHeaders object itself. + */ + public BlobSetHTTPHeadersHeaders withBlobSequenceNumber(Long blobSequenceNumber) { + this.blobSequenceNumber = blobSequenceNumber; + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the BlobSetHTTPHeadersHeaders object itself. + */ + public BlobSetHTTPHeadersHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the BlobSetHTTPHeadersHeaders object itself. + */ + public BlobSetHTTPHeadersHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the BlobSetHTTPHeadersHeaders object itself. 
+ */ + public BlobSetHTTPHeadersHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobSetHTTPHeadersResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobSetHTTPHeadersResponse.java new file mode 100644 index 0000000000000..8c92290fd4252 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobSetHTTPHeadersResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the setHTTPHeaders operation. + */ +public final class BlobSetHTTPHeadersResponse extends RestResponse { + /** + * Creates an instance of BlobSetHTTPHeadersResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public BlobSetHTTPHeadersResponse(HttpRequest request, int statusCode, BlobSetHTTPHeadersHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. 
+ */ + @Override + public BlobSetHTTPHeadersHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobSetMetadataHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobSetMetadataHeaders.java new file mode 100644 index 0000000000000..eced8a038c088 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobSetMetadataHeaders.java @@ -0,0 +1,203 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for SetMetadata operation. + */ +@JacksonXmlRootElement(localName = "Blob-SetMetadata-Headers") +public final class BlobSetMetadataHeaders { + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the container was last modified. Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. 
+ */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * The value of this header is set to true if the contents of the request + * are successfully encrypted using the specified algorithm, and false + * otherwise. + */ + @JsonProperty(value = "x-ms-request-server-encrypted") + private Boolean isServerEncrypted; + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the BlobSetMetadataHeaders object itself. + */ + public BlobSetMetadataHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the BlobSetMetadataHeaders object itself. + */ + public BlobSetMetadataHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the requestId value. 
+ * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the BlobSetMetadataHeaders object itself. + */ + public BlobSetMetadataHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the BlobSetMetadataHeaders object itself. + */ + public BlobSetMetadataHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the BlobSetMetadataHeaders object itself. + */ + public BlobSetMetadataHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } + + /** + * Get the isServerEncrypted value. + * + * @return the isServerEncrypted value. + */ + public Boolean isServerEncrypted() { + return this.isServerEncrypted; + } + + /** + * Set the isServerEncrypted value. + * + * @param isServerEncrypted the isServerEncrypted value to set. + * @return the BlobSetMetadataHeaders object itself. 
+ */ + public BlobSetMetadataHeaders withIsServerEncrypted(Boolean isServerEncrypted) { + this.isServerEncrypted = isServerEncrypted; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobSetMetadataResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobSetMetadataResponse.java new file mode 100644 index 0000000000000..c9a9a392c5168 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobSetMetadataResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the setMetadata operation. + */ +public final class BlobSetMetadataResponse extends RestResponse { + /** + * Creates an instance of BlobSetMetadataResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public BlobSetMetadataResponse(HttpRequest request, int statusCode, BlobSetMetadataHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. 
+ */ + @Override + public BlobSetMetadataHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobSetTierHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobSetTierHeaders.java new file mode 100644 index 0000000000000..e26f3f8e5e851 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobSetTierHeaders.java @@ -0,0 +1,76 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; + +/** + * Defines headers for SetTier operation. + */ +@JacksonXmlRootElement(localName = "Blob-SetTier-Headers") +public final class BlobSetTierHeaders { + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * newer. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the BlobSetTierHeaders object itself. 
+ */ + public BlobSetTierHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the BlobSetTierHeaders object itself. + */ + public BlobSetTierHeaders withVersion(String version) { + this.version = version; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobSetTierResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobSetTierResponse.java new file mode 100644 index 0000000000000..12e85f0eac9ba --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobSetTierResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the setTier operation. + */ +public final class BlobSetTierResponse extends RestResponse { + /** + * Creates an instance of BlobSetTierResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. 
+ */ + public BlobSetTierResponse(HttpRequest request, int statusCode, BlobSetTierHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public BlobSetTierHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobStartCopyFromURLHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobStartCopyFromURLHeaders.java new file mode 100644 index 0000000000000..ceddf0bdd759a --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobStartCopyFromURLHeaders.java @@ -0,0 +1,230 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for StartCopyFromURL operation. + */ +@JacksonXmlRootElement(localName = "Blob-StartCopyFromURL-Headers") +public final class BlobStartCopyFromURLHeaders { + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the container was last modified. 
Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * String identifier for this copy operation. Use with Get Blob Properties + * to check the status of this copy operation, or pass to Abort Copy Blob + * to abort a pending copy. + */ + @JsonProperty(value = "x-ms-copy-id") + private String copyId; + + /** + * State of the copy operation identified by x-ms-copy-id. Possible values + * include: 'pending', 'success', 'aborted', 'failed'. + */ + @JsonProperty(value = "x-ms-copy-status") + private CopyStatusType copyStatus; + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the BlobStartCopyFromURLHeaders object itself. + */ + public BlobStartCopyFromURLHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. 
+ * + * @param lastModified the lastModified value to set. + * @return the BlobStartCopyFromURLHeaders object itself. + */ + public BlobStartCopyFromURLHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the BlobStartCopyFromURLHeaders object itself. + */ + public BlobStartCopyFromURLHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the BlobStartCopyFromURLHeaders object itself. + */ + public BlobStartCopyFromURLHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the BlobStartCopyFromURLHeaders object itself. + */ + public BlobStartCopyFromURLHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } + + /** + * Get the copyId value. + * + * @return the copyId value. + */ + public String copyId() { + return this.copyId; + } + + /** + * Set the copyId value. + * + * @param copyId the copyId value to set. + * @return the BlobStartCopyFromURLHeaders object itself. 
+ */ + public BlobStartCopyFromURLHeaders withCopyId(String copyId) { + this.copyId = copyId; + return this; + } + + /** + * Get the copyStatus value. + * + * @return the copyStatus value. + */ + public CopyStatusType copyStatus() { + return this.copyStatus; + } + + /** + * Set the copyStatus value. + * + * @param copyStatus the copyStatus value to set. + * @return the BlobStartCopyFromURLHeaders object itself. + */ + public BlobStartCopyFromURLHeaders withCopyStatus(CopyStatusType copyStatus) { + this.copyStatus = copyStatus; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobStartCopyFromURLResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobStartCopyFromURLResponse.java new file mode 100644 index 0000000000000..20b30fcd4dfad --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobStartCopyFromURLResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the startCopyFromURL operation. + */ +public final class BlobStartCopyFromURLResponse extends RestResponse { + /** + * Creates an instance of BlobStartCopyFromURLResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. 
+ * @param body the deserialized body of the HTTP response. + */ + public BlobStartCopyFromURLResponse(HttpRequest request, int statusCode, BlobStartCopyFromURLHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public BlobStartCopyFromURLHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobType.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobType.java new file mode 100644 index 0000000000000..5649f4e254b1b --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobType.java @@ -0,0 +1,66 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; + +/** + * Defines values for BlobType. + */ +public enum BlobType { + /** + * Enum value BlockBlob. + */ + BLOCK_BLOB("BlockBlob"), + + /** + * Enum value PageBlob. + */ + PAGE_BLOB("PageBlob"), + + /** + * Enum value AppendBlob. + */ + APPEND_BLOB("AppendBlob"); + + /** + * The actual serialized value for a BlobType instance. + */ + private final String value; + + private BlobType(String value) { + this.value = value; + } + + /** + * Parses a serialized value to a BlobType instance. + * + * @param value the serialized value to parse. + * @return the parsed BlobType object, or null if unable to parse. 
+ */ + @JsonCreator + public static BlobType fromString(String value) { + BlobType[] items = BlobType.values(); + for (BlobType item : items) { + if (item.toString().equalsIgnoreCase(value)) { + return item; + } + } + return null; + } + + @JsonValue + @Override + public String toString() { + return this.value; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobUndeleteHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobUndeleteHeaders.java new file mode 100644 index 0000000000000..de0c5c8074d1f --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobUndeleteHeaders.java @@ -0,0 +1,112 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for Undelete operation. + */ +@JacksonXmlRootElement(localName = "Blob-Undelete-Headers") +public final class BlobUndeleteHeaders { + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. 
+ */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the BlobUndeleteHeaders object itself. + */ + public BlobUndeleteHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the BlobUndeleteHeaders object itself. + */ + public BlobUndeleteHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the BlobUndeleteHeaders object itself. + */ + public BlobUndeleteHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobUndeleteResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobUndeleteResponse.java new file mode 100644 index 0000000000000..7222085caab0a --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlobUndeleteResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. 
+ * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the undelete operation. + */ +public final class BlobUndeleteResponse extends RestResponse { + /** + * Creates an instance of BlobUndeleteResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public BlobUndeleteResponse(HttpRequest request, int statusCode, BlobUndeleteHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public BlobUndeleteHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/Block.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/Block.java new file mode 100644 index 0000000000000..e390ee6b8d700 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/Block.java @@ -0,0 +1,74 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; + +/** + * Represents a single block in a block blob. It describes the block's ID and + * size. + */ +@JacksonXmlRootElement(localName = "Block") +public final class Block { + /** + * The base64 encoded block ID. + */ + @JsonProperty(value = "Name", required = true) + private String name; + + /** + * The block size in bytes. + */ + @JsonProperty(value = "Size", required = true) + private int size; + + /** + * Get the name value. + * + * @return the name value. + */ + public String name() { + return this.name; + } + + /** + * Set the name value. + * + * @param name the name value to set. + * @return the Block object itself. + */ + public Block withName(String name) { + this.name = name; + return this; + } + + /** + * Get the size value. + * + * @return the size value. + */ + public int size() { + return this.size; + } + + /** + * Set the size value. + * + * @param size the size value to set. + * @return the Block object itself. + */ + public Block withSize(int size) { + this.size = size; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobCommitBlockListHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobCommitBlockListHeaders.java new file mode 100644 index 0000000000000..c94db9b4162a1 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobCommitBlockListHeaders.java @@ -0,0 +1,231 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. 
+ * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for CommitBlockList operation. + */ +@JacksonXmlRootElement(localName = "BlockBlob-CommitBlockList-Headers") +public final class BlockBlobCommitBlockListHeaders { + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the container was last modified. Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * If the blob has an MD5 hash and this operation is to read the full blob, + * this response header is returned so that the client can check for + * message content integrity. + */ + @JsonProperty(value = "Content-MD5") + private byte[] contentMD5; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. 
+ */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * The value of this header is set to true if the contents of the request + * are successfully encrypted using the specified algorithm, and false + * otherwise. + */ + @JsonProperty(value = "x-ms-request-server-encrypted") + private Boolean isServerEncrypted; + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the BlockBlobCommitBlockListHeaders object itself. + */ + public BlockBlobCommitBlockListHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the BlockBlobCommitBlockListHeaders object itself. + */ + public BlockBlobCommitBlockListHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the contentMD5 value. + * + * @return the contentMD5 value. + */ + public byte[] contentMD5() { + return this.contentMD5; + } + + /** + * Set the contentMD5 value. + * + * @param contentMD5 the contentMD5 value to set. + * @return the BlockBlobCommitBlockListHeaders object itself. + */ + public BlockBlobCommitBlockListHeaders withContentMD5(byte[] contentMD5) { + this.contentMD5 = contentMD5; + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. 
+ * @return the BlockBlobCommitBlockListHeaders object itself. + */ + public BlockBlobCommitBlockListHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the BlockBlobCommitBlockListHeaders object itself. + */ + public BlockBlobCommitBlockListHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the BlockBlobCommitBlockListHeaders object itself. + */ + public BlockBlobCommitBlockListHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } + + /** + * Get the isServerEncrypted value. + * + * @return the isServerEncrypted value. + */ + public Boolean isServerEncrypted() { + return this.isServerEncrypted; + } + + /** + * Set the isServerEncrypted value. + * + * @param isServerEncrypted the isServerEncrypted value to set. + * @return the BlockBlobCommitBlockListHeaders object itself. 
+ */ + public BlockBlobCommitBlockListHeaders withIsServerEncrypted(Boolean isServerEncrypted) { + this.isServerEncrypted = isServerEncrypted; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobCommitBlockListResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobCommitBlockListResponse.java new file mode 100644 index 0000000000000..3901f36b9b0f3 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobCommitBlockListResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the commitBlockList operation. + */ +public final class BlockBlobCommitBlockListResponse extends RestResponse { + /** + * Creates an instance of BlockBlobCommitBlockListResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public BlockBlobCommitBlockListResponse(HttpRequest request, int statusCode, BlockBlobCommitBlockListHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. 
+ */ + @Override + public BlockBlobCommitBlockListHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobGetBlockListHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobGetBlockListHeaders.java new file mode 100644 index 0000000000000..27358a7a4bbd7 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobGetBlockListHeaders.java @@ -0,0 +1,228 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for GetBlockList operation. + */ +@JacksonXmlRootElement(localName = "BlockBlob-GetBlockList-Headers") +public final class BlockBlobGetBlockListHeaders { + /** + * Returns the date and time the container was last modified. Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * The media type of the body of the response. 
For Get Block List this is + * 'application/xml'. + */ + @JsonProperty(value = "Content-Type") + private String contentType; + + /** + * The size of the blob in bytes. + */ + @JsonProperty(value = "x-ms-blob-content-length") + private Long blobContentLength; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the BlockBlobGetBlockListHeaders object itself. + */ + public BlockBlobGetBlockListHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the BlockBlobGetBlockListHeaders object itself. + */ + public BlockBlobGetBlockListHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the contentType value. + * + * @return the contentType value. 
+ */ + public String contentType() { + return this.contentType; + } + + /** + * Set the contentType value. + * + * @param contentType the contentType value to set. + * @return the BlockBlobGetBlockListHeaders object itself. + */ + public BlockBlobGetBlockListHeaders withContentType(String contentType) { + this.contentType = contentType; + return this; + } + + /** + * Get the blobContentLength value. + * + * @return the blobContentLength value. + */ + public Long blobContentLength() { + return this.blobContentLength; + } + + /** + * Set the blobContentLength value. + * + * @param blobContentLength the blobContentLength value to set. + * @return the BlockBlobGetBlockListHeaders object itself. + */ + public BlockBlobGetBlockListHeaders withBlobContentLength(Long blobContentLength) { + this.blobContentLength = blobContentLength; + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the BlockBlobGetBlockListHeaders object itself. + */ + public BlockBlobGetBlockListHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the BlockBlobGetBlockListHeaders object itself. + */ + public BlockBlobGetBlockListHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the BlockBlobGetBlockListHeaders object itself. 
+ */ + public BlockBlobGetBlockListHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobGetBlockListResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobGetBlockListResponse.java new file mode 100644 index 0000000000000..d8a0d26b329b3 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobGetBlockListResponse.java @@ -0,0 +1,49 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the getBlockList operation. + */ +public final class BlockBlobGetBlockListResponse extends RestResponse { + /** + * Creates an instance of BlockBlobGetBlockListResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public BlockBlobGetBlockListResponse(HttpRequest request, int statusCode, BlockBlobGetBlockListHeaders headers, Map rawHeaders, BlockList body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. 
+ */ + @Override + public BlockBlobGetBlockListHeaders headers() { + return super.headers(); + } + + /** + * @return the deserialized response body. + */ + @Override + public BlockList body() { + return super.body(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobStageBlockFromURLHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobStageBlockFromURLHeaders.java new file mode 100644 index 0000000000000..e6bdfd3c44c65 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobStageBlockFromURLHeaders.java @@ -0,0 +1,168 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for StageBlockFromURL operation. + */ +@JacksonXmlRootElement(localName = "BlockBlob-StageBlockFromURL-Headers") +public final class BlockBlobStageBlockFromURLHeaders { + /** + * If the blob has an MD5 hash and this operation is to read the full blob, + * this response header is returned so that the client can check for + * message content integrity. + */ + @JsonProperty(value = "Content-MD5") + private byte[] contentMD5; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. 
+ */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * The value of this header is set to true if the contents of the request + * are successfully encrypted using the specified algorithm, and false + * otherwise. + */ + @JsonProperty(value = "x-ms-request-server-encrypted") + private Boolean isServerEncrypted; + + /** + * Get the contentMD5 value. + * + * @return the contentMD5 value. + */ + public byte[] contentMD5() { + return this.contentMD5; + } + + /** + * Set the contentMD5 value. + * + * @param contentMD5 the contentMD5 value to set. + * @return the BlockBlobStageBlockFromURLHeaders object itself. + */ + public BlockBlobStageBlockFromURLHeaders withContentMD5(byte[] contentMD5) { + this.contentMD5 = contentMD5; + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the BlockBlobStageBlockFromURLHeaders object itself. + */ + public BlockBlobStageBlockFromURLHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the BlockBlobStageBlockFromURLHeaders object itself. 
+ */ + public BlockBlobStageBlockFromURLHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the BlockBlobStageBlockFromURLHeaders object itself. + */ + public BlockBlobStageBlockFromURLHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } + + /** + * Get the isServerEncrypted value. + * + * @return the isServerEncrypted value. + */ + public Boolean isServerEncrypted() { + return this.isServerEncrypted; + } + + /** + * Set the isServerEncrypted value. + * + * @param isServerEncrypted the isServerEncrypted value to set. + * @return the BlockBlobStageBlockFromURLHeaders object itself. + */ + public BlockBlobStageBlockFromURLHeaders withIsServerEncrypted(Boolean isServerEncrypted) { + this.isServerEncrypted = isServerEncrypted; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobStageBlockFromURLResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobStageBlockFromURLResponse.java new file mode 100644 index 0000000000000..eec168743b1e7 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobStageBlockFromURLResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the stageBlockFromURL operation. + */ +public final class BlockBlobStageBlockFromURLResponse extends RestResponse { + /** + * Creates an instance of BlockBlobStageBlockFromURLResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public BlockBlobStageBlockFromURLResponse(HttpRequest request, int statusCode, BlockBlobStageBlockFromURLHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public BlockBlobStageBlockFromURLHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobStageBlockHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobStageBlockHeaders.java new file mode 100644 index 0000000000000..548ff8658754c --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobStageBlockHeaders.java @@ -0,0 +1,168 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for StageBlock operation. + */ +@JacksonXmlRootElement(localName = "BlockBlob-StageBlock-Headers") +public final class BlockBlobStageBlockHeaders { + /** + * If the blob has an MD5 hash and this operation is to read the full blob, + * this response header is returned so that the client can check for + * message content integrity. + */ + @JsonProperty(value = "Content-MD5") + private byte[] contentMD5; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * The value of this header is set to true if the contents of the request + * are successfully encrypted using the specified algorithm, and false + * otherwise. + */ + @JsonProperty(value = "x-ms-request-server-encrypted") + private Boolean isServerEncrypted; + + /** + * Get the contentMD5 value. + * + * @return the contentMD5 value. + */ + public byte[] contentMD5() { + return this.contentMD5; + } + + /** + * Set the contentMD5 value. + * + * @param contentMD5 the contentMD5 value to set. + * @return the BlockBlobStageBlockHeaders object itself. 
+ */ + public BlockBlobStageBlockHeaders withContentMD5(byte[] contentMD5) { + this.contentMD5 = contentMD5; + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the BlockBlobStageBlockHeaders object itself. + */ + public BlockBlobStageBlockHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the BlockBlobStageBlockHeaders object itself. + */ + public BlockBlobStageBlockHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the BlockBlobStageBlockHeaders object itself. + */ + public BlockBlobStageBlockHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } + + /** + * Get the isServerEncrypted value. + * + * @return the isServerEncrypted value. + */ + public Boolean isServerEncrypted() { + return this.isServerEncrypted; + } + + /** + * Set the isServerEncrypted value. + * + * @param isServerEncrypted the isServerEncrypted value to set. + * @return the BlockBlobStageBlockHeaders object itself. 
+ */ + public BlockBlobStageBlockHeaders withIsServerEncrypted(Boolean isServerEncrypted) { + this.isServerEncrypted = isServerEncrypted; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobStageBlockResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobStageBlockResponse.java new file mode 100644 index 0000000000000..64ffddbc024b5 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobStageBlockResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the stageBlock operation. + */ +public final class BlockBlobStageBlockResponse extends RestResponse { + /** + * Creates an instance of BlockBlobStageBlockResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public BlockBlobStageBlockResponse(HttpRequest request, int statusCode, BlockBlobStageBlockHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. 
+ */ + @Override + public BlockBlobStageBlockHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobUploadHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobUploadHeaders.java new file mode 100644 index 0000000000000..8fd34f41974a2 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobUploadHeaders.java @@ -0,0 +1,231 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for Upload operation. + */ +@JacksonXmlRootElement(localName = "BlockBlob-Upload-Headers") +public final class BlockBlobUploadHeaders { + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the container was last modified. Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. 
+ */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * If the blob has an MD5 hash and this operation is to read the full blob, + * this response header is returned so that the client can check for + * message content integrity. + */ + @JsonProperty(value = "Content-MD5") + private byte[] contentMD5; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * The value of this header is set to true if the contents of the request + * are successfully encrypted using the specified algorithm, and false + * otherwise. + */ + @JsonProperty(value = "x-ms-request-server-encrypted") + private Boolean isServerEncrypted; + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the BlockBlobUploadHeaders object itself. + */ + public BlockBlobUploadHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the BlockBlobUploadHeaders object itself. 
+ */ + public BlockBlobUploadHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the contentMD5 value. + * + * @return the contentMD5 value. + */ + public byte[] contentMD5() { + return this.contentMD5; + } + + /** + * Set the contentMD5 value. + * + * @param contentMD5 the contentMD5 value to set. + * @return the BlockBlobUploadHeaders object itself. + */ + public BlockBlobUploadHeaders withContentMD5(byte[] contentMD5) { + this.contentMD5 = contentMD5; + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the BlockBlobUploadHeaders object itself. + */ + public BlockBlobUploadHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the BlockBlobUploadHeaders object itself. + */ + public BlockBlobUploadHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the BlockBlobUploadHeaders object itself. + */ + public BlockBlobUploadHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } + + /** + * Get the isServerEncrypted value. 
+ * + * @return the isServerEncrypted value. + */ + public Boolean isServerEncrypted() { + return this.isServerEncrypted; + } + + /** + * Set the isServerEncrypted value. + * + * @param isServerEncrypted the isServerEncrypted value to set. + * @return the BlockBlobUploadHeaders object itself. + */ + public BlockBlobUploadHeaders withIsServerEncrypted(Boolean isServerEncrypted) { + this.isServerEncrypted = isServerEncrypted; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobUploadResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobUploadResponse.java new file mode 100644 index 0000000000000..43461d15a6087 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockBlobUploadResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the upload operation. + */ +public final class BlockBlobUploadResponse extends RestResponse { + /** + * Creates an instance of BlockBlobUploadResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. 
+ */ + public BlockBlobUploadResponse(HttpRequest request, int statusCode, BlockBlobUploadHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public BlockBlobUploadHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockList.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockList.java new file mode 100644 index 0000000000000..7a4cf0dcf5bf2 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockList.java @@ -0,0 +1,102 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import java.util.ArrayList; +import java.util.List; + +/** + * The BlockList model. + */ +@JacksonXmlRootElement(localName = "BlockList") +public final class BlockList { + private static final class CommittedBlocksWrapper { + @JacksonXmlProperty(localName = "Block") + private final List items; + + @JsonCreator + private CommittedBlocksWrapper(@JacksonXmlProperty(localName = "Block") List items) { + this.items = items; + } + } + + /** + * The committedBlocks property. 
+ */ + @JsonProperty(value = "CommittedBlocks") + private CommittedBlocksWrapper committedBlocks; + + private static final class UncommittedBlocksWrapper { + @JacksonXmlProperty(localName = "Block") + private final List items; + + @JsonCreator + private UncommittedBlocksWrapper(@JacksonXmlProperty(localName = "Block") List items) { + this.items = items; + } + } + + /** + * The uncommittedBlocks property. + */ + @JsonProperty(value = "UncommittedBlocks") + private UncommittedBlocksWrapper uncommittedBlocks; + + /** + * Get the committedBlocks value. + * + * @return the committedBlocks value. + */ + public List committedBlocks() { + if (this.committedBlocks == null) { + this.committedBlocks = new CommittedBlocksWrapper(new ArrayList()); + } + return this.committedBlocks.items; + } + + /** + * Set the committedBlocks value. + * + * @param committedBlocks the committedBlocks value to set. + * @return the BlockList object itself. + */ + public BlockList withCommittedBlocks(List committedBlocks) { + this.committedBlocks = new CommittedBlocksWrapper(committedBlocks); + return this; + } + + /** + * Get the uncommittedBlocks value. + * + * @return the uncommittedBlocks value. + */ + public List uncommittedBlocks() { + if (this.uncommittedBlocks == null) { + this.uncommittedBlocks = new UncommittedBlocksWrapper(new ArrayList()); + } + return this.uncommittedBlocks.items; + } + + /** + * Set the uncommittedBlocks value. + * + * @param uncommittedBlocks the uncommittedBlocks value to set. + * @return the BlockList object itself. 
+ */ + public BlockList withUncommittedBlocks(List uncommittedBlocks) { + this.uncommittedBlocks = new UncommittedBlocksWrapper(uncommittedBlocks); + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockListType.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockListType.java new file mode 100644 index 0000000000000..10431a59a2324 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockListType.java @@ -0,0 +1,66 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; + +/** + * Defines values for BlockListType. + */ +public enum BlockListType { + /** + * Enum value committed. + */ + COMMITTED("committed"), + + /** + * Enum value uncommitted. + */ + UNCOMMITTED("uncommitted"), + + /** + * Enum value all. + */ + ALL("all"); + + /** + * The actual serialized value for a BlockListType instance. + */ + private final String value; + + private BlockListType(String value) { + this.value = value; + } + + /** + * Parses a serialized value to a BlockListType instance. + * + * @param value the serialized value to parse. + * @return the parsed BlockListType object, or null if unable to parse. 
+ */ + @JsonCreator + public static BlockListType fromString(String value) { + BlockListType[] items = BlockListType.values(); + for (BlockListType item : items) { + if (item.toString().equalsIgnoreCase(value)) { + return item; + } + } + return null; + } + + @JsonValue + @Override + public String toString() { + return this.value; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockLookupList.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockLookupList.java new file mode 100644 index 0000000000000..2e3e420b9b3c5 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/BlockLookupList.java @@ -0,0 +1,101 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import java.util.ArrayList; +import java.util.List; + +/** + * The BlockLookupList model. + */ +@JacksonXmlRootElement(localName = "BlockList") +public final class BlockLookupList { + /** + * The committed property. + */ + @JsonProperty("Committed") + private List committed = new ArrayList<>(); + + /** + * The uncommitted property. + */ + @JsonProperty("Uncommitted") + private List uncommitted = new ArrayList<>(); + + /** + * The latest property. + */ + @JsonProperty("Latest") + private List latest = new ArrayList<>(); + + /** + * Get the committed value. + * + * @return the committed value. 
+ */ + public List committed() { + return this.committed; + } + + /** + * Set the committed value. + * + * @param committed the committed value to set. + * @return the BlockLookupList object itself. + */ + public BlockLookupList withCommitted(List committed) { + this.committed = committed; + return this; + } + + /** + * Get the uncommitted value. + * + * @return the uncommitted value. + */ + public List uncommitted() { + return this.uncommitted; + } + + /** + * Set the uncommitted value. + * + * @param uncommitted the uncommitted value to set. + * @return the BlockLookupList object itself. + */ + public BlockLookupList withUncommitted(List uncommitted) { + this.uncommitted = uncommitted; + return this; + } + + /** + * Get the latest value. + * + * @return the latest value. + */ + public List latest() { + return this.latest; + } + + /** + * Set the latest value. + * + * @param latest the latest value to set. + * @return the BlockLookupList object itself. + */ + public BlockLookupList withLatest(List latest) { + this.latest = latest; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ClearRange.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ClearRange.java new file mode 100644 index 0000000000000..c5020a18b9d76 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ClearRange.java @@ -0,0 +1,73 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; + +/** + * The ClearRange model. + */ +@JacksonXmlRootElement(localName = "ClearRange") +public final class ClearRange { + /** + * The start property. + */ + @JsonProperty(value = "Start", required = true) + private long start; + + /** + * The end property. + */ + @JsonProperty(value = "End", required = true) + private long end; + + /** + * Get the start value. + * + * @return the start value. + */ + public long start() { + return this.start; + } + + /** + * Set the start value. + * + * @param start the start value to set. + * @return the ClearRange object itself. + */ + public ClearRange withStart(long start) { + this.start = start; + return this; + } + + /** + * Get the end value. + * + * @return the end value. + */ + public long end() { + return this.end; + } + + /** + * Set the end value. + * + * @param end the end value to set. + * @return the ClearRange object itself. + */ + public ClearRange withEnd(long end) { + this.end = end; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerAcquireLeaseHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerAcquireLeaseHeaders.java new file mode 100644 index 0000000000000..8cd212b6d0446 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerAcquireLeaseHeaders.java @@ -0,0 +1,201 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for AcquireLease operation. + */ +@JacksonXmlRootElement(localName = "Container-AcquireLease-Headers") +public final class ContainerAcquireLeaseHeaders { + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the container was last modified. Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * Uniquely identifies a container's lease. + */ + @JsonProperty(value = "x-ms-lease-id") + private String leaseId; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. 
+ * + * @param eTag the eTag value to set. + * @return the ContainerAcquireLeaseHeaders object itself. + */ + public ContainerAcquireLeaseHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the ContainerAcquireLeaseHeaders object itself. + */ + public ContainerAcquireLeaseHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the leaseId value. + * + * @return the leaseId value. + */ + public String leaseId() { + return this.leaseId; + } + + /** + * Set the leaseId value. + * + * @param leaseId the leaseId value to set. + * @return the ContainerAcquireLeaseHeaders object itself. + */ + public ContainerAcquireLeaseHeaders withLeaseId(String leaseId) { + this.leaseId = leaseId; + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the ContainerAcquireLeaseHeaders object itself. + */ + public ContainerAcquireLeaseHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the ContainerAcquireLeaseHeaders object itself. 
+ */ + public ContainerAcquireLeaseHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the ContainerAcquireLeaseHeaders object itself. + */ + public ContainerAcquireLeaseHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerAcquireLeaseResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerAcquireLeaseResponse.java new file mode 100644 index 0000000000000..8379f72229c27 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerAcquireLeaseResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the acquireLease operation. + */ +public final class ContainerAcquireLeaseResponse extends RestResponse { + /** + * Creates an instance of ContainerAcquireLeaseResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. 
+ * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public ContainerAcquireLeaseResponse(HttpRequest request, int statusCode, ContainerAcquireLeaseHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public ContainerAcquireLeaseHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerBreakLeaseHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerBreakLeaseHeaders.java new file mode 100644 index 0000000000000..d0ba83943bef9 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerBreakLeaseHeaders.java @@ -0,0 +1,201 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for BreakLease operation. + */ +@JacksonXmlRootElement(localName = "Container-BreakLease-Headers") +public final class ContainerBreakLeaseHeaders { + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. 
+ */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the container was last modified. Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * Approximate time remaining in the lease period, in seconds. + */ + @JsonProperty(value = "x-ms-lease-time") + private Integer leaseTime; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the ContainerBreakLeaseHeaders object itself. + */ + public ContainerBreakLeaseHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the ContainerBreakLeaseHeaders object itself. 
+ */ + public ContainerBreakLeaseHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the leaseTime value. + * + * @return the leaseTime value. + */ + public Integer leaseTime() { + return this.leaseTime; + } + + /** + * Set the leaseTime value. + * + * @param leaseTime the leaseTime value to set. + * @return the ContainerBreakLeaseHeaders object itself. + */ + public ContainerBreakLeaseHeaders withLeaseTime(Integer leaseTime) { + this.leaseTime = leaseTime; + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the ContainerBreakLeaseHeaders object itself. + */ + public ContainerBreakLeaseHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the ContainerBreakLeaseHeaders object itself. + */ + public ContainerBreakLeaseHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the ContainerBreakLeaseHeaders object itself. 
+ */ + public ContainerBreakLeaseHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerBreakLeaseResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerBreakLeaseResponse.java new file mode 100644 index 0000000000000..fa8567cde39e5 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerBreakLeaseResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the breakLease operation. + */ +public final class ContainerBreakLeaseResponse extends RestResponse { + /** + * Creates an instance of ContainerBreakLeaseResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public ContainerBreakLeaseResponse(HttpRequest request, int statusCode, ContainerBreakLeaseHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. 
+ */ + @Override + public ContainerBreakLeaseHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerChangeLeaseHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerChangeLeaseHeaders.java new file mode 100644 index 0000000000000..9c8b5a5d8c133 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerChangeLeaseHeaders.java @@ -0,0 +1,201 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for ChangeLease operation. + */ +@JacksonXmlRootElement(localName = "Container-ChangeLease-Headers") +public final class ContainerChangeLeaseHeaders { + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the container was last modified. Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * Uniquely identifies a container's lease. 
+ */ + @JsonProperty(value = "x-ms-lease-id") + private String leaseId; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the ContainerChangeLeaseHeaders object itself. + */ + public ContainerChangeLeaseHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the ContainerChangeLeaseHeaders object itself. + */ + public ContainerChangeLeaseHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the leaseId value. + * + * @return the leaseId value. + */ + public String leaseId() { + return this.leaseId; + } + + /** + * Set the leaseId value. + * + * @param leaseId the leaseId value to set. + * @return the ContainerChangeLeaseHeaders object itself. 
+ */ + public ContainerChangeLeaseHeaders withLeaseId(String leaseId) { + this.leaseId = leaseId; + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the ContainerChangeLeaseHeaders object itself. + */ + public ContainerChangeLeaseHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the ContainerChangeLeaseHeaders object itself. + */ + public ContainerChangeLeaseHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the ContainerChangeLeaseHeaders object itself. + */ + public ContainerChangeLeaseHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerChangeLeaseResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerChangeLeaseResponse.java new file mode 100644 index 0000000000000..d8d0d390d7731 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerChangeLeaseResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. 
See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the changeLease operation. + */ +public final class ContainerChangeLeaseResponse extends RestResponse { + /** + * Creates an instance of ContainerChangeLeaseResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public ContainerChangeLeaseResponse(HttpRequest request, int statusCode, ContainerChangeLeaseHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public ContainerChangeLeaseHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerCreateHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerCreateHeaders.java new file mode 100644 index 0000000000000..1e636b7f7c8b3 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerCreateHeaders.java @@ -0,0 +1,175 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. 
+ * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for Create operation. + */ +@JacksonXmlRootElement(localName = "Container-Create-Headers") +public final class ContainerCreateHeaders { + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the container was last modified. Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. 
+ * @return the ContainerCreateHeaders object itself. + */ + public ContainerCreateHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the ContainerCreateHeaders object itself. + */ + public ContainerCreateHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the ContainerCreateHeaders object itself. + */ + public ContainerCreateHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the ContainerCreateHeaders object itself. + */ + public ContainerCreateHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the ContainerCreateHeaders object itself. 
+ */ + public ContainerCreateHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerCreateResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerCreateResponse.java new file mode 100644 index 0000000000000..a34625f5e825c --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerCreateResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the create operation. + */ +public final class ContainerCreateResponse extends RestResponse { + /** + * Creates an instance of ContainerCreateResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public ContainerCreateResponse(HttpRequest request, int statusCode, ContainerCreateHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. 
+ */ + @Override + public ContainerCreateHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerDeleteHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerDeleteHeaders.java new file mode 100644 index 0000000000000..adacd2017b609 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerDeleteHeaders.java @@ -0,0 +1,112 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for Delete operation. + */ +@JacksonXmlRootElement(localName = "Container-Delete-Headers") +public final class ContainerDeleteHeaders { + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. 
+ */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the ContainerDeleteHeaders object itself. + */ + public ContainerDeleteHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the ContainerDeleteHeaders object itself. + */ + public ContainerDeleteHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the ContainerDeleteHeaders object itself. + */ + public ContainerDeleteHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerDeleteResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerDeleteResponse.java new file mode 100644 index 0000000000000..ddc8e824ce112 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerDeleteResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. 
+ * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the delete operation. + */ +public final class ContainerDeleteResponse extends RestResponse { + /** + * Creates an instance of ContainerDeleteResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public ContainerDeleteResponse(HttpRequest request, int statusCode, ContainerDeleteHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public ContainerDeleteHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerGetAccessPolicyHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerGetAccessPolicyHeaders.java new file mode 100644 index 0000000000000..00e7f518b9557 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerGetAccessPolicyHeaders.java @@ -0,0 +1,202 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for GetAccessPolicy operation. + */ +@JacksonXmlRootElement(localName = "Container-GetAccessPolicy-Headers") +public final class ContainerGetAccessPolicyHeaders { + /** + * Indicated whether data in the container may be accessed publicly and the + * level of access. Possible values include: 'container', 'blob'. + */ + @JsonProperty(value = "x-ms-blob-public-access") + private PublicAccessType blobPublicAccess; + + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the container was last modified. Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. 
+ */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the blobPublicAccess value. + * + * @return the blobPublicAccess value. + */ + public PublicAccessType blobPublicAccess() { + return this.blobPublicAccess; + } + + /** + * Set the blobPublicAccess value. + * + * @param blobPublicAccess the blobPublicAccess value to set. + * @return the ContainerGetAccessPolicyHeaders object itself. + */ + public ContainerGetAccessPolicyHeaders withBlobPublicAccess(PublicAccessType blobPublicAccess) { + this.blobPublicAccess = blobPublicAccess; + return this; + } + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the ContainerGetAccessPolicyHeaders object itself. + */ + public ContainerGetAccessPolicyHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the ContainerGetAccessPolicyHeaders object itself. + */ + public ContainerGetAccessPolicyHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the ContainerGetAccessPolicyHeaders object itself. 
+ */ + public ContainerGetAccessPolicyHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the ContainerGetAccessPolicyHeaders object itself. + */ + public ContainerGetAccessPolicyHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the ContainerGetAccessPolicyHeaders object itself. + */ + public ContainerGetAccessPolicyHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerGetAccessPolicyResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerGetAccessPolicyResponse.java new file mode 100644 index 0000000000000..b30c99d08fa8a --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerGetAccessPolicyResponse.java @@ -0,0 +1,51 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +/** + * Contains all response data for the getAccessPolicy operation. + */ +public final class ContainerGetAccessPolicyResponse extends RestResponse> { + /** + * Creates an instance of ContainerGetAccessPolicyResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public ContainerGetAccessPolicyResponse(HttpRequest request, int statusCode, ContainerGetAccessPolicyHeaders headers, Map rawHeaders, List body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public ContainerGetAccessPolicyHeaders headers() { + return super.headers(); + } + + /** + * @return the deserialized response body. + */ + @Override + public List body() { + return super.body(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerGetAccountInfoHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerGetAccountInfoHeaders.java new file mode 100644 index 0000000000000..baab0c1a676a2 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerGetAccountInfoHeaders.java @@ -0,0 +1,167 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. 
+ * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for GetAccountInfo operation. + */ +@JacksonXmlRootElement(localName = "Container-GetAccountInfo-Headers") +public final class ContainerGetAccountInfoHeaders { + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Identifies the sku name of the account. Possible values include: + * 'Standard_LRS', 'Standard_GRS', 'Standard_RAGRS', 'Standard_ZRS', + * 'Premium_LRS'. + */ + @JsonProperty(value = "x-ms-sku-name") + private SkuName skuName; + + /** + * Identifies the account kind. Possible values include: 'Storage', + * 'BlobStorage', 'StorageV2'. + */ + @JsonProperty(value = "x-ms-account-kind") + private AccountKind accountKind; + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the ContainerGetAccountInfoHeaders object itself. 
+ */ + public ContainerGetAccountInfoHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the ContainerGetAccountInfoHeaders object itself. + */ + public ContainerGetAccountInfoHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the ContainerGetAccountInfoHeaders object itself. + */ + public ContainerGetAccountInfoHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } + + /** + * Get the skuName value. + * + * @return the skuName value. + */ + public SkuName skuName() { + return this.skuName; + } + + /** + * Set the skuName value. + * + * @param skuName the skuName value to set. + * @return the ContainerGetAccountInfoHeaders object itself. + */ + public ContainerGetAccountInfoHeaders withSkuName(SkuName skuName) { + this.skuName = skuName; + return this; + } + + /** + * Get the accountKind value. + * + * @return the accountKind value. + */ + public AccountKind accountKind() { + return this.accountKind; + } + + /** + * Set the accountKind value. + * + * @param accountKind the accountKind value to set. + * @return the ContainerGetAccountInfoHeaders object itself. 
+ */ + public ContainerGetAccountInfoHeaders withAccountKind(AccountKind accountKind) { + this.accountKind = accountKind; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerGetAccountInfoResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerGetAccountInfoResponse.java new file mode 100644 index 0000000000000..2348d16b4c0f0 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerGetAccountInfoResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the getAccountInfo operation. + */ +public final class ContainerGetAccountInfoResponse extends RestResponse { + /** + * Creates an instance of ContainerGetAccountInfoResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public ContainerGetAccountInfoResponse(HttpRequest request, int statusCode, ContainerGetAccountInfoHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. 
+ */ + @Override + public ContainerGetAccountInfoHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerGetPropertiesHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerGetPropertiesHeaders.java new file mode 100644 index 0000000000000..e4e89a3820b17 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerGetPropertiesHeaders.java @@ -0,0 +1,363 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import com.microsoft.rest.v2.annotations.HeaderCollection; +import java.time.OffsetDateTime; +import java.util.Map; + +/** + * Defines headers for GetProperties operation. + */ +@JacksonXmlRootElement(localName = "Container-GetProperties-Headers") +public final class ContainerGetPropertiesHeaders { + /** + * The metadata property. + */ + @HeaderCollection("x-ms-meta-") + private Map metadata; + + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the container was last modified. 
Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * When a blob is leased, specifies whether the lease is of infinite or + * fixed duration. Possible values include: 'infinite', 'fixed'. + */ + @JsonProperty(value = "x-ms-lease-duration") + private LeaseDurationType leaseDuration; + + /** + * Lease state of the blob. Possible values include: 'available', 'leased', + * 'expired', 'breaking', 'broken'. + */ + @JsonProperty(value = "x-ms-lease-state") + private LeaseStateType leaseState; + + /** + * The current lease status of the blob. Possible values include: 'locked', + * 'unlocked'. + */ + @JsonProperty(value = "x-ms-lease-status") + private LeaseStatusType leaseStatus; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Indicated whether data in the container may be accessed publicly and the + * level of access. Possible values include: 'container', 'blob'. + */ + @JsonProperty(value = "x-ms-blob-public-access") + private PublicAccessType blobPublicAccess; + + /** + * Indicates whether the container has an immutability policy set on it. 
+ */ + @JsonProperty(value = "x-ms-has-immutability-policy") + private Boolean hasImmutabilityPolicy; + + /** + * Indicates whether the container has a legal hold. + */ + @JsonProperty(value = "x-ms-has-legal-hold") + private Boolean hasLegalHold; + + /** + * Get the metadata value. + * + * @return the metadata value. + */ + public Map metadata() { + return this.metadata; + } + + /** + * Set the metadata value. + * + * @param metadata the metadata value to set. + * @return the ContainerGetPropertiesHeaders object itself. + */ + public ContainerGetPropertiesHeaders withMetadata(Map metadata) { + this.metadata = metadata; + return this; + } + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the ContainerGetPropertiesHeaders object itself. + */ + public ContainerGetPropertiesHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the ContainerGetPropertiesHeaders object itself. + */ + public ContainerGetPropertiesHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the leaseDuration value. + * + * @return the leaseDuration value. + */ + public LeaseDurationType leaseDuration() { + return this.leaseDuration; + } + + /** + * Set the leaseDuration value. + * + * @param leaseDuration the leaseDuration value to set. + * @return the ContainerGetPropertiesHeaders object itself. 
+ */ + public ContainerGetPropertiesHeaders withLeaseDuration(LeaseDurationType leaseDuration) { + this.leaseDuration = leaseDuration; + return this; + } + + /** + * Get the leaseState value. + * + * @return the leaseState value. + */ + public LeaseStateType leaseState() { + return this.leaseState; + } + + /** + * Set the leaseState value. + * + * @param leaseState the leaseState value to set. + * @return the ContainerGetPropertiesHeaders object itself. + */ + public ContainerGetPropertiesHeaders withLeaseState(LeaseStateType leaseState) { + this.leaseState = leaseState; + return this; + } + + /** + * Get the leaseStatus value. + * + * @return the leaseStatus value. + */ + public LeaseStatusType leaseStatus() { + return this.leaseStatus; + } + + /** + * Set the leaseStatus value. + * + * @param leaseStatus the leaseStatus value to set. + * @return the ContainerGetPropertiesHeaders object itself. + */ + public ContainerGetPropertiesHeaders withLeaseStatus(LeaseStatusType leaseStatus) { + this.leaseStatus = leaseStatus; + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the ContainerGetPropertiesHeaders object itself. + */ + public ContainerGetPropertiesHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the ContainerGetPropertiesHeaders object itself. + */ + public ContainerGetPropertiesHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. 
+ */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the ContainerGetPropertiesHeaders object itself. + */ + public ContainerGetPropertiesHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } + + /** + * Get the blobPublicAccess value. + * + * @return the blobPublicAccess value. + */ + public PublicAccessType blobPublicAccess() { + return this.blobPublicAccess; + } + + /** + * Set the blobPublicAccess value. + * + * @param blobPublicAccess the blobPublicAccess value to set. + * @return the ContainerGetPropertiesHeaders object itself. + */ + public ContainerGetPropertiesHeaders withBlobPublicAccess(PublicAccessType blobPublicAccess) { + this.blobPublicAccess = blobPublicAccess; + return this; + } + + /** + * Get the hasImmutabilityPolicy value. + * + * @return the hasImmutabilityPolicy value. + */ + public Boolean hasImmutabilityPolicy() { + return this.hasImmutabilityPolicy; + } + + /** + * Set the hasImmutabilityPolicy value. + * + * @param hasImmutabilityPolicy the hasImmutabilityPolicy value to set. + * @return the ContainerGetPropertiesHeaders object itself. + */ + public ContainerGetPropertiesHeaders withHasImmutabilityPolicy(Boolean hasImmutabilityPolicy) { + this.hasImmutabilityPolicy = hasImmutabilityPolicy; + return this; + } + + /** + * Get the hasLegalHold value. + * + * @return the hasLegalHold value. + */ + public Boolean hasLegalHold() { + return this.hasLegalHold; + } + + /** + * Set the hasLegalHold value. + * + * @param hasLegalHold the hasLegalHold value to set. + * @return the ContainerGetPropertiesHeaders object itself. 
+ */ + public ContainerGetPropertiesHeaders withHasLegalHold(Boolean hasLegalHold) { + this.hasLegalHold = hasLegalHold; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerGetPropertiesResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerGetPropertiesResponse.java new file mode 100644 index 0000000000000..037e7949bb735 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerGetPropertiesResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the getProperties operation. + */ +public final class ContainerGetPropertiesResponse extends RestResponse { + /** + * Creates an instance of ContainerGetPropertiesResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public ContainerGetPropertiesResponse(HttpRequest request, int statusCode, ContainerGetPropertiesHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. 
+ */ + @Override + public ContainerGetPropertiesHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerItem.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerItem.java new file mode 100644 index 0000000000000..cbc20b073db82 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerItem.java @@ -0,0 +1,100 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import java.util.Map; + +/** + * An Azure Storage container. + */ +@JacksonXmlRootElement(localName = "Container") +public final class ContainerItem { + /** + * The name property. + */ + @JsonProperty(value = "Name", required = true) + private String name; + + /** + * The properties property. + */ + @JsonProperty(value = "Properties", required = true) + private ContainerProperties properties; + + /** + * The metadata property. + */ + @JsonProperty(value = "Metadata") + private Map metadata; + + /** + * Get the name value. + * + * @return the name value. + */ + public String name() { + return this.name; + } + + /** + * Set the name value. + * + * @param name the name value to set. + * @return the ContainerItem object itself. + */ + public ContainerItem withName(String name) { + this.name = name; + return this; + } + + /** + * Get the properties value. + * + * @return the properties value. 
+ */ + public ContainerProperties properties() { + return this.properties; + } + + /** + * Set the properties value. + * + * @param properties the properties value to set. + * @return the ContainerItem object itself. + */ + public ContainerItem withProperties(ContainerProperties properties) { + this.properties = properties; + return this; + } + + /** + * Get the metadata value. + * + * @return the metadata value. + */ + public Map metadata() { + return this.metadata; + } + + /** + * Set the metadata value. + * + * @param metadata the metadata value to set. + * @return the ContainerItem object itself. + */ + public ContainerItem withMetadata(Map metadata) { + this.metadata = metadata; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerListBlobFlatSegmentHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerListBlobFlatSegmentHeaders.java new file mode 100644 index 0000000000000..42340ff400f95 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerListBlobFlatSegmentHeaders.java @@ -0,0 +1,139 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for ListBlobFlatSegment operation. 
+ */ +@JacksonXmlRootElement(localName = "Container-ListBlobFlatSegment-Headers") +public final class ContainerListBlobFlatSegmentHeaders { + /** + * The media type of the body of the response. For List Blobs this is + * 'application/xml'. + */ + @JsonProperty(value = "Content-Type") + private String contentType; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the contentType value. + * + * @return the contentType value. + */ + public String contentType() { + return this.contentType; + } + + /** + * Set the contentType value. + * + * @param contentType the contentType value to set. + * @return the ContainerListBlobFlatSegmentHeaders object itself. + */ + public ContainerListBlobFlatSegmentHeaders withContentType(String contentType) { + this.contentType = contentType; + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the ContainerListBlobFlatSegmentHeaders object itself. + */ + public ContainerListBlobFlatSegmentHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. 
+ * + * @param version the version value to set. + * @return the ContainerListBlobFlatSegmentHeaders object itself. + */ + public ContainerListBlobFlatSegmentHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the ContainerListBlobFlatSegmentHeaders object itself. + */ + public ContainerListBlobFlatSegmentHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerListBlobFlatSegmentResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerListBlobFlatSegmentResponse.java new file mode 100644 index 0000000000000..6d751f0b2fd96 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerListBlobFlatSegmentResponse.java @@ -0,0 +1,49 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the listBlobFlatSegment operation. + */ +public final class ContainerListBlobFlatSegmentResponse extends RestResponse { + /** + * Creates an instance of ContainerListBlobFlatSegmentResponse. 
+ * + * @param request the request which resulted in this ContainerListBlobFlatSegmentResponse. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public ContainerListBlobFlatSegmentResponse(HttpRequest request, int statusCode, ContainerListBlobFlatSegmentHeaders headers, Map rawHeaders, ListBlobsFlatSegmentResponse body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public ContainerListBlobFlatSegmentHeaders headers() { + return super.headers(); + } + + /** + * @return the deserialized response body. + */ + @Override + public ListBlobsFlatSegmentResponse body() { + return super.body(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerListBlobHierarchySegmentHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerListBlobHierarchySegmentHeaders.java new file mode 100644 index 0000000000000..2c5f83d18305c --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerListBlobHierarchySegmentHeaders.java @@ -0,0 +1,139 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated.
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for ListBlobHierarchySegment operation. + */ +@JacksonXmlRootElement(localName = "Container-ListBlobHierarchySegment-Headers") +public final class ContainerListBlobHierarchySegmentHeaders { + /** + * The media type of the body of the response. For List Blobs this is + * 'application/xml'. + */ + @JsonProperty(value = "Content-Type") + private String contentType; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the contentType value. + * + * @return the contentType value. + */ + public String contentType() { + return this.contentType; + } + + /** + * Set the contentType value. + * + * @param contentType the contentType value to set. + * @return the ContainerListBlobHierarchySegmentHeaders object itself. + */ + public ContainerListBlobHierarchySegmentHeaders withContentType(String contentType) { + this.contentType = contentType; + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. 
+ */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the ContainerListBlobHierarchySegmentHeaders object itself. + */ + public ContainerListBlobHierarchySegmentHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the ContainerListBlobHierarchySegmentHeaders object itself. + */ + public ContainerListBlobHierarchySegmentHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the ContainerListBlobHierarchySegmentHeaders object itself. + */ + public ContainerListBlobHierarchySegmentHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerListBlobHierarchySegmentResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerListBlobHierarchySegmentResponse.java new file mode 100644 index 0000000000000..958af21272e38 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerListBlobHierarchySegmentResponse.java @@ -0,0 +1,49 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. 
+ * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the listBlobHierarchySegment operation. + */ +public final class ContainerListBlobHierarchySegmentResponse extends RestResponse { + /** + * Creates an instance of ContainerListBlobHierarchySegmentResponse. + * + * @param request the request which resulted in this ContainerListBlobHierarchySegmentResponse. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public ContainerListBlobHierarchySegmentResponse(HttpRequest request, int statusCode, ContainerListBlobHierarchySegmentHeaders headers, Map rawHeaders, ListBlobsHierarchySegmentResponse body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public ContainerListBlobHierarchySegmentHeaders headers() { + return super.headers(); + } + + /** + * @return the deserialized response body. + */ + @Override + public ListBlobsHierarchySegmentResponse body() { + return super.body(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerProperties.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerProperties.java new file mode 100644 index 0000000000000..2bc73073e1168 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerProperties.java @@ -0,0 +1,239 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License.
See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Properties of a container. + */ +@JacksonXmlRootElement(localName = "ContainerProperties") +public final class ContainerProperties { + /** + * The lastModified property. + */ + @JsonProperty(value = "Last-Modified", required = true) + private DateTimeRfc1123 lastModified; + + /** + * The etag property. + */ + @JsonProperty(value = "Etag", required = true) + private String etag; + + /** + * Possible values include: 'locked', 'unlocked'. + */ + @JsonProperty(value = "LeaseStatus") + private LeaseStatusType leaseStatus; + + /** + * Possible values include: 'available', 'leased', 'expired', 'breaking', + * 'broken'. + */ + @JsonProperty(value = "LeaseState") + private LeaseStateType leaseState; + + /** + * Possible values include: 'infinite', 'fixed'. + */ + @JsonProperty(value = "LeaseDuration") + private LeaseDurationType leaseDuration; + + /** + * Possible values include: 'container', 'blob'. + */ + @JsonProperty(value = "PublicAccess") + private PublicAccessType publicAccess; + + /** + * The hasImmutabilityPolicy property. + */ + @JsonProperty(value = "HasImmutabilityPolicy") + private Boolean hasImmutabilityPolicy; + + /** + * The hasLegalHold property. + */ + @JsonProperty(value = "HasLegalHold") + private Boolean hasLegalHold; + + /** + * Get the lastModified value. + * + * @return the lastModified value. 
+ */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the ContainerProperties object itself. + */ + public ContainerProperties withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the etag value. + * + * @return the etag value. + */ + public String etag() { + return this.etag; + } + + /** + * Set the etag value. + * + * @param etag the etag value to set. + * @return the ContainerProperties object itself. + */ + public ContainerProperties withEtag(String etag) { + this.etag = etag; + return this; + } + + /** + * Get the leaseStatus value. + * + * @return the leaseStatus value. + */ + public LeaseStatusType leaseStatus() { + return this.leaseStatus; + } + + /** + * Set the leaseStatus value. + * + * @param leaseStatus the leaseStatus value to set. + * @return the ContainerProperties object itself. + */ + public ContainerProperties withLeaseStatus(LeaseStatusType leaseStatus) { + this.leaseStatus = leaseStatus; + return this; + } + + /** + * Get the leaseState value. + * + * @return the leaseState value. + */ + public LeaseStateType leaseState() { + return this.leaseState; + } + + /** + * Set the leaseState value. + * + * @param leaseState the leaseState value to set. + * @return the ContainerProperties object itself. + */ + public ContainerProperties withLeaseState(LeaseStateType leaseState) { + this.leaseState = leaseState; + return this; + } + + /** + * Get the leaseDuration value. + * + * @return the leaseDuration value. + */ + public LeaseDurationType leaseDuration() { + return this.leaseDuration; + } + + /** + * Set the leaseDuration value. + * + * @param leaseDuration the leaseDuration value to set. 
+ * @return the ContainerProperties object itself. + */ + public ContainerProperties withLeaseDuration(LeaseDurationType leaseDuration) { + this.leaseDuration = leaseDuration; + return this; + } + + /** + * Get the publicAccess value. + * + * @return the publicAccess value. + */ + public PublicAccessType publicAccess() { + return this.publicAccess; + } + + /** + * Set the publicAccess value. + * + * @param publicAccess the publicAccess value to set. + * @return the ContainerProperties object itself. + */ + public ContainerProperties withPublicAccess(PublicAccessType publicAccess) { + this.publicAccess = publicAccess; + return this; + } + + /** + * Get the hasImmutabilityPolicy value. + * + * @return the hasImmutabilityPolicy value. + */ + public Boolean hasImmutabilityPolicy() { + return this.hasImmutabilityPolicy; + } + + /** + * Set the hasImmutabilityPolicy value. + * + * @param hasImmutabilityPolicy the hasImmutabilityPolicy value to set. + * @return the ContainerProperties object itself. + */ + public ContainerProperties withHasImmutabilityPolicy(Boolean hasImmutabilityPolicy) { + this.hasImmutabilityPolicy = hasImmutabilityPolicy; + return this; + } + + /** + * Get the hasLegalHold value. + * + * @return the hasLegalHold value. + */ + public Boolean hasLegalHold() { + return this.hasLegalHold; + } + + /** + * Set the hasLegalHold value. + * + * @param hasLegalHold the hasLegalHold value to set. + * @return the ContainerProperties object itself. 
+ */ + public ContainerProperties withHasLegalHold(Boolean hasLegalHold) { + this.hasLegalHold = hasLegalHold; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerReleaseLeaseHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerReleaseLeaseHeaders.java new file mode 100644 index 0000000000000..6e4526f5daa0f --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerReleaseLeaseHeaders.java @@ -0,0 +1,175 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for ReleaseLease operation. + */ +@JacksonXmlRootElement(localName = "Container-ReleaseLease-Headers") +public final class ContainerReleaseLeaseHeaders { + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the container was last modified. Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. 
+ */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the ContainerReleaseLeaseHeaders object itself. + */ + public ContainerReleaseLeaseHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the ContainerReleaseLeaseHeaders object itself. + */ + public ContainerReleaseLeaseHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the ContainerReleaseLeaseHeaders object itself. 
+ */ + public ContainerReleaseLeaseHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the ContainerReleaseLeaseHeaders object itself. + */ + public ContainerReleaseLeaseHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the ContainerReleaseLeaseHeaders object itself. + */ + public ContainerReleaseLeaseHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerReleaseLeaseResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerReleaseLeaseResponse.java new file mode 100644 index 0000000000000..21a2aefcaf35e --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerReleaseLeaseResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the releaseLease operation. + */ +public final class ContainerReleaseLeaseResponse extends RestResponse { + /** + * Creates an instance of ContainerReleaseLeaseResponse. + * + * @param request the request which resulted in this ContainerReleaseLeaseResponse. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public ContainerReleaseLeaseResponse(HttpRequest request, int statusCode, ContainerReleaseLeaseHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public ContainerReleaseLeaseHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerRenewLeaseHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerRenewLeaseHeaders.java new file mode 100644 index 0000000000000..2cd3948690ec7 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerRenewLeaseHeaders.java @@ -0,0 +1,201 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated.
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for RenewLease operation. + */ +@JacksonXmlRootElement(localName = "Container-RenewLease-Headers") +public final class ContainerRenewLeaseHeaders { + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the container was last modified. Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * Uniquely identifies a container's lease. + */ + @JsonProperty(value = "x-ms-lease-id") + private String leaseId; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. 
+ * + * @param eTag the eTag value to set. + * @return the ContainerRenewLeaseHeaders object itself. + */ + public ContainerRenewLeaseHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the ContainerRenewLeaseHeaders object itself. + */ + public ContainerRenewLeaseHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the leaseId value. + * + * @return the leaseId value. + */ + public String leaseId() { + return this.leaseId; + } + + /** + * Set the leaseId value. + * + * @param leaseId the leaseId value to set. + * @return the ContainerRenewLeaseHeaders object itself. + */ + public ContainerRenewLeaseHeaders withLeaseId(String leaseId) { + this.leaseId = leaseId; + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the ContainerRenewLeaseHeaders object itself. + */ + public ContainerRenewLeaseHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the ContainerRenewLeaseHeaders object itself. 
+ */ + public ContainerRenewLeaseHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the ContainerRenewLeaseHeaders object itself. + */ + public ContainerRenewLeaseHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerRenewLeaseResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerRenewLeaseResponse.java new file mode 100644 index 0000000000000..9c50a71486975 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerRenewLeaseResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the renewLease operation. + */ +public final class ContainerRenewLeaseResponse extends RestResponse { + /** + * Creates an instance of ContainerRenewLeaseResponse. + * + * @param request the request which resulted in this ContainerRenewLeaseResponse. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response.
+ * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public ContainerRenewLeaseResponse(HttpRequest request, int statusCode, ContainerRenewLeaseHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public ContainerRenewLeaseHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerSetAccessPolicyHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerSetAccessPolicyHeaders.java new file mode 100644 index 0000000000000..8c97145620a3e --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerSetAccessPolicyHeaders.java @@ -0,0 +1,175 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for SetAccessPolicy operation. + */ +@JacksonXmlRootElement(localName = "Container-SetAccessPolicy-Headers") +public final class ContainerSetAccessPolicyHeaders { + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. 
+ */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the container was last modified. Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the ContainerSetAccessPolicyHeaders object itself. + */ + public ContainerSetAccessPolicyHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the ContainerSetAccessPolicyHeaders object itself. 
+ */ + public ContainerSetAccessPolicyHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the ContainerSetAccessPolicyHeaders object itself. + */ + public ContainerSetAccessPolicyHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the ContainerSetAccessPolicyHeaders object itself. + */ + public ContainerSetAccessPolicyHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the ContainerSetAccessPolicyHeaders object itself. 
+ */ + public ContainerSetAccessPolicyHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerSetAccessPolicyResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerSetAccessPolicyResponse.java new file mode 100644 index 0000000000000..08faa8cb06ff9 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ContainerSetAccessPolicyResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the setAccessPolicy operation. + */ +public final class ContainerSetAccessPolicyResponse extends RestResponse { + /** + * Creates an instance of ContainerSetAccessPolicyResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public ContainerSetAccessPolicyResponse(HttpRequest request, int statusCode, ContainerSetAccessPolicyHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. 
/**
 * Copyright (c) Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See License.txt in the project root for
 * license information.
 *
 * Code generated by Microsoft (R) AutoRest Code Generator.
 * Changes may cause incorrect behavior and will be lost if the code is
 * regenerated.
 */

package com.microsoft.azure.storage.blob.models;

import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty;
import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement;
import com.microsoft.rest.v2.DateTimeRfc1123;
import java.time.OffsetDateTime;

/**
 * Defines headers for SetMetadata operation.
 */
@JacksonXmlRootElement(localName = "Container-SetMetadata-Headers")
public final class ContainerSetMetadataHeaders {
    /**
     * The ETag contains a value that you can use to perform operations
     * conditionally. If the request version is 2011-08-18 or newer, the ETag
     * value will be in quotes.
     */
    @JsonProperty(value = "ETag")
    private String eTag;

    /**
     * Returns the date and time the container was last modified. Any operation
     * that modifies the blob, including an update of the blob's metadata or
     * properties, changes the last-modified time of the blob.
     */
    @JsonProperty(value = "Last-Modified")
    private DateTimeRfc1123 lastModified;

    /**
     * This header uniquely identifies the request that was made and can be
     * used for troubleshooting the request.
     */
    @JsonProperty(value = "x-ms-request-id")
    private String requestId;

    /**
     * Indicates the version of the Blob service used to execute the request.
     * This header is returned for requests made against version 2009-09-19 and
     * above.
     */
    @JsonProperty(value = "x-ms-version")
    private String version;

    /**
     * UTC date/time value generated by the service that indicates the time at
     * which the response was initiated.
     */
    @JsonProperty(value = "Date")
    private DateTimeRfc1123 date;

    /**
     * Gets the ETag of the container after the metadata was set.
     *
     * @return the eTag value.
     */
    public String eTag() {
        return this.eTag;
    }

    /**
     * Sets the ETag of the container.
     *
     * @param eTag the eTag value to set.
     * @return the ContainerSetMetadataHeaders object itself.
     */
    public ContainerSetMetadataHeaders withETag(String eTag) {
        this.eTag = eTag;
        return this;
    }

    /**
     * Gets the last-modified time of the container, or null if the header was
     * not present.
     *
     * @return the lastModified value.
     */
    public OffsetDateTime lastModified() {
        return this.lastModified == null ? null : this.lastModified.dateTime();
    }

    /**
     * Sets the last-modified time of the container. The value is stored in
     * RFC1123 form, as the wire protocol requires.
     *
     * @param lastModified the lastModified value to set.
     * @return the ContainerSetMetadataHeaders object itself.
     */
    public ContainerSetMetadataHeaders withLastModified(OffsetDateTime lastModified) {
        this.lastModified = lastModified == null ? null : new DateTimeRfc1123(lastModified);
        return this;
    }

    /**
     * Gets the request id assigned by the service for troubleshooting.
     *
     * @return the requestId value.
     */
    public String requestId() {
        return this.requestId;
    }

    /**
     * Sets the request id.
     *
     * @param requestId the requestId value to set.
     * @return the ContainerSetMetadataHeaders object itself.
     */
    public ContainerSetMetadataHeaders withRequestId(String requestId) {
        this.requestId = requestId;
        return this;
    }

    /**
     * Gets the Blob service version that executed the request.
     *
     * @return the version value.
     */
    public String version() {
        return this.version;
    }

    /**
     * Sets the Blob service version.
     *
     * @param version the version value to set.
     * @return the ContainerSetMetadataHeaders object itself.
     */
    public ContainerSetMetadataHeaders withVersion(String version) {
        this.version = version;
        return this;
    }

    /**
     * Gets the service-generated UTC timestamp of the response, or null if the
     * header was not present.
     *
     * @return the date value.
     */
    public OffsetDateTime date() {
        return this.date == null ? null : this.date.dateTime();
    }

    /**
     * Sets the response timestamp. The value is stored in RFC1123 form, as the
     * wire protocol requires.
     *
     * @param date the date value to set.
     * @return the ContainerSetMetadataHeaders object itself.
     */
    public ContainerSetMetadataHeaders withDate(OffsetDateTime date) {
        this.date = date == null ? null : new DateTimeRfc1123(date);
        return this;
    }
}
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the setMetadata operation. + */ +public final class ContainerSetMetadataResponse extends RestResponse { + /** + * Creates an instance of ContainerSetMetadataResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public ContainerSetMetadataResponse(HttpRequest request, int statusCode, ContainerSetMetadataHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public ContainerSetMetadataHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/CopyStatusType.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/CopyStatusType.java new file mode 100644 index 0000000000000..1346c03338d57 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/CopyStatusType.java @@ -0,0 +1,71 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; + +/** + * Defines values for CopyStatusType. 
+ */ +public enum CopyStatusType { + /** + * Enum value pending. + */ + PENDING("pending"), + + /** + * Enum value success. + */ + SUCCESS("success"), + + /** + * Enum value aborted. + */ + ABORTED("aborted"), + + /** + * Enum value failed. + */ + FAILED("failed"); + + /** + * The actual serialized value for a CopyStatusType instance. + */ + private final String value; + + private CopyStatusType(String value) { + this.value = value; + } + + /** + * Parses a serialized value to a CopyStatusType instance. + * + * @param value the serialized value to parse. + * @return the parsed CopyStatusType object, or null if unable to parse. + */ + @JsonCreator + public static CopyStatusType fromString(String value) { + CopyStatusType[] items = CopyStatusType.values(); + for (CopyStatusType item : items) { + if (item.toString().equalsIgnoreCase(value)) { + return item; + } + } + return null; + } + + @JsonValue + @Override + public String toString() { + return this.value; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/CorsRule.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/CorsRule.java new file mode 100644 index 0000000000000..9571e18e9bbbe --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/CorsRule.java @@ -0,0 +1,164 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; + +/** + * CORS is an HTTP feature that enables a web application running under one + * domain to access resources in another domain. Web browsers implement a + * security restriction known as same-origin policy that prevents a web page + * from calling APIs in a different domain; CORS provides a secure way to allow + * one domain (the origin domain) to call APIs in another domain. + */ +@JacksonXmlRootElement(localName = "CorsRule") +public final class CorsRule { + /** + * The origin domains that are permitted to make a request against the + * storage service via CORS. The origin domain is the domain from which the + * request originates. Note that the origin must be an exact case-sensitive + * match with the origin that the user age sends to the service. You can + * also use the wildcard character '*' to allow all origin domains to make + * requests via CORS. + */ + @JsonProperty(value = "AllowedOrigins", required = true) + private String allowedOrigins; + + /** + * The methods (HTTP request verbs) that the origin domain may use for a + * CORS request. (comma separated). + */ + @JsonProperty(value = "AllowedMethods", required = true) + private String allowedMethods; + + /** + * the request headers that the origin domain may specify on the CORS + * request. + */ + @JsonProperty(value = "AllowedHeaders", required = true) + private String allowedHeaders; + + /** + * The response headers that may be sent in the response to the CORS + * request and exposed by the browser to the request issuer. + */ + @JsonProperty(value = "ExposedHeaders", required = true) + private String exposedHeaders; + + /** + * The maximum amount time that a browser should cache the preflight + * OPTIONS request. 
+ */ + @JsonProperty(value = "MaxAgeInSeconds", required = true) + private int maxAgeInSeconds; + + /** + * Get the allowedOrigins value. + * + * @return the allowedOrigins value. + */ + public String allowedOrigins() { + return this.allowedOrigins; + } + + /** + * Set the allowedOrigins value. + * + * @param allowedOrigins the allowedOrigins value to set. + * @return the CorsRule object itself. + */ + public CorsRule withAllowedOrigins(String allowedOrigins) { + this.allowedOrigins = allowedOrigins; + return this; + } + + /** + * Get the allowedMethods value. + * + * @return the allowedMethods value. + */ + public String allowedMethods() { + return this.allowedMethods; + } + + /** + * Set the allowedMethods value. + * + * @param allowedMethods the allowedMethods value to set. + * @return the CorsRule object itself. + */ + public CorsRule withAllowedMethods(String allowedMethods) { + this.allowedMethods = allowedMethods; + return this; + } + + /** + * Get the allowedHeaders value. + * + * @return the allowedHeaders value. + */ + public String allowedHeaders() { + return this.allowedHeaders; + } + + /** + * Set the allowedHeaders value. + * + * @param allowedHeaders the allowedHeaders value to set. + * @return the CorsRule object itself. + */ + public CorsRule withAllowedHeaders(String allowedHeaders) { + this.allowedHeaders = allowedHeaders; + return this; + } + + /** + * Get the exposedHeaders value. + * + * @return the exposedHeaders value. + */ + public String exposedHeaders() { + return this.exposedHeaders; + } + + /** + * Set the exposedHeaders value. + * + * @param exposedHeaders the exposedHeaders value to set. + * @return the CorsRule object itself. + */ + public CorsRule withExposedHeaders(String exposedHeaders) { + this.exposedHeaders = exposedHeaders; + return this; + } + + /** + * Get the maxAgeInSeconds value. + * + * @return the maxAgeInSeconds value. 
+ */ + public int maxAgeInSeconds() { + return this.maxAgeInSeconds; + } + + /** + * Set the maxAgeInSeconds value. + * + * @param maxAgeInSeconds the maxAgeInSeconds value to set. + * @return the CorsRule object itself. + */ + public CorsRule withMaxAgeInSeconds(int maxAgeInSeconds) { + this.maxAgeInSeconds = maxAgeInSeconds; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/CustomHierarchicalListingDeserializer.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/CustomHierarchicalListingDeserializer.java new file mode 100644 index 0000000000000..a890f45ced1bf --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/CustomHierarchicalListingDeserializer.java @@ -0,0 +1,57 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.JsonToken; +import com.fasterxml.jackson.databind.*; +import com.fasterxml.jackson.databind.deser.ResolvableDeserializer; +import com.fasterxml.jackson.databind.deser.std.StdDeserializer; +import com.fasterxml.jackson.databind.type.TypeFactory; + +import java.io.IOException; +import java.util.ArrayList; + +// implement ContextualDeserializer or ResolvableDeserializer? 
+final class CustomHierarchicalListingDeserializer extends JsonDeserializer { + + @Override + public BlobHierarchyListSegment deserialize(JsonParser p, DeserializationContext ctxt) throws IOException { + ArrayList blobItems = new ArrayList<>(); + ArrayList blobPrefixes = new ArrayList<>(); + + JsonDeserializer blobItemDeserializer = + ctxt.findRootValueDeserializer(ctxt.constructType(BlobItem.class)); + JsonDeserializer blobPrefixDeserializer = + ctxt.findRootValueDeserializer(ctxt.constructType(BlobPrefix.class)); + + for (JsonToken currentToken = p.nextToken(); !currentToken.name().equals("END_OBJECT"); + currentToken = p.nextToken()) { + // Get to the root element of the next item. + p.nextToken(); + + if (p.getCurrentName().equals("Blob")) { + blobItems.add((BlobItem)blobItemDeserializer.deserialize(p, ctxt)); + } + else if (p.getCurrentName().equals("BlobPrefix")) { + blobPrefixes.add((BlobPrefix)blobPrefixDeserializer.deserialize(p, ctxt)); + } + } + + return new BlobHierarchyListSegment().withBlobItems(blobItems).withBlobPrefixes(blobPrefixes); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/DeleteSnapshotsOptionType.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/DeleteSnapshotsOptionType.java new file mode 100644 index 0000000000000..19ebff7a0b015 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/DeleteSnapshotsOptionType.java @@ -0,0 +1,61 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; + +/** + * Defines values for DeleteSnapshotsOptionType. + */ +public enum DeleteSnapshotsOptionType { + /** + * Enum value include. + */ + INCLUDE("include"), + + /** + * Enum value only. + */ + ONLY("only"); + + /** + * The actual serialized value for a DeleteSnapshotsOptionType instance. + */ + private final String value; + + private DeleteSnapshotsOptionType(String value) { + this.value = value; + } + + /** + * Parses a serialized value to a DeleteSnapshotsOptionType instance. + * + * @param value the serialized value to parse. + * @return the parsed DeleteSnapshotsOptionType object, or null if unable to parse. + */ + @JsonCreator + public static DeleteSnapshotsOptionType fromString(String value) { + DeleteSnapshotsOptionType[] items = DeleteSnapshotsOptionType.values(); + for (DeleteSnapshotsOptionType item : items) { + if (item.toString().equalsIgnoreCase(value)) { + return item; + } + } + return null; + } + + @JsonValue + @Override + public String toString() { + return this.value; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/GeoReplication.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/GeoReplication.java new file mode 100644 index 0000000000000..dd622489e8dbe --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/GeoReplication.java @@ -0,0 +1,86 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
/**
 * Copyright (c) Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See License.txt in the project root for
 * license information.
 *
 * Code generated by Microsoft (R) AutoRest Code Generator.
 * Changes may cause incorrect behavior and will be lost if the code is
 * regenerated.
 */

package com.microsoft.azure.storage.blob.models;

import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty;
import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement;
import com.microsoft.rest.v2.DateTimeRfc1123;
import java.time.OffsetDateTime;

/**
 * Geo-Replication information for the Secondary Storage Service.
 */
@JacksonXmlRootElement(localName = "GeoReplication")
public final class GeoReplication {
    /**
     * The status of the secondary location. Possible values include: 'live',
     * 'bootstrap', 'unavailable'.
     */
    @JsonProperty(value = "Status", required = true)
    private GeoReplicationStatusType status;

    /**
     * A GMT date/time value, to the second. All primary writes preceding this
     * value are guaranteed to be available for read operations at the
     * secondary. Primary writes after this point in time may or may not be
     * available for reads.
     */
    @JsonProperty(value = "LastSyncTime", required = true)
    private DateTimeRfc1123 lastSyncTime;

    /**
     * Gets the replication status of the secondary location.
     *
     * @return the status value.
     */
    public GeoReplicationStatusType status() {
        return this.status;
    }

    /**
     * Sets the replication status of the secondary location.
     *
     * @param status the status value to set.
     * @return the GeoReplication object itself.
     */
    public GeoReplication withStatus(GeoReplicationStatusType status) {
        this.status = status;
        return this;
    }

    /**
     * Gets the time up to which all primary writes are readable at the
     * secondary, or null if not present.
     *
     * @return the lastSyncTime value.
     */
    public OffsetDateTime lastSyncTime() {
        return this.lastSyncTime == null ? null : this.lastSyncTime.dateTime();
    }

    /**
     * Sets the last sync time. The value is stored in RFC1123 form, as the
     * wire protocol requires.
     *
     * @param lastSyncTime the lastSyncTime value to set.
     * @return the GeoReplication object itself.
     */
    public GeoReplication withLastSyncTime(OffsetDateTime lastSyncTime) {
        this.lastSyncTime = lastSyncTime == null ? null : new DateTimeRfc1123(lastSyncTime);
        return this;
    }
}
+ */ + @JsonCreator + public static GeoReplicationStatusType fromString(String name) { + return fromString(name, GeoReplicationStatusType.class); + } + + /** + * @return known GeoReplicationStatusType values. + */ + public static Collection values() { + return values(GeoReplicationStatusType.class); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/LeaseAccessConditions.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/LeaseAccessConditions.java new file mode 100644 index 0000000000000..aee109b1759c1 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/LeaseAccessConditions.java @@ -0,0 +1,48 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; + +/** + * Additional parameters for a set of operations. + */ +@JacksonXmlRootElement(localName = "lease-access-conditions") +public final class LeaseAccessConditions { + /** + * If specified, the operation only succeeds if the resource's lease is + * active and matches this ID. + */ + @JsonProperty(value = "LeaseId") + private String leaseId; + + /** + * Get the leaseId value. + * + * @return the leaseId value. + */ + public String leaseId() { + return this.leaseId; + } + + /** + * Set the leaseId value. + * + * @param leaseId the leaseId value to set. + * @return the LeaseAccessConditions object itself. 
+ */ + public LeaseAccessConditions withLeaseId(String leaseId) { + this.leaseId = leaseId; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/LeaseDurationType.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/LeaseDurationType.java new file mode 100644 index 0000000000000..1add38cbc9fb2 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/LeaseDurationType.java @@ -0,0 +1,61 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; + +/** + * Defines values for LeaseDurationType. + */ +public enum LeaseDurationType { + /** + * Enum value infinite. + */ + INFINITE("infinite"), + + /** + * Enum value fixed. + */ + FIXED("fixed"); + + /** + * The actual serialized value for a LeaseDurationType instance. + */ + private final String value; + + private LeaseDurationType(String value) { + this.value = value; + } + + /** + * Parses a serialized value to a LeaseDurationType instance. + * + * @param value the serialized value to parse. + * @return the parsed LeaseDurationType object, or null if unable to parse. 
+ */ + @JsonCreator + public static LeaseDurationType fromString(String value) { + LeaseDurationType[] items = LeaseDurationType.values(); + for (LeaseDurationType item : items) { + if (item.toString().equalsIgnoreCase(value)) { + return item; + } + } + return null; + } + + @JsonValue + @Override + public String toString() { + return this.value; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/LeaseStateType.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/LeaseStateType.java new file mode 100644 index 0000000000000..fcce6172ea440 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/LeaseStateType.java @@ -0,0 +1,76 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; + +/** + * Defines values for LeaseStateType. + */ +public enum LeaseStateType { + /** + * Enum value available. + */ + AVAILABLE("available"), + + /** + * Enum value leased. + */ + LEASED("leased"), + + /** + * Enum value expired. + */ + EXPIRED("expired"), + + /** + * Enum value breaking. + */ + BREAKING("breaking"), + + /** + * Enum value broken. + */ + BROKEN("broken"); + + /** + * The actual serialized value for a LeaseStateType instance. + */ + private final String value; + + private LeaseStateType(String value) { + this.value = value; + } + + /** + * Parses a serialized value to a LeaseStateType instance. + * + * @param value the serialized value to parse. + * @return the parsed LeaseStateType object, or null if unable to parse. 
+ */ + @JsonCreator + public static LeaseStateType fromString(String value) { + LeaseStateType[] items = LeaseStateType.values(); + for (LeaseStateType item : items) { + if (item.toString().equalsIgnoreCase(value)) { + return item; + } + } + return null; + } + + @JsonValue + @Override + public String toString() { + return this.value; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/LeaseStatusType.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/LeaseStatusType.java new file mode 100644 index 0000000000000..bb3392eb5e460 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/LeaseStatusType.java @@ -0,0 +1,61 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; + +/** + * Defines values for LeaseStatusType. + */ +public enum LeaseStatusType { + /** + * Enum value locked. + */ + LOCKED("locked"), + + /** + * Enum value unlocked. + */ + UNLOCKED("unlocked"); + + /** + * The actual serialized value for a LeaseStatusType instance. + */ + private final String value; + + private LeaseStatusType(String value) { + this.value = value; + } + + /** + * Parses a serialized value to a LeaseStatusType instance. + * + * @param value the serialized value to parse. + * @return the parsed LeaseStatusType object, or null if unable to parse. 
+ */ + @JsonCreator + public static LeaseStatusType fromString(String value) { + LeaseStatusType[] items = LeaseStatusType.values(); + for (LeaseStatusType item : items) { + if (item.toString().equalsIgnoreCase(value)) { + return item; + } + } + return null; + } + + @JsonValue + @Override + public String toString() { + return this.value; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ListBlobsFlatSegmentResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ListBlobsFlatSegmentResponse.java new file mode 100644 index 0000000000000..629a79f53f476 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ListBlobsFlatSegmentResponse.java @@ -0,0 +1,229 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; + +/** + * An enumeration of blobs. + */ +@JacksonXmlRootElement(localName = "EnumerationResults") +public final class ListBlobsFlatSegmentResponse { + /** + * The serviceEndpoint property. + */ + @JacksonXmlProperty(localName = "ServiceEndpoint", isAttribute = true) + private String serviceEndpoint; + + /** + * The containerName property. + */ + @JacksonXmlProperty(localName = "ContainerName", isAttribute = true) + private String containerName; + + /** + * The prefix property. + */ + @JsonProperty(value = "Prefix", required = true) + private String prefix; + + /** + * The marker property. 
+ */ + @JsonProperty(value = "Marker", required = true) + private String marker; + + /** + * The maxResults property. + */ + @JsonProperty(value = "MaxResults", required = true) + private int maxResults; + + /** + * The delimiter property. + */ + @JsonProperty(value = "Delimiter", required = true) + private String delimiter; + + /** + * The segment property. + */ + @JsonProperty(value = "Blobs", required = true) + private BlobFlatListSegment segment; + + /** + * The nextMarker property. + */ + @JsonProperty(value = "NextMarker", required = true) + private String nextMarker; + + /** + * Get the serviceEndpoint value. + * + * @return the serviceEndpoint value. + */ + public String serviceEndpoint() { + return this.serviceEndpoint; + } + + /** + * Set the serviceEndpoint value. + * + * @param serviceEndpoint the serviceEndpoint value to set. + * @return the ListBlobsFlatSegmentResponse object itself. + */ + public ListBlobsFlatSegmentResponse withServiceEndpoint(String serviceEndpoint) { + this.serviceEndpoint = serviceEndpoint; + return this; + } + + /** + * Get the containerName value. + * + * @return the containerName value. + */ + public String containerName() { + return this.containerName; + } + + /** + * Set the containerName value. + * + * @param containerName the containerName value to set. + * @return the ListBlobsFlatSegmentResponse object itself. + */ + public ListBlobsFlatSegmentResponse withContainerName(String containerName) { + this.containerName = containerName; + return this; + } + + /** + * Get the prefix value. + * + * @return the prefix value. + */ + public String prefix() { + return this.prefix; + } + + /** + * Set the prefix value. + * + * @param prefix the prefix value to set. + * @return the ListBlobsFlatSegmentResponse object itself. + */ + public ListBlobsFlatSegmentResponse withPrefix(String prefix) { + this.prefix = prefix; + return this; + } + + /** + * Get the marker value. + * + * @return the marker value. 
+ */ + public String marker() { + return this.marker; + } + + /** + * Set the marker value. + * + * @param marker the marker value to set. + * @return the ListBlobsFlatSegmentResponse object itself. + */ + public ListBlobsFlatSegmentResponse withMarker(String marker) { + this.marker = marker; + return this; + } + + /** + * Get the maxResults value. + * + * @return the maxResults value. + */ + public int maxResults() { + return this.maxResults; + } + + /** + * Set the maxResults value. + * + * @param maxResults the maxResults value to set. + * @return the ListBlobsFlatSegmentResponse object itself. + */ + public ListBlobsFlatSegmentResponse withMaxResults(int maxResults) { + this.maxResults = maxResults; + return this; + } + + /** + * Get the delimiter value. + * + * @return the delimiter value. + */ + public String delimiter() { + return this.delimiter; + } + + /** + * Set the delimiter value. + * + * @param delimiter the delimiter value to set. + * @return the ListBlobsFlatSegmentResponse object itself. + */ + public ListBlobsFlatSegmentResponse withDelimiter(String delimiter) { + this.delimiter = delimiter; + return this; + } + + /** + * Get the segment value. + * + * @return the segment value. + */ + public BlobFlatListSegment segment() { + return this.segment; + } + + /** + * Set the segment value. + * + * @param segment the segment value to set. + * @return the ListBlobsFlatSegmentResponse object itself. + */ + public ListBlobsFlatSegmentResponse withSegment(BlobFlatListSegment segment) { + this.segment = segment; + return this; + } + + /** + * Get the nextMarker value. + * + * @return the nextMarker value. + */ + public String nextMarker() { + return this.nextMarker; + } + + /** + * Set the nextMarker value. + * + * @param nextMarker the nextMarker value to set. + * @return the ListBlobsFlatSegmentResponse object itself. 
+ */ + public ListBlobsFlatSegmentResponse withNextMarker(String nextMarker) { + this.nextMarker = nextMarker; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ListBlobsHierarchySegmentResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ListBlobsHierarchySegmentResponse.java new file mode 100644 index 0000000000000..12ba3f950c78f --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ListBlobsHierarchySegmentResponse.java @@ -0,0 +1,229 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; + +/** + * An enumeration of blobs. + */ +@JacksonXmlRootElement(localName = "EnumerationResults") +public final class ListBlobsHierarchySegmentResponse { + /** + * The serviceEndpoint property. + */ + @JacksonXmlProperty(localName = "ServiceEndpoint", isAttribute = true) + private String serviceEndpoint; + + /** + * The containerName property. + */ + @JacksonXmlProperty(localName = "ContainerName", isAttribute = true) + private String containerName; + + /** + * The prefix property. + */ + @JsonProperty(value = "Prefix", required = true) + private String prefix; + + /** + * The marker property. + */ + @JsonProperty(value = "Marker", required = true) + private String marker; + + /** + * The maxResults property. 
+ */ + @JsonProperty(value = "MaxResults", required = true) + private int maxResults; + + /** + * The delimiter property. + */ + @JsonProperty(value = "Delimiter", required = true) + private String delimiter; + + /** + * The segment property. + */ + @JsonProperty(value = "Blobs", required = true) + private BlobHierarchyListSegment segment; + + /** + * The nextMarker property. + */ + @JsonProperty(value = "NextMarker", required = true) + private String nextMarker; + + /** + * Get the serviceEndpoint value. + * + * @return the serviceEndpoint value. + */ + public String serviceEndpoint() { + return this.serviceEndpoint; + } + + /** + * Set the serviceEndpoint value. + * + * @param serviceEndpoint the serviceEndpoint value to set. + * @return the ListBlobsHierarchySegmentResponse object itself. + */ + public ListBlobsHierarchySegmentResponse withServiceEndpoint(String serviceEndpoint) { + this.serviceEndpoint = serviceEndpoint; + return this; + } + + /** + * Get the containerName value. + * + * @return the containerName value. + */ + public String containerName() { + return this.containerName; + } + + /** + * Set the containerName value. + * + * @param containerName the containerName value to set. + * @return the ListBlobsHierarchySegmentResponse object itself. + */ + public ListBlobsHierarchySegmentResponse withContainerName(String containerName) { + this.containerName = containerName; + return this; + } + + /** + * Get the prefix value. + * + * @return the prefix value. + */ + public String prefix() { + return this.prefix; + } + + /** + * Set the prefix value. + * + * @param prefix the prefix value to set. + * @return the ListBlobsHierarchySegmentResponse object itself. + */ + public ListBlobsHierarchySegmentResponse withPrefix(String prefix) { + this.prefix = prefix; + return this; + } + + /** + * Get the marker value. + * + * @return the marker value. + */ + public String marker() { + return this.marker; + } + + /** + * Set the marker value. 
+ * + * @param marker the marker value to set. + * @return the ListBlobsHierarchySegmentResponse object itself. + */ + public ListBlobsHierarchySegmentResponse withMarker(String marker) { + this.marker = marker; + return this; + } + + /** + * Get the maxResults value. + * + * @return the maxResults value. + */ + public int maxResults() { + return this.maxResults; + } + + /** + * Set the maxResults value. + * + * @param maxResults the maxResults value to set. + * @return the ListBlobsHierarchySegmentResponse object itself. + */ + public ListBlobsHierarchySegmentResponse withMaxResults(int maxResults) { + this.maxResults = maxResults; + return this; + } + + /** + * Get the delimiter value. + * + * @return the delimiter value. + */ + public String delimiter() { + return this.delimiter; + } + + /** + * Set the delimiter value. + * + * @param delimiter the delimiter value to set. + * @return the ListBlobsHierarchySegmentResponse object itself. + */ + public ListBlobsHierarchySegmentResponse withDelimiter(String delimiter) { + this.delimiter = delimiter; + return this; + } + + /** + * Get the segment value. + * + * @return the segment value. + */ + public BlobHierarchyListSegment segment() { + return this.segment; + } + + /** + * Set the segment value. + * + * @param segment the segment value to set. + * @return the ListBlobsHierarchySegmentResponse object itself. + */ + public ListBlobsHierarchySegmentResponse withSegment(BlobHierarchyListSegment segment) { + this.segment = segment; + return this; + } + + /** + * Get the nextMarker value. + * + * @return the nextMarker value. + */ + public String nextMarker() { + return this.nextMarker; + } + + /** + * Set the nextMarker value. + * + * @param nextMarker the nextMarker value to set. + * @return the ListBlobsHierarchySegmentResponse object itself. 
+ */ + public ListBlobsHierarchySegmentResponse withNextMarker(String nextMarker) { + this.nextMarker = nextMarker; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ListBlobsIncludeItem.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ListBlobsIncludeItem.java new file mode 100644 index 0000000000000..8cc85bb21b18b --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ListBlobsIncludeItem.java @@ -0,0 +1,76 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; + +/** + * Defines values for ListBlobsIncludeItem. + */ +public enum ListBlobsIncludeItem { + /** + * Enum value snapshots. + */ + SNAPSHOTS("snapshots"), + + /** + * Enum value metadata. + */ + METADATA("metadata"), + + /** + * Enum value uncommittedblobs. + */ + UNCOMMITTEDBLOBS("uncommittedblobs"), + + /** + * Enum value copy. + */ + COPY("copy"), + + /** + * Enum value deleted. + */ + DELETED("deleted"); + + /** + * The actual serialized value for a ListBlobsIncludeItem instance. + */ + private final String value; + + private ListBlobsIncludeItem(String value) { + this.value = value; + } + + /** + * Parses a serialized value to a ListBlobsIncludeItem instance. + * + * @param value the serialized value to parse. + * @return the parsed ListBlobsIncludeItem object, or null if unable to parse. 
+ */ + @JsonCreator + public static ListBlobsIncludeItem fromString(String value) { + ListBlobsIncludeItem[] items = ListBlobsIncludeItem.values(); + for (ListBlobsIncludeItem item : items) { + if (item.toString().equalsIgnoreCase(value)) { + return item; + } + } + return null; + } + + @JsonValue + @Override + public String toString() { + return this.value; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ListContainersIncludeType.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ListContainersIncludeType.java new file mode 100644 index 0000000000000..d9ee2d5e08e93 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ListContainersIncludeType.java @@ -0,0 +1,56 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; + +/** + * Defines values for ListContainersIncludeType. + */ +public enum ListContainersIncludeType { + /** + * Enum value metadata. + */ + METADATA("metadata"); + + /** + * The actual serialized value for a ListContainersIncludeType instance. + */ + private final String value; + + private ListContainersIncludeType(String value) { + this.value = value; + } + + /** + * Parses a serialized value to a ListContainersIncludeType instance. + * + * @param value the serialized value to parse. + * @return the parsed ListContainersIncludeType object, or null if unable to parse. 
+ */ + @JsonCreator + public static ListContainersIncludeType fromString(String value) { + ListContainersIncludeType[] items = ListContainersIncludeType.values(); + for (ListContainersIncludeType item : items) { + if (item.toString().equalsIgnoreCase(value)) { + return item; + } + } + return null; + } + + @JsonValue + @Override + public String toString() { + return this.value; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ListContainersSegmentResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ListContainersSegmentResponse.java new file mode 100644 index 0000000000000..375389d231e6d --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ListContainersSegmentResponse.java @@ -0,0 +1,193 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import java.util.ArrayList; +import java.util.List; + +/** + * An enumeration of containers. + */ +@JacksonXmlRootElement(localName = "EnumerationResults") +public final class ListContainersSegmentResponse { + /** + * The serviceEndpoint property. + */ + @JacksonXmlProperty(localName = "ServiceEndpoint", isAttribute = true) + private String serviceEndpoint; + + /** + * The prefix property. + */ + @JsonProperty(value = "Prefix", required = true) + private String prefix; + + /** + * The marker property. 
+ */ + @JsonProperty(value = "Marker") + private String marker; + + /** + * The maxResults property. + */ + @JsonProperty(value = "MaxResults", required = true) + private int maxResults; + + private static final class ContainersWrapper { + @JacksonXmlProperty(localName = "Container") + private final List items; + + @JsonCreator + private ContainersWrapper(@JacksonXmlProperty(localName = "Container") List items) { + this.items = items; + } + } + + /** + * The containerItems property. + */ + @JsonProperty(value = "Containers", required = true) + private ContainersWrapper containerItems; + + /** + * The nextMarker property. + */ + @JsonProperty(value = "NextMarker", required = true) + private String nextMarker; + + /** + * Get the serviceEndpoint value. + * + * @return the serviceEndpoint value. + */ + public String serviceEndpoint() { + return this.serviceEndpoint; + } + + /** + * Set the serviceEndpoint value. + * + * @param serviceEndpoint the serviceEndpoint value to set. + * @return the ListContainersSegmentResponse object itself. + */ + public ListContainersSegmentResponse withServiceEndpoint(String serviceEndpoint) { + this.serviceEndpoint = serviceEndpoint; + return this; + } + + /** + * Get the prefix value. + * + * @return the prefix value. + */ + public String prefix() { + return this.prefix; + } + + /** + * Set the prefix value. + * + * @param prefix the prefix value to set. + * @return the ListContainersSegmentResponse object itself. + */ + public ListContainersSegmentResponse withPrefix(String prefix) { + this.prefix = prefix; + return this; + } + + /** + * Get the marker value. + * + * @return the marker value. + */ + public String marker() { + return this.marker; + } + + /** + * Set the marker value. + * + * @param marker the marker value to set. + * @return the ListContainersSegmentResponse object itself. 
+ */ + public ListContainersSegmentResponse withMarker(String marker) { + this.marker = marker; + return this; + } + + /** + * Get the maxResults value. + * + * @return the maxResults value. + */ + public int maxResults() { + return this.maxResults; + } + + /** + * Set the maxResults value. + * + * @param maxResults the maxResults value to set. + * @return the ListContainersSegmentResponse object itself. + */ + public ListContainersSegmentResponse withMaxResults(int maxResults) { + this.maxResults = maxResults; + return this; + } + + /** + * Get the containerItems value. + * + * @return the containerItems value. + */ + public List containerItems() { + if (this.containerItems == null) { + this.containerItems = new ContainersWrapper(new ArrayList()); + } + return this.containerItems.items; + } + + /** + * Set the containerItems value. + * + * @param containerItems the containerItems value to set. + * @return the ListContainersSegmentResponse object itself. + */ + public ListContainersSegmentResponse withContainerItems(List containerItems) { + this.containerItems = new ContainersWrapper(containerItems); + return this; + } + + /** + * Get the nextMarker value. + * + * @return the nextMarker value. + */ + public String nextMarker() { + return this.nextMarker; + } + + /** + * Set the nextMarker value. + * + * @param nextMarker the nextMarker value to set. + * @return the ListContainersSegmentResponse object itself. + */ + public ListContainersSegmentResponse withNextMarker(String nextMarker) { + this.nextMarker = nextMarker; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/Logging.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/Logging.java new file mode 100644 index 0000000000000..33b06c6d35df3 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/Logging.java @@ -0,0 +1,151 @@ +/** + * Copyright (c) Microsoft Corporation. 
All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; + +/** + * Azure Analytics Logging settings. + */ +@JacksonXmlRootElement(localName = "Logging") +public final class Logging { + /** + * The version of Storage Analytics to configure. + */ + @JsonProperty(value = "Version", required = true) + private String version; + + /** + * Indicates whether all delete requests should be logged. + */ + @JsonProperty(value = "Delete", required = true) + private boolean delete; + + /** + * Indicates whether all read requests should be logged. + */ + @JsonProperty(value = "Read", required = true) + private boolean read; + + /** + * Indicates whether all write requests should be logged. + */ + @JsonProperty(value = "Write", required = true) + private boolean write; + + /** + * The retentionPolicy property. + */ + @JsonProperty(value = "RetentionPolicy", required = true) + private RetentionPolicy retentionPolicy; + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the Logging object itself. + */ + public Logging withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the delete value. + * + * @return the delete value. + */ + public boolean delete() { + return this.delete; + } + + /** + * Set the delete value. + * + * @param delete the delete value to set. 
+ * @return the Logging object itself. + */ + public Logging withDelete(boolean delete) { + this.delete = delete; + return this; + } + + /** + * Get the read value. + * + * @return the read value. + */ + public boolean read() { + return this.read; + } + + /** + * Set the read value. + * + * @param read the read value to set. + * @return the Logging object itself. + */ + public Logging withRead(boolean read) { + this.read = read; + return this; + } + + /** + * Get the write value. + * + * @return the write value. + */ + public boolean write() { + return this.write; + } + + /** + * Set the write value. + * + * @param write the write value to set. + * @return the Logging object itself. + */ + public Logging withWrite(boolean write) { + this.write = write; + return this; + } + + /** + * Get the retentionPolicy value. + * + * @return the retentionPolicy value. + */ + public RetentionPolicy retentionPolicy() { + return this.retentionPolicy; + } + + /** + * Set the retentionPolicy value. + * + * @param retentionPolicy the retentionPolicy value to set. + * @return the Logging object itself. + */ + public Logging withRetentionPolicy(RetentionPolicy retentionPolicy) { + this.retentionPolicy = retentionPolicy; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/Metrics.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/Metrics.java new file mode 100644 index 0000000000000..4b18484e65afd --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/Metrics.java @@ -0,0 +1,127 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; + +/** + * a summary of request statistics grouped by API in hour or minute aggregates + * for blobs. + */ +@JacksonXmlRootElement(localName = "Metrics") +public final class Metrics { + /** + * The version of Storage Analytics to configure. + */ + @JsonProperty(value = "Version") + private String version; + + /** + * Indicates whether metrics are enabled for the Blob service. + */ + @JsonProperty(value = "Enabled", required = true) + private boolean enabled; + + /** + * Indicates whether metrics should generate summary statistics for called + * API operations. + */ + @JsonProperty(value = "IncludeAPIs") + private Boolean includeAPIs; + + /** + * The retentionPolicy property. + */ + @JsonProperty(value = "RetentionPolicy") + private RetentionPolicy retentionPolicy; + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the Metrics object itself. + */ + public Metrics withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the enabled value. + * + * @return the enabled value. + */ + public boolean enabled() { + return this.enabled; + } + + /** + * Set the enabled value. + * + * @param enabled the enabled value to set. + * @return the Metrics object itself. + */ + public Metrics withEnabled(boolean enabled) { + this.enabled = enabled; + return this; + } + + /** + * Get the includeAPIs value. + * + * @return the includeAPIs value. + */ + public Boolean includeAPIs() { + return this.includeAPIs; + } + + /** + * Set the includeAPIs value. + * + * @param includeAPIs the includeAPIs value to set. 
+ * @return the Metrics object itself. + */ + public Metrics withIncludeAPIs(Boolean includeAPIs) { + this.includeAPIs = includeAPIs; + return this; + } + + /** + * Get the retentionPolicy value. + * + * @return the retentionPolicy value. + */ + public RetentionPolicy retentionPolicy() { + return this.retentionPolicy; + } + + /** + * Set the retentionPolicy value. + * + * @param retentionPolicy the retentionPolicy value to set. + * @return the Metrics object itself. + */ + public Metrics withRetentionPolicy(RetentionPolicy retentionPolicy) { + this.retentionPolicy = retentionPolicy; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ModifiedAccessConditions.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ModifiedAccessConditions.java new file mode 100644 index 0000000000000..23966fad48ac5 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ModifiedAccessConditions.java @@ -0,0 +1,143 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Additional parameters for a set of operations. + */ +@JacksonXmlRootElement(localName = "modified-access-conditions") +public final class ModifiedAccessConditions { + /** + * Specify this header value to operate only on a blob if it has been + * modified since the specified date/time. 
+ */ + @JsonProperty(value = "IfModifiedSince") + private DateTimeRfc1123 ifModifiedSince; + + /** + * Specify this header value to operate only on a blob if it has not been + * modified since the specified date/time. + */ + @JsonProperty(value = "IfUnmodifiedSince") + private DateTimeRfc1123 ifUnmodifiedSince; + + /** + * Specify an ETag value to operate only on blobs with a matching value. + */ + @JsonProperty(value = "IfMatch") + private String ifMatch; + + /** + * Specify an ETag value to operate only on blobs without a matching value. + */ + @JsonProperty(value = "IfNoneMatch") + private String ifNoneMatch; + + /** + * Get the ifModifiedSince value. + * + * @return the ifModifiedSince value. + */ + public OffsetDateTime ifModifiedSince() { + if (this.ifModifiedSince == null) { + return null; + } + return this.ifModifiedSince.dateTime(); + } + + /** + * Set the ifModifiedSince value. + * + * @param ifModifiedSince the ifModifiedSince value to set. + * @return the ModifiedAccessConditions object itself. + */ + public ModifiedAccessConditions withIfModifiedSince(OffsetDateTime ifModifiedSince) { + if (ifModifiedSince == null) { + this.ifModifiedSince = null; + } else { + this.ifModifiedSince = new DateTimeRfc1123(ifModifiedSince); + } + return this; + } + + /** + * Get the ifUnmodifiedSince value. + * + * @return the ifUnmodifiedSince value. + */ + public OffsetDateTime ifUnmodifiedSince() { + if (this.ifUnmodifiedSince == null) { + return null; + } + return this.ifUnmodifiedSince.dateTime(); + } + + /** + * Set the ifUnmodifiedSince value. + * + * @param ifUnmodifiedSince the ifUnmodifiedSince value to set. + * @return the ModifiedAccessConditions object itself. + */ + public ModifiedAccessConditions withIfUnmodifiedSince(OffsetDateTime ifUnmodifiedSince) { + if (ifUnmodifiedSince == null) { + this.ifUnmodifiedSince = null; + } else { + this.ifUnmodifiedSince = new DateTimeRfc1123(ifUnmodifiedSince); + } + return this; + } + + /** + * Get the ifMatch value. 
+ * + * @return the ifMatch value. + */ + public String ifMatch() { + return this.ifMatch; + } + + /** + * Set the ifMatch value. + * + * @param ifMatch the ifMatch value to set. + * @return the ModifiedAccessConditions object itself. + */ + public ModifiedAccessConditions withIfMatch(String ifMatch) { + this.ifMatch = ifMatch; + return this; + } + + /** + * Get the ifNoneMatch value. + * + * @return the ifNoneMatch value. + */ + public String ifNoneMatch() { + return this.ifNoneMatch; + } + + /** + * Set the ifNoneMatch value. + * + * @param ifNoneMatch the ifNoneMatch value to set. + * @return the ModifiedAccessConditions object itself. + */ + public ModifiedAccessConditions withIfNoneMatch(String ifNoneMatch) { + this.ifNoneMatch = ifNoneMatch; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobClearPagesHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobClearPagesHeaders.java new file mode 100644 index 0000000000000..aa3b0e7efdbe1 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobClearPagesHeaders.java @@ -0,0 +1,229 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for ClearPages operation. 
+ */ +@JacksonXmlRootElement(localName = "PageBlob-ClearPages-Headers") +public final class PageBlobClearPagesHeaders { + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the container was last modified. Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * If the blob has an MD5 hash and this operation is to read the full blob, + * this response header is returned so that the client can check for + * message content integrity. + */ + @JsonProperty(value = "Content-MD5") + private byte[] contentMD5; + + /** + * The current sequence number for the page blob. + */ + @JsonProperty(value = "x-ms-blob-sequence-number") + private Long blobSequenceNumber; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the PageBlobClearPagesHeaders object itself. 
+ */ + public PageBlobClearPagesHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the PageBlobClearPagesHeaders object itself. + */ + public PageBlobClearPagesHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the contentMD5 value. + * + * @return the contentMD5 value. + */ + public byte[] contentMD5() { + return this.contentMD5; + } + + /** + * Set the contentMD5 value. + * + * @param contentMD5 the contentMD5 value to set. + * @return the PageBlobClearPagesHeaders object itself. + */ + public PageBlobClearPagesHeaders withContentMD5(byte[] contentMD5) { + this.contentMD5 = contentMD5; + return this; + } + + /** + * Get the blobSequenceNumber value. + * + * @return the blobSequenceNumber value. + */ + public Long blobSequenceNumber() { + return this.blobSequenceNumber; + } + + /** + * Set the blobSequenceNumber value. + * + * @param blobSequenceNumber the blobSequenceNumber value to set. + * @return the PageBlobClearPagesHeaders object itself. + */ + public PageBlobClearPagesHeaders withBlobSequenceNumber(Long blobSequenceNumber) { + this.blobSequenceNumber = blobSequenceNumber; + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the PageBlobClearPagesHeaders object itself. 
+ */ + public PageBlobClearPagesHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the PageBlobClearPagesHeaders object itself. + */ + public PageBlobClearPagesHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the PageBlobClearPagesHeaders object itself. + */ + public PageBlobClearPagesHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobClearPagesResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobClearPagesResponse.java new file mode 100644 index 0000000000000..99f7fe8bd49c8 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobClearPagesResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the clearPages operation. + */ +public final class PageBlobClearPagesResponse extends RestResponse { + /** + * Creates an instance of PageBlobClearPagesResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public PageBlobClearPagesResponse(HttpRequest request, int statusCode, PageBlobClearPagesHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public PageBlobClearPagesHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobCopyIncrementalHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobCopyIncrementalHeaders.java new file mode 100644 index 0000000000000..7221d1a806875 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobCopyIncrementalHeaders.java @@ -0,0 +1,230 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for CopyIncremental operation. + */ +@JacksonXmlRootElement(localName = "PageBlob-CopyIncremental-Headers") +public final class PageBlobCopyIncrementalHeaders { + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the container was last modified. Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * String identifier for this copy operation. Use with Get Blob Properties + * to check the status of this copy operation, or pass to Abort Copy Blob + * to abort a pending copy. 
+ */ + @JsonProperty(value = "x-ms-copy-id") + private String copyId; + + /** + * State of the copy operation identified by x-ms-copy-id. Possible values + * include: 'pending', 'success', 'aborted', 'failed'. + */ + @JsonProperty(value = "x-ms-copy-status") + private CopyStatusType copyStatus; + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the PageBlobCopyIncrementalHeaders object itself. + */ + public PageBlobCopyIncrementalHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the PageBlobCopyIncrementalHeaders object itself. + */ + public PageBlobCopyIncrementalHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the PageBlobCopyIncrementalHeaders object itself. + */ + public PageBlobCopyIncrementalHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the PageBlobCopyIncrementalHeaders object itself. 
+ */ + public PageBlobCopyIncrementalHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the PageBlobCopyIncrementalHeaders object itself. + */ + public PageBlobCopyIncrementalHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } + + /** + * Get the copyId value. + * + * @return the copyId value. + */ + public String copyId() { + return this.copyId; + } + + /** + * Set the copyId value. + * + * @param copyId the copyId value to set. + * @return the PageBlobCopyIncrementalHeaders object itself. + */ + public PageBlobCopyIncrementalHeaders withCopyId(String copyId) { + this.copyId = copyId; + return this; + } + + /** + * Get the copyStatus value. + * + * @return the copyStatus value. + */ + public CopyStatusType copyStatus() { + return this.copyStatus; + } + + /** + * Set the copyStatus value. + * + * @param copyStatus the copyStatus value to set. + * @return the PageBlobCopyIncrementalHeaders object itself. + */ + public PageBlobCopyIncrementalHeaders withCopyStatus(CopyStatusType copyStatus) { + this.copyStatus = copyStatus; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobCopyIncrementalResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobCopyIncrementalResponse.java new file mode 100644 index 0000000000000..077a466656c54 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobCopyIncrementalResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. 
+ * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the copyIncremental operation. + */ +public final class PageBlobCopyIncrementalResponse extends RestResponse { + /** + * Creates an instance of PageBlobCopyIncrementalResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public PageBlobCopyIncrementalResponse(HttpRequest request, int statusCode, PageBlobCopyIncrementalHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public PageBlobCopyIncrementalHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobCreateHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobCreateHeaders.java new file mode 100644 index 0000000000000..099fde09a1484 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobCreateHeaders.java @@ -0,0 +1,231 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. 
+ * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for Create operation. + */ +@JacksonXmlRootElement(localName = "PageBlob-Create-Headers") +public final class PageBlobCreateHeaders { + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the container was last modified. Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * If the blob has an MD5 hash and this operation is to read the full blob, + * this response header is returned so that the client can check for + * message content integrity. + */ + @JsonProperty(value = "Content-MD5") + private byte[] contentMD5; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. 
+ */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * The value of this header is set to true if the contents of the request + * are successfully encrypted using the specified algorithm, and false + * otherwise. + */ + @JsonProperty(value = "x-ms-request-server-encrypted") + private Boolean isServerEncrypted; + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the PageBlobCreateHeaders object itself. + */ + public PageBlobCreateHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the PageBlobCreateHeaders object itself. + */ + public PageBlobCreateHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the contentMD5 value. + * + * @return the contentMD5 value. + */ + public byte[] contentMD5() { + return this.contentMD5; + } + + /** + * Set the contentMD5 value. + * + * @param contentMD5 the contentMD5 value to set. + * @return the PageBlobCreateHeaders object itself. + */ + public PageBlobCreateHeaders withContentMD5(byte[] contentMD5) { + this.contentMD5 = contentMD5; + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the PageBlobCreateHeaders object itself. 
+ */ + public PageBlobCreateHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the PageBlobCreateHeaders object itself. + */ + public PageBlobCreateHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the PageBlobCreateHeaders object itself. + */ + public PageBlobCreateHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } + + /** + * Get the isServerEncrypted value. + * + * @return the isServerEncrypted value. + */ + public Boolean isServerEncrypted() { + return this.isServerEncrypted; + } + + /** + * Set the isServerEncrypted value. + * + * @param isServerEncrypted the isServerEncrypted value to set. + * @return the PageBlobCreateHeaders object itself. + */ + public PageBlobCreateHeaders withIsServerEncrypted(Boolean isServerEncrypted) { + this.isServerEncrypted = isServerEncrypted; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobCreateResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobCreateResponse.java new file mode 100644 index 0000000000000..b18a74841b950 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobCreateResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. 
See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the create operation. + */ +public final class PageBlobCreateResponse extends RestResponse { + /** + * Creates an instance of PageBlobCreateResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public PageBlobCreateResponse(HttpRequest request, int statusCode, PageBlobCreateHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public PageBlobCreateHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobGetPageRangesDiffHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobGetPageRangesDiffHeaders.java new file mode 100644 index 0000000000000..a3d098737841b --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobGetPageRangesDiffHeaders.java @@ -0,0 +1,201 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. 
+ * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for GetPageRangesDiff operation. + */ +@JacksonXmlRootElement(localName = "PageBlob-GetPageRangesDiff-Headers") +public final class PageBlobGetPageRangesDiffHeaders { + /** + * Returns the date and time the container was last modified. Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * The size of the blob in bytes. + */ + @JsonProperty(value = "x-ms-blob-content-length") + private Long blobContentLength; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the lastModified value. 
+ * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the PageBlobGetPageRangesDiffHeaders object itself. + */ + public PageBlobGetPageRangesDiffHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the PageBlobGetPageRangesDiffHeaders object itself. + */ + public PageBlobGetPageRangesDiffHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the blobContentLength value. + * + * @return the blobContentLength value. + */ + public Long blobContentLength() { + return this.blobContentLength; + } + + /** + * Set the blobContentLength value. + * + * @param blobContentLength the blobContentLength value to set. + * @return the PageBlobGetPageRangesDiffHeaders object itself. + */ + public PageBlobGetPageRangesDiffHeaders withBlobContentLength(Long blobContentLength) { + this.blobContentLength = blobContentLength; + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the PageBlobGetPageRangesDiffHeaders object itself. + */ + public PageBlobGetPageRangesDiffHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. 
+ */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the PageBlobGetPageRangesDiffHeaders object itself. + */ + public PageBlobGetPageRangesDiffHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the PageBlobGetPageRangesDiffHeaders object itself. + */ + public PageBlobGetPageRangesDiffHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobGetPageRangesDiffResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobGetPageRangesDiffResponse.java new file mode 100644 index 0000000000000..28d4ed76ac45f --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobGetPageRangesDiffResponse.java @@ -0,0 +1,49 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the getPageRangesDiff operation. 
+ */ +public final class PageBlobGetPageRangesDiffResponse extends RestResponse { + /** + * Creates an instance of PageBlobGetPageRangesDiffResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public PageBlobGetPageRangesDiffResponse(HttpRequest request, int statusCode, PageBlobGetPageRangesDiffHeaders headers, Map rawHeaders, PageList body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public PageBlobGetPageRangesDiffHeaders headers() { + return super.headers(); + } + + /** + * @return the deserialized response body. + */ + @Override + public PageList body() { + return super.body(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobGetPageRangesHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobGetPageRangesHeaders.java new file mode 100644 index 0000000000000..32860d3793852 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobGetPageRangesHeaders.java @@ -0,0 +1,201 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for GetPageRanges operation. + */ +@JacksonXmlRootElement(localName = "PageBlob-GetPageRanges-Headers") +public final class PageBlobGetPageRangesHeaders { + /** + * Returns the date and time the container was last modified. Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * The size of the blob in bytes. + */ + @JsonProperty(value = "x-ms-blob-content-length") + private Long blobContentLength; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the lastModified value. + * + * @return the lastModified value. 
+ */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the PageBlobGetPageRangesHeaders object itself. + */ + public PageBlobGetPageRangesHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the PageBlobGetPageRangesHeaders object itself. + */ + public PageBlobGetPageRangesHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the blobContentLength value. + * + * @return the blobContentLength value. + */ + public Long blobContentLength() { + return this.blobContentLength; + } + + /** + * Set the blobContentLength value. + * + * @param blobContentLength the blobContentLength value to set. + * @return the PageBlobGetPageRangesHeaders object itself. + */ + public PageBlobGetPageRangesHeaders withBlobContentLength(Long blobContentLength) { + this.blobContentLength = blobContentLength; + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the PageBlobGetPageRangesHeaders object itself. + */ + public PageBlobGetPageRangesHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. 
+ * + * @param version the version value to set. + * @return the PageBlobGetPageRangesHeaders object itself. + */ + public PageBlobGetPageRangesHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the PageBlobGetPageRangesHeaders object itself. + */ + public PageBlobGetPageRangesHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobGetPageRangesResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobGetPageRangesResponse.java new file mode 100644 index 0000000000000..4ca391ed0715e --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobGetPageRangesResponse.java @@ -0,0 +1,49 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the getPageRanges operation. + */ +public final class PageBlobGetPageRangesResponse extends RestResponse { + /** + * Creates an instance of PageBlobGetPageRangesResponse. + * + * @param request the request which resulted in this {response.Name}. 
+ * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public PageBlobGetPageRangesResponse(HttpRequest request, int statusCode, PageBlobGetPageRangesHeaders headers, Map rawHeaders, PageList body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public PageBlobGetPageRangesHeaders headers() { + return super.headers(); + } + + /** + * @return the deserialized response body. + */ + @Override + public PageList body() { + return super.body(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobResizeHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobResizeHeaders.java new file mode 100644 index 0000000000000..bdbca9d22c18d --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobResizeHeaders.java @@ -0,0 +1,202 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for Resize operation. 
+ */ +@JacksonXmlRootElement(localName = "PageBlob-Resize-Headers") +public final class PageBlobResizeHeaders { + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the container was last modified. Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * The current sequence number for a page blob. This header is not returned + * for block blobs or append blobs. + */ + @JsonProperty(value = "x-ms-blob-sequence-number") + private Long blobSequenceNumber; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the PageBlobResizeHeaders object itself. + */ + public PageBlobResizeHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. 
+ */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the PageBlobResizeHeaders object itself. + */ + public PageBlobResizeHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the blobSequenceNumber value. + * + * @return the blobSequenceNumber value. + */ + public Long blobSequenceNumber() { + return this.blobSequenceNumber; + } + + /** + * Set the blobSequenceNumber value. + * + * @param blobSequenceNumber the blobSequenceNumber value to set. + * @return the PageBlobResizeHeaders object itself. + */ + public PageBlobResizeHeaders withBlobSequenceNumber(Long blobSequenceNumber) { + this.blobSequenceNumber = blobSequenceNumber; + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the PageBlobResizeHeaders object itself. + */ + public PageBlobResizeHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the PageBlobResizeHeaders object itself. + */ + public PageBlobResizeHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. 
+ */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the PageBlobResizeHeaders object itself. + */ + public PageBlobResizeHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobResizeResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobResizeResponse.java new file mode 100644 index 0000000000000..3de49146bb9a7 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobResizeResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the resize operation. + */ +public final class PageBlobResizeResponse extends RestResponse { + /** + * Creates an instance of PageBlobResizeResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. 
+ */ + public PageBlobResizeResponse(HttpRequest request, int statusCode, PageBlobResizeHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public PageBlobResizeHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobUpdateSequenceNumberHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobUpdateSequenceNumberHeaders.java new file mode 100644 index 0000000000000..e2b73c9a1ba5c --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobUpdateSequenceNumberHeaders.java @@ -0,0 +1,202 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for UpdateSequenceNumber operation. + */ +@JacksonXmlRootElement(localName = "PageBlob-UpdateSequenceNumber-Headers") +public final class PageBlobUpdateSequenceNumberHeaders { + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the container was last modified. 
Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. + */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * The current sequence number for a page blob. This header is not returned + * for block blobs or append blobs. + */ + @JsonProperty(value = "x-ms-blob-sequence-number") + private Long blobSequenceNumber; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the PageBlobUpdateSequenceNumberHeaders object itself. + */ + public PageBlobUpdateSequenceNumberHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. + */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the PageBlobUpdateSequenceNumberHeaders object itself. 
+ */ + public PageBlobUpdateSequenceNumberHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the blobSequenceNumber value. + * + * @return the blobSequenceNumber value. + */ + public Long blobSequenceNumber() { + return this.blobSequenceNumber; + } + + /** + * Set the blobSequenceNumber value. + * + * @param blobSequenceNumber the blobSequenceNumber value to set. + * @return the PageBlobUpdateSequenceNumberHeaders object itself. + */ + public PageBlobUpdateSequenceNumberHeaders withBlobSequenceNumber(Long blobSequenceNumber) { + this.blobSequenceNumber = blobSequenceNumber; + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the PageBlobUpdateSequenceNumberHeaders object itself. + */ + public PageBlobUpdateSequenceNumberHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the PageBlobUpdateSequenceNumberHeaders object itself. + */ + public PageBlobUpdateSequenceNumberHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the PageBlobUpdateSequenceNumberHeaders object itself. 
+ */ + public PageBlobUpdateSequenceNumberHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobUpdateSequenceNumberResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobUpdateSequenceNumberResponse.java new file mode 100644 index 0000000000000..637404963e488 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobUpdateSequenceNumberResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the updateSequenceNumber operation. + */ +public final class PageBlobUpdateSequenceNumberResponse extends RestResponse { + /** + * Creates an instance of PageBlobUpdateSequenceNumberResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public PageBlobUpdateSequenceNumberResponse(HttpRequest request, int statusCode, PageBlobUpdateSequenceNumberHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. 
+ */ + @Override + public PageBlobUpdateSequenceNumberHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobUploadPagesHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobUploadPagesHeaders.java new file mode 100644 index 0000000000000..090cb61724a3c --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobUploadPagesHeaders.java @@ -0,0 +1,257 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for UploadPages operation. + */ +@JacksonXmlRootElement(localName = "PageBlob-UploadPages-Headers") +public final class PageBlobUploadPagesHeaders { + /** + * The ETag contains a value that you can use to perform operations + * conditionally. If the request version is 2011-08-18 or newer, the ETag + * value will be in quotes. + */ + @JsonProperty(value = "ETag") + private String eTag; + + /** + * Returns the date and time the container was last modified. Any operation + * that modifies the blob, including an update of the blob's metadata or + * properties, changes the last-modified time of the blob. 
+ */ + @JsonProperty(value = "Last-Modified") + private DateTimeRfc1123 lastModified; + + /** + * If the blob has an MD5 hash and this operation is to read the full blob, + * this response header is returned so that the client can check for + * message content integrity. + */ + @JsonProperty(value = "Content-MD5") + private byte[] contentMD5; + + /** + * The current sequence number for the page blob. + */ + @JsonProperty(value = "x-ms-blob-sequence-number") + private Long blobSequenceNumber; + + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * The value of this header is set to true if the contents of the request + * are successfully encrypted using the specified algorithm, and false + * otherwise. + */ + @JsonProperty(value = "x-ms-request-server-encrypted") + private Boolean isServerEncrypted; + + /** + * Get the eTag value. + * + * @return the eTag value. + */ + public String eTag() { + return this.eTag; + } + + /** + * Set the eTag value. + * + * @param eTag the eTag value to set. + * @return the PageBlobUploadPagesHeaders object itself. + */ + public PageBlobUploadPagesHeaders withETag(String eTag) { + this.eTag = eTag; + return this; + } + + /** + * Get the lastModified value. + * + * @return the lastModified value. 
+ */ + public OffsetDateTime lastModified() { + if (this.lastModified == null) { + return null; + } + return this.lastModified.dateTime(); + } + + /** + * Set the lastModified value. + * + * @param lastModified the lastModified value to set. + * @return the PageBlobUploadPagesHeaders object itself. + */ + public PageBlobUploadPagesHeaders withLastModified(OffsetDateTime lastModified) { + if (lastModified == null) { + this.lastModified = null; + } else { + this.lastModified = new DateTimeRfc1123(lastModified); + } + return this; + } + + /** + * Get the contentMD5 value. + * + * @return the contentMD5 value. + */ + public byte[] contentMD5() { + return this.contentMD5; + } + + /** + * Set the contentMD5 value. + * + * @param contentMD5 the contentMD5 value to set. + * @return the PageBlobUploadPagesHeaders object itself. + */ + public PageBlobUploadPagesHeaders withContentMD5(byte[] contentMD5) { + this.contentMD5 = contentMD5; + return this; + } + + /** + * Get the blobSequenceNumber value. + * + * @return the blobSequenceNumber value. + */ + public Long blobSequenceNumber() { + return this.blobSequenceNumber; + } + + /** + * Set the blobSequenceNumber value. + * + * @param blobSequenceNumber the blobSequenceNumber value to set. + * @return the PageBlobUploadPagesHeaders object itself. + */ + public PageBlobUploadPagesHeaders withBlobSequenceNumber(Long blobSequenceNumber) { + this.blobSequenceNumber = blobSequenceNumber; + return this; + } + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the PageBlobUploadPagesHeaders object itself. + */ + public PageBlobUploadPagesHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. 
+ */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the PageBlobUploadPagesHeaders object itself. + */ + public PageBlobUploadPagesHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the PageBlobUploadPagesHeaders object itself. + */ + public PageBlobUploadPagesHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } + + /** + * Get the isServerEncrypted value. + * + * @return the isServerEncrypted value. + */ + public Boolean isServerEncrypted() { + return this.isServerEncrypted; + } + + /** + * Set the isServerEncrypted value. + * + * @param isServerEncrypted the isServerEncrypted value to set. + * @return the PageBlobUploadPagesHeaders object itself. + */ + public PageBlobUploadPagesHeaders withIsServerEncrypted(Boolean isServerEncrypted) { + this.isServerEncrypted = isServerEncrypted; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobUploadPagesResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobUploadPagesResponse.java new file mode 100644 index 0000000000000..d9184fc61d2ca --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageBlobUploadPagesResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. 
+ * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the uploadPages operation. + */ +public final class PageBlobUploadPagesResponse extends RestResponse { + /** + * Creates an instance of PageBlobUploadPagesResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public PageBlobUploadPagesResponse(HttpRequest request, int statusCode, PageBlobUploadPagesHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public PageBlobUploadPagesHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageList.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageList.java new file mode 100644 index 0000000000000..80a2863707540 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageList.java @@ -0,0 +1,75 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import java.util.ArrayList; +import java.util.List; + +/** + * the list of pages. + */ +@JacksonXmlRootElement(localName = "PageList") +public final class PageList { + /** + * The pageRange property. + */ + @JsonProperty("PageRange") + private List pageRange = new ArrayList<>(); + + /** + * The clearRange property. + */ + @JsonProperty("ClearRange") + private List clearRange = new ArrayList<>(); + + /** + * Get the pageRange value. + * + * @return the pageRange value. + */ + public List pageRange() { + return this.pageRange; + } + + /** + * Set the pageRange value. + * + * @param pageRange the pageRange value to set. + * @return the PageList object itself. + */ + public PageList withPageRange(List pageRange) { + this.pageRange = pageRange; + return this; + } + + /** + * Get the clearRange value. + * + * @return the clearRange value. + */ + public List clearRange() { + return this.clearRange; + } + + /** + * Set the clearRange value. + * + * @param clearRange the clearRange value to set. + * @return the PageList object itself. + */ + public PageList withClearRange(List clearRange) { + this.clearRange = clearRange; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageRange.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageRange.java new file mode 100644 index 0000000000000..1ba9c1fd5c9af --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PageRange.java @@ -0,0 +1,73 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. 
+ * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; + +/** + * The PageRange model. + */ +@JacksonXmlRootElement(localName = "PageRange") +public final class PageRange { + /** + * The start property. + */ + @JsonProperty(value = "Start", required = true) + private long start; + + /** + * The end property. + */ + @JsonProperty(value = "End", required = true) + private long end; + + /** + * Get the start value. + * + * @return the start value. + */ + public long start() { + return this.start; + } + + /** + * Set the start value. + * + * @param start the start value to set. + * @return the PageRange object itself. + */ + public PageRange withStart(long start) { + this.start = start; + return this; + } + + /** + * Get the end value. + * + * @return the end value. + */ + public long end() { + return this.end; + } + + /** + * Set the end value. + * + * @param end the end value to set. + * @return the PageRange object itself. + */ + public PageRange withEnd(long end) { + this.end = end; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PublicAccessType.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PublicAccessType.java new file mode 100644 index 0000000000000..6dc2b2dbc258d --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/PublicAccessType.java @@ -0,0 +1,48 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. 
+ * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.microsoft.rest.v2.ExpandableStringEnum; +import java.util.Collection; + +/** + * Defines values for PublicAccessType. + */ +public final class PublicAccessType extends ExpandableStringEnum { + /** + * Static value container for PublicAccessType. + */ + public static final PublicAccessType CONTAINER = fromString("container"); + + /** + * Static value blob for PublicAccessType. + */ + public static final PublicAccessType BLOB = fromString("blob"); + + /** + * Creates or finds a PublicAccessType from its string representation. + * + * @param name a name to look for. + * @return the corresponding PublicAccessType. + */ + @JsonCreator + public static PublicAccessType fromString(String name) { + return fromString(name, PublicAccessType.class); + } + + /** + * @return known PublicAccessType values. + */ + public static Collection values() { + return values(PublicAccessType.class); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/RetentionPolicy.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/RetentionPolicy.java new file mode 100644 index 0000000000000..092a86bcca5df --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/RetentionPolicy.java @@ -0,0 +1,75 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; + +/** + * the retention policy which determines how long the associated data should + * persist. + */ +@JacksonXmlRootElement(localName = "RetentionPolicy") +public final class RetentionPolicy { + /** + * Indicates whether a retention policy is enabled for the storage service. + */ + @JsonProperty(value = "Enabled", required = true) + private boolean enabled; + + /** + * Indicates the number of days that metrics or logging or soft-deleted + * data should be retained. All data older than this value will be deleted. + */ + @JsonProperty(value = "Days") + private Integer days; + + /** + * Get the enabled value. + * + * @return the enabled value. + */ + public boolean enabled() { + return this.enabled; + } + + /** + * Set the enabled value. + * + * @param enabled the enabled value to set. + * @return the RetentionPolicy object itself. + */ + public RetentionPolicy withEnabled(boolean enabled) { + this.enabled = enabled; + return this; + } + + /** + * Get the days value. + * + * @return the days value. + */ + public Integer days() { + return this.days; + } + + /** + * Set the days value. + * + * @param days the days value to set. + * @return the RetentionPolicy object itself. 
+ */ + public RetentionPolicy withDays(Integer days) { + this.days = days; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/SequenceNumberAccessConditions.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/SequenceNumberAccessConditions.java new file mode 100644 index 0000000000000..faf2e1b71041d --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/SequenceNumberAccessConditions.java @@ -0,0 +1,105 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; + +/** + * Additional parameters for a set of operations, such as: + * PageBlob_uploadPages, PageBlob_clearPages. + */ +@JacksonXmlRootElement(localName = "sequence-number-access-conditions") +public final class SequenceNumberAccessConditions { + /** + * Specify this header value to operate only on a blob if it has a sequence + * number less than or equal to the specified. + */ + @JsonProperty(value = "IfSequenceNumberLessThanOrEqualTo") + private Long ifSequenceNumberLessThanOrEqualTo; + + /** + * Specify this header value to operate only on a blob if it has a sequence + * number less than the specified. + */ + @JsonProperty(value = "IfSequenceNumberLessThan") + private Long ifSequenceNumberLessThan; + + /** + * Specify this header value to operate only on a blob if it has the + * specified sequence number. 
+ */ + @JsonProperty(value = "IfSequenceNumberEqualTo") + private Long ifSequenceNumberEqualTo; + + /** + * Get the ifSequenceNumberLessThanOrEqualTo value. + * + * @return the ifSequenceNumberLessThanOrEqualTo value. + */ + public Long ifSequenceNumberLessThanOrEqualTo() { + return this.ifSequenceNumberLessThanOrEqualTo; + } + + /** + * Set the ifSequenceNumberLessThanOrEqualTo value. + * + * @param ifSequenceNumberLessThanOrEqualTo the + * ifSequenceNumberLessThanOrEqualTo value to set. + * @return the SequenceNumberAccessConditions object itself. + */ + public SequenceNumberAccessConditions withIfSequenceNumberLessThanOrEqualTo(Long ifSequenceNumberLessThanOrEqualTo) { + this.ifSequenceNumberLessThanOrEqualTo = ifSequenceNumberLessThanOrEqualTo; + return this; + } + + /** + * Get the ifSequenceNumberLessThan value. + * + * @return the ifSequenceNumberLessThan value. + */ + public Long ifSequenceNumberLessThan() { + return this.ifSequenceNumberLessThan; + } + + /** + * Set the ifSequenceNumberLessThan value. + * + * @param ifSequenceNumberLessThan the ifSequenceNumberLessThan value to + * set. + * @return the SequenceNumberAccessConditions object itself. + */ + public SequenceNumberAccessConditions withIfSequenceNumberLessThan(Long ifSequenceNumberLessThan) { + this.ifSequenceNumberLessThan = ifSequenceNumberLessThan; + return this; + } + + /** + * Get the ifSequenceNumberEqualTo value. + * + * @return the ifSequenceNumberEqualTo value. + */ + public Long ifSequenceNumberEqualTo() { + return this.ifSequenceNumberEqualTo; + } + + /** + * Set the ifSequenceNumberEqualTo value. + * + * @param ifSequenceNumberEqualTo the ifSequenceNumberEqualTo value to set. + * @return the SequenceNumberAccessConditions object itself. 
+ */ + public SequenceNumberAccessConditions withIfSequenceNumberEqualTo(Long ifSequenceNumberEqualTo) { + this.ifSequenceNumberEqualTo = ifSequenceNumberEqualTo; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/SequenceNumberActionType.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/SequenceNumberActionType.java new file mode 100644 index 0000000000000..918864b149c48 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/SequenceNumberActionType.java @@ -0,0 +1,66 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; + +/** + * Defines values for SequenceNumberActionType. + */ +public enum SequenceNumberActionType { + /** + * Enum value max. + */ + MAX("max"), + + /** + * Enum value update. + */ + UPDATE("update"), + + /** + * Enum value increment. + */ + INCREMENT("increment"); + + /** + * The actual serialized value for a SequenceNumberActionType instance. + */ + private final String value; + + private SequenceNumberActionType(String value) { + this.value = value; + } + + /** + * Parses a serialized value to a SequenceNumberActionType instance. + * + * @param value the serialized value to parse. + * @return the parsed SequenceNumberActionType object, or null if unable to parse. 
+ */ + @JsonCreator + public static SequenceNumberActionType fromString(String value) { + SequenceNumberActionType[] items = SequenceNumberActionType.values(); + for (SequenceNumberActionType item : items) { + if (item.toString().equalsIgnoreCase(value)) { + return item; + } + } + return null; + } + + @JsonValue + @Override + public String toString() { + return this.value; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceGetAccountInfoHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceGetAccountInfoHeaders.java new file mode 100644 index 0000000000000..f552cf6254497 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceGetAccountInfoHeaders.java @@ -0,0 +1,167 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for GetAccountInfo operation. + */ +@JacksonXmlRootElement(localName = "Service-GetAccountInfo-Headers") +public final class ServiceGetAccountInfoHeaders { + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. 
+ * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Identifies the sku name of the account. Possible values include: + * 'Standard_LRS', 'Standard_GRS', 'Standard_RAGRS', 'Standard_ZRS', + * 'Premium_LRS'. + */ + @JsonProperty(value = "x-ms-sku-name") + private SkuName skuName; + + /** + * Identifies the account kind. Possible values include: 'Storage', + * 'BlobStorage', 'StorageV2'. + */ + @JsonProperty(value = "x-ms-account-kind") + private AccountKind accountKind; + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the ServiceGetAccountInfoHeaders object itself. + */ + public ServiceGetAccountInfoHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the ServiceGetAccountInfoHeaders object itself. + */ + public ServiceGetAccountInfoHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the ServiceGetAccountInfoHeaders object itself. 
+ */ + public ServiceGetAccountInfoHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } + + /** + * Get the skuName value. + * + * @return the skuName value. + */ + public SkuName skuName() { + return this.skuName; + } + + /** + * Set the skuName value. + * + * @param skuName the skuName value to set. + * @return the ServiceGetAccountInfoHeaders object itself. + */ + public ServiceGetAccountInfoHeaders withSkuName(SkuName skuName) { + this.skuName = skuName; + return this; + } + + /** + * Get the accountKind value. + * + * @return the accountKind value. + */ + public AccountKind accountKind() { + return this.accountKind; + } + + /** + * Set the accountKind value. + * + * @param accountKind the accountKind value to set. + * @return the ServiceGetAccountInfoHeaders object itself. + */ + public ServiceGetAccountInfoHeaders withAccountKind(AccountKind accountKind) { + this.accountKind = accountKind; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceGetAccountInfoResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceGetAccountInfoResponse.java new file mode 100644 index 0000000000000..eb1ac1e604985 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceGetAccountInfoResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the getAccountInfo operation. + */ +public final class ServiceGetAccountInfoResponse extends RestResponse { + /** + * Creates an instance of ServiceGetAccountInfoResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public ServiceGetAccountInfoResponse(HttpRequest request, int statusCode, ServiceGetAccountInfoHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public ServiceGetAccountInfoHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceGetPropertiesHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceGetPropertiesHeaders.java new file mode 100644 index 0000000000000..687a47bfe07e4 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceGetPropertiesHeaders.java @@ -0,0 +1,76 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; + +/** + * Defines headers for GetProperties operation. + */ +@JacksonXmlRootElement(localName = "Service-GetProperties-Headers") +public final class ServiceGetPropertiesHeaders { + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the ServiceGetPropertiesHeaders object itself. + */ + public ServiceGetPropertiesHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the ServiceGetPropertiesHeaders object itself. 
+ */ + public ServiceGetPropertiesHeaders withVersion(String version) { + this.version = version; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceGetPropertiesResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceGetPropertiesResponse.java new file mode 100644 index 0000000000000..cabb5217fe1df --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceGetPropertiesResponse.java @@ -0,0 +1,49 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the getProperties operation. + */ +public final class ServiceGetPropertiesResponse extends RestResponse { + /** + * Creates an instance of ServiceGetPropertiesResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public ServiceGetPropertiesResponse(HttpRequest request, int statusCode, ServiceGetPropertiesHeaders headers, Map rawHeaders, StorageServiceProperties body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. 
+ */ + @Override + public ServiceGetPropertiesHeaders headers() { + return super.headers(); + } + + /** + * @return the deserialized response body. + */ + @Override + public StorageServiceProperties body() { + return super.body(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceGetStatisticsHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceGetStatisticsHeaders.java new file mode 100644 index 0000000000000..30ae0d08f8a33 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceGetStatisticsHeaders.java @@ -0,0 +1,112 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Defines headers for GetStatistics operation. + */ +@JacksonXmlRootElement(localName = "Service-GetStatistics-Headers") +public final class ServiceGetStatisticsHeaders { + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. 
+ */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * UTC date/time value generated by the service that indicates the time at + * which the response was initiated. + */ + @JsonProperty(value = "Date") + private DateTimeRfc1123 date; + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the ServiceGetStatisticsHeaders object itself. + */ + public ServiceGetStatisticsHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the ServiceGetStatisticsHeaders object itself. + */ + public ServiceGetStatisticsHeaders withVersion(String version) { + this.version = version; + return this; + } + + /** + * Get the date value. + * + * @return the date value. + */ + public OffsetDateTime date() { + if (this.date == null) { + return null; + } + return this.date.dateTime(); + } + + /** + * Set the date value. + * + * @param date the date value to set. + * @return the ServiceGetStatisticsHeaders object itself. 
+ */ + public ServiceGetStatisticsHeaders withDate(OffsetDateTime date) { + if (date == null) { + this.date = null; + } else { + this.date = new DateTimeRfc1123(date); + } + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceGetStatisticsResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceGetStatisticsResponse.java new file mode 100644 index 0000000000000..34c3d7675303f --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceGetStatisticsResponse.java @@ -0,0 +1,49 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the getStatistics operation. + */ +public final class ServiceGetStatisticsResponse extends RestResponse { + /** + * Creates an instance of ServiceGetStatisticsResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public ServiceGetStatisticsResponse(HttpRequest request, int statusCode, ServiceGetStatisticsHeaders headers, Map rawHeaders, StorageServiceStats body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. 
+ */ + @Override + public ServiceGetStatisticsHeaders headers() { + return super.headers(); + } + + /** + * @return the deserialized response body. + */ + @Override + public StorageServiceStats body() { + return super.body(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceListContainersSegmentHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceListContainersSegmentHeaders.java new file mode 100644 index 0000000000000..89100e12c78d4 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceListContainersSegmentHeaders.java @@ -0,0 +1,76 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; + +/** + * Defines headers for ListContainersSegment operation. + */ +@JacksonXmlRootElement(localName = "Service-ListContainersSegment-Headers") +public final class ServiceListContainersSegmentHeaders { + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * Get the requestId value. + * + * @return the requestId value. 
+ */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the ServiceListContainersSegmentHeaders object itself. + */ + public ServiceListContainersSegmentHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the ServiceListContainersSegmentHeaders object itself. + */ + public ServiceListContainersSegmentHeaders withVersion(String version) { + this.version = version; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceListContainersSegmentResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceListContainersSegmentResponse.java new file mode 100644 index 0000000000000..0ead4cebd4e2e --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceListContainersSegmentResponse.java @@ -0,0 +1,49 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the listContainersSegment operation. + */ +public final class ServiceListContainersSegmentResponse extends RestResponse { + /** + * Creates an instance of ServiceListContainersSegmentResponse. 
+ * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public ServiceListContainersSegmentResponse(HttpRequest request, int statusCode, ServiceListContainersSegmentHeaders headers, Map rawHeaders, ListContainersSegmentResponse body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public ServiceListContainersSegmentHeaders headers() { + return super.headers(); + } + + /** + * @return the deserialized response body. + */ + @Override + public ListContainersSegmentResponse body() { + return super.body(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceSetPropertiesHeaders.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceSetPropertiesHeaders.java new file mode 100644 index 0000000000000..fdbc6ca7f74fc --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceSetPropertiesHeaders.java @@ -0,0 +1,76 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; + +/** + * Defines headers for SetProperties operation. 
+ */ +@JacksonXmlRootElement(localName = "Service-SetProperties-Headers") +public final class ServiceSetPropertiesHeaders { + /** + * This header uniquely identifies the request that was made and can be + * used for troubleshooting the request. + */ + @JsonProperty(value = "x-ms-request-id") + private String requestId; + + /** + * Indicates the version of the Blob service used to execute the request. + * This header is returned for requests made against version 2009-09-19 and + * above. + */ + @JsonProperty(value = "x-ms-version") + private String version; + + /** + * Get the requestId value. + * + * @return the requestId value. + */ + public String requestId() { + return this.requestId; + } + + /** + * Set the requestId value. + * + * @param requestId the requestId value to set. + * @return the ServiceSetPropertiesHeaders object itself. + */ + public ServiceSetPropertiesHeaders withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the version value. + * + * @return the version value. + */ + public String version() { + return this.version; + } + + /** + * Set the version value. + * + * @param version the version value to set. + * @return the ServiceSetPropertiesHeaders object itself. + */ + public ServiceSetPropertiesHeaders withVersion(String version) { + this.version = version; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceSetPropertiesResponse.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceSetPropertiesResponse.java new file mode 100644 index 0000000000000..52fb3b2a30c93 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/ServiceSetPropertiesResponse.java @@ -0,0 +1,41 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. 
+ * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestResponse; +import com.microsoft.rest.v2.http.HttpRequest; +import java.util.Map; + +/** + * Contains all response data for the setProperties operation. + */ +public final class ServiceSetPropertiesResponse extends RestResponse { + /** + * Creates an instance of ServiceSetPropertiesResponse. + * + * @param request the request which resulted in this {response.Name}. + * @param statusCode the status code of the HTTP response. + * @param headers the deserialized headers of the HTTP response. + * @param rawHeaders the raw headers of the HTTP response. + * @param body the deserialized body of the HTTP response. + */ + public ServiceSetPropertiesResponse(HttpRequest request, int statusCode, ServiceSetPropertiesHeaders headers, Map rawHeaders, Void body) { + super(request, statusCode, headers, rawHeaders, body); + } + + /** + * @return the deserialized response headers. + */ + @Override + public ServiceSetPropertiesHeaders headers() { + return super.headers(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/SignedIdentifier.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/SignedIdentifier.java new file mode 100644 index 0000000000000..6ef42d41a9ccb --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/SignedIdentifier.java @@ -0,0 +1,73 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; + +/** + * signed identifier. + */ +@JacksonXmlRootElement(localName = "SignedIdentifier") +public final class SignedIdentifier { + /** + * a unique id. + */ + @JsonProperty(value = "Id", required = true) + private String id; + + /** + * The accessPolicy property. + */ + @JsonProperty(value = "AccessPolicy", required = true) + private AccessPolicy accessPolicy; + + /** + * Get the id value. + * + * @return the id value. + */ + public String id() { + return this.id; + } + + /** + * Set the id value. + * + * @param id the id value to set. + * @return the SignedIdentifier object itself. + */ + public SignedIdentifier withId(String id) { + this.id = id; + return this; + } + + /** + * Get the accessPolicy value. + * + * @return the accessPolicy value. + */ + public AccessPolicy accessPolicy() { + return this.accessPolicy; + } + + /** + * Set the accessPolicy value. + * + * @param accessPolicy the accessPolicy value to set. + * @return the SignedIdentifier object itself. + */ + public SignedIdentifier withAccessPolicy(AccessPolicy accessPolicy) { + this.accessPolicy = accessPolicy; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/SkuName.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/SkuName.java new file mode 100644 index 0000000000000..468e579a685fa --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/SkuName.java @@ -0,0 +1,76 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. 
+ * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; + +/** + * Defines values for SkuName. + */ +public enum SkuName { + /** + * Enum value Standard_LRS. + */ + STANDARD_LRS("Standard_LRS"), + + /** + * Enum value Standard_GRS. + */ + STANDARD_GRS("Standard_GRS"), + + /** + * Enum value Standard_RAGRS. + */ + STANDARD_RAGRS("Standard_RAGRS"), + + /** + * Enum value Standard_ZRS. + */ + STANDARD_ZRS("Standard_ZRS"), + + /** + * Enum value Premium_LRS. + */ + PREMIUM_LRS("Premium_LRS"); + + /** + * The actual serialized value for a SkuName instance. + */ + private final String value; + + private SkuName(String value) { + this.value = value; + } + + /** + * Parses a serialized value to a SkuName instance. + * + * @param value the serialized value to parse. + * @return the parsed SkuName object, or null if unable to parse. + */ + @JsonCreator + public static SkuName fromString(String value) { + SkuName[] items = SkuName.values(); + for (SkuName item : items) { + if (item.toString().equalsIgnoreCase(value)) { + return item; + } + } + return null; + } + + @JsonValue + @Override + public String toString() { + return this.value; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/SourceModifiedAccessConditions.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/SourceModifiedAccessConditions.java new file mode 100644 index 0000000000000..6ab1d6455ad44 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/SourceModifiedAccessConditions.java @@ -0,0 +1,144 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. 
+ * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import com.microsoft.rest.v2.DateTimeRfc1123; +import java.time.OffsetDateTime; + +/** + * Additional parameters for a set of operations, such as: + * Blob_startCopyFromURL, Blob_copyFromURL. + */ +@JacksonXmlRootElement(localName = "source-modified-access-conditions") +public final class SourceModifiedAccessConditions { + /** + * Specify this header value to operate only on a blob if it has been + * modified since the specified date/time. + */ + @JsonProperty(value = "SourceIfModifiedSince") + private DateTimeRfc1123 sourceIfModifiedSince; + + /** + * Specify this header value to operate only on a blob if it has not been + * modified since the specified date/time. + */ + @JsonProperty(value = "SourceIfUnmodifiedSince") + private DateTimeRfc1123 sourceIfUnmodifiedSince; + + /** + * Specify an ETag value to operate only on blobs with a matching value. + */ + @JsonProperty(value = "SourceIfMatch") + private String sourceIfMatch; + + /** + * Specify an ETag value to operate only on blobs without a matching value. + */ + @JsonProperty(value = "SourceIfNoneMatch") + private String sourceIfNoneMatch; + + /** + * Get the sourceIfModifiedSince value. + * + * @return the sourceIfModifiedSince value. + */ + public OffsetDateTime sourceIfModifiedSince() { + if (this.sourceIfModifiedSince == null) { + return null; + } + return this.sourceIfModifiedSince.dateTime(); + } + + /** + * Set the sourceIfModifiedSince value. + * + * @param sourceIfModifiedSince the sourceIfModifiedSince value to set. + * @return the SourceModifiedAccessConditions object itself. 
+ */ + public SourceModifiedAccessConditions withSourceIfModifiedSince(OffsetDateTime sourceIfModifiedSince) { + if (sourceIfModifiedSince == null) { + this.sourceIfModifiedSince = null; + } else { + this.sourceIfModifiedSince = new DateTimeRfc1123(sourceIfModifiedSince); + } + return this; + } + + /** + * Get the sourceIfUnmodifiedSince value. + * + * @return the sourceIfUnmodifiedSince value. + */ + public OffsetDateTime sourceIfUnmodifiedSince() { + if (this.sourceIfUnmodifiedSince == null) { + return null; + } + return this.sourceIfUnmodifiedSince.dateTime(); + } + + /** + * Set the sourceIfUnmodifiedSince value. + * + * @param sourceIfUnmodifiedSince the sourceIfUnmodifiedSince value to set. + * @return the SourceModifiedAccessConditions object itself. + */ + public SourceModifiedAccessConditions withSourceIfUnmodifiedSince(OffsetDateTime sourceIfUnmodifiedSince) { + if (sourceIfUnmodifiedSince == null) { + this.sourceIfUnmodifiedSince = null; + } else { + this.sourceIfUnmodifiedSince = new DateTimeRfc1123(sourceIfUnmodifiedSince); + } + return this; + } + + /** + * Get the sourceIfMatch value. + * + * @return the sourceIfMatch value. + */ + public String sourceIfMatch() { + return this.sourceIfMatch; + } + + /** + * Set the sourceIfMatch value. + * + * @param sourceIfMatch the sourceIfMatch value to set. + * @return the SourceModifiedAccessConditions object itself. + */ + public SourceModifiedAccessConditions withSourceIfMatch(String sourceIfMatch) { + this.sourceIfMatch = sourceIfMatch; + return this; + } + + /** + * Get the sourceIfNoneMatch value. + * + * @return the sourceIfNoneMatch value. + */ + public String sourceIfNoneMatch() { + return this.sourceIfNoneMatch; + } + + /** + * Set the sourceIfNoneMatch value. + * + * @param sourceIfNoneMatch the sourceIfNoneMatch value to set. + * @return the SourceModifiedAccessConditions object itself. 
+ */ + public SourceModifiedAccessConditions withSourceIfNoneMatch(String sourceIfNoneMatch) { + this.sourceIfNoneMatch = sourceIfNoneMatch; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/StaticWebsite.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/StaticWebsite.java new file mode 100644 index 0000000000000..dbda8afe34ec8 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/StaticWebsite.java @@ -0,0 +1,99 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; + +/** + * The properties that enable an account to host a static website. + */ +@JacksonXmlRootElement(localName = "StaticWebsite") +public final class StaticWebsite { + /** + * Indicates whether this account is hosting a static website. + */ + @JsonProperty(value = "Enabled", required = true) + private boolean enabled; + + /** + * The default name of the index page under each directory. + */ + @JsonProperty(value = "IndexDocument") + private String indexDocument; + + /** + * The absolute path of the custom 404 page. + */ + @JsonProperty(value = "ErrorDocument404Path") + private String errorDocument404Path; + + /** + * Get the enabled value. + * + * @return the enabled value. + */ + public boolean enabled() { + return this.enabled; + } + + /** + * Set the enabled value. + * + * @param enabled the enabled value to set. 
+ * @return the StaticWebsite object itself. + */ + public StaticWebsite withEnabled(boolean enabled) { + this.enabled = enabled; + return this; + } + + /** + * Get the indexDocument value. + * + * @return the indexDocument value. + */ + public String indexDocument() { + return this.indexDocument; + } + + /** + * Set the indexDocument value. + * + * @param indexDocument the indexDocument value to set. + * @return the StaticWebsite object itself. + */ + public StaticWebsite withIndexDocument(String indexDocument) { + this.indexDocument = indexDocument; + return this; + } + + /** + * Get the errorDocument404Path value. + * + * @return the errorDocument404Path value. + */ + public String errorDocument404Path() { + return this.errorDocument404Path; + } + + /** + * Set the errorDocument404Path value. + * + * @param errorDocument404Path the errorDocument404Path value to set. + * @return the StaticWebsite object itself. + */ + public StaticWebsite withErrorDocument404Path(String errorDocument404Path) { + this.errorDocument404Path = errorDocument404Path; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/StorageError.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/StorageError.java new file mode 100644 index 0000000000000..1b4713a371a7e --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/StorageError.java @@ -0,0 +1,47 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. 
+ */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; + +/** + * The StorageError model. + */ +@JacksonXmlRootElement(localName = "StorageError") +public final class StorageError { + /** + * The message property. + */ + @JsonProperty(value = "Message") + private String message; + + /** + * Get the message value. + * + * @return the message value. + */ + public String message() { + return this.message; + } + + /** + * Set the message value. + * + * @param message the message value to set. + * @return the StorageError object itself. + */ + public StorageError withMessage(String message) { + this.message = message; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/StorageErrorCode.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/StorageErrorCode.java new file mode 100644 index 0000000000000..f06879206d107 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/StorageErrorCode.java @@ -0,0 +1,558 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.microsoft.rest.v2.ExpandableStringEnum; +import java.util.Collection; + +/** + * Defines values for StorageErrorCode. + */ +public final class StorageErrorCode extends ExpandableStringEnum { + /** + * Static value AccountAlreadyExists for StorageErrorCode. 
+ */ + public static final StorageErrorCode ACCOUNT_ALREADY_EXISTS = fromString("AccountAlreadyExists"); + + /** + * Static value AccountBeingCreated for StorageErrorCode. + */ + public static final StorageErrorCode ACCOUNT_BEING_CREATED = fromString("AccountBeingCreated"); + + /** + * Static value AccountIsDisabled for StorageErrorCode. + */ + public static final StorageErrorCode ACCOUNT_IS_DISABLED = fromString("AccountIsDisabled"); + + /** + * Static value AuthenticationFailed for StorageErrorCode. + */ + public static final StorageErrorCode AUTHENTICATION_FAILED = fromString("AuthenticationFailed"); + + /** + * Static value ConditionHeadersNotSupported for StorageErrorCode. + */ + public static final StorageErrorCode CONDITION_HEADERS_NOT_SUPPORTED = fromString("ConditionHeadersNotSupported"); + + /** + * Static value ConditionNotMet for StorageErrorCode. + */ + public static final StorageErrorCode CONDITION_NOT_MET = fromString("ConditionNotMet"); + + /** + * Static value EmptyMetadataKey for StorageErrorCode. + */ + public static final StorageErrorCode EMPTY_METADATA_KEY = fromString("EmptyMetadataKey"); + + /** + * Static value InsufficientAccountPermissions for StorageErrorCode. + */ + public static final StorageErrorCode INSUFFICIENT_ACCOUNT_PERMISSIONS = fromString("InsufficientAccountPermissions"); + + /** + * Static value InternalError for StorageErrorCode. + */ + public static final StorageErrorCode INTERNAL_ERROR = fromString("InternalError"); + + /** + * Static value InvalidAuthenticationInfo for StorageErrorCode. + */ + public static final StorageErrorCode INVALID_AUTHENTICATION_INFO = fromString("InvalidAuthenticationInfo"); + + /** + * Static value InvalidHeaderValue for StorageErrorCode. + */ + public static final StorageErrorCode INVALID_HEADER_VALUE = fromString("InvalidHeaderValue"); + + /** + * Static value InvalidHttpVerb for StorageErrorCode. 
+ */ + public static final StorageErrorCode INVALID_HTTP_VERB = fromString("InvalidHttpVerb"); + + /** + * Static value InvalidInput for StorageErrorCode. + */ + public static final StorageErrorCode INVALID_INPUT = fromString("InvalidInput"); + + /** + * Static value InvalidMd5 for StorageErrorCode. + */ + public static final StorageErrorCode INVALID_MD5 = fromString("InvalidMd5"); + + /** + * Static value InvalidMetadata for StorageErrorCode. + */ + public static final StorageErrorCode INVALID_METADATA = fromString("InvalidMetadata"); + + /** + * Static value InvalidQueryParameterValue for StorageErrorCode. + */ + public static final StorageErrorCode INVALID_QUERY_PARAMETER_VALUE = fromString("InvalidQueryParameterValue"); + + /** + * Static value InvalidRange for StorageErrorCode. + */ + public static final StorageErrorCode INVALID_RANGE = fromString("InvalidRange"); + + /** + * Static value InvalidResourceName for StorageErrorCode. + */ + public static final StorageErrorCode INVALID_RESOURCE_NAME = fromString("InvalidResourceName"); + + /** + * Static value InvalidUri for StorageErrorCode. + */ + public static final StorageErrorCode INVALID_URI = fromString("InvalidUri"); + + /** + * Static value InvalidXmlDocument for StorageErrorCode. + */ + public static final StorageErrorCode INVALID_XML_DOCUMENT = fromString("InvalidXmlDocument"); + + /** + * Static value InvalidXmlNodeValue for StorageErrorCode. + */ + public static final StorageErrorCode INVALID_XML_NODE_VALUE = fromString("InvalidXmlNodeValue"); + + /** + * Static value Md5Mismatch for StorageErrorCode. + */ + public static final StorageErrorCode MD5MISMATCH = fromString("Md5Mismatch"); + + /** + * Static value MetadataTooLarge for StorageErrorCode. + */ + public static final StorageErrorCode METADATA_TOO_LARGE = fromString("MetadataTooLarge"); + + /** + * Static value MissingContentLengthHeader for StorageErrorCode. 
+ */ + public static final StorageErrorCode MISSING_CONTENT_LENGTH_HEADER = fromString("MissingContentLengthHeader"); + + /** + * Static value MissingRequiredQueryParameter for StorageErrorCode. + */ + public static final StorageErrorCode MISSING_REQUIRED_QUERY_PARAMETER = fromString("MissingRequiredQueryParameter"); + + /** + * Static value MissingRequiredHeader for StorageErrorCode. + */ + public static final StorageErrorCode MISSING_REQUIRED_HEADER = fromString("MissingRequiredHeader"); + + /** + * Static value MissingRequiredXmlNode for StorageErrorCode. + */ + public static final StorageErrorCode MISSING_REQUIRED_XML_NODE = fromString("MissingRequiredXmlNode"); + + /** + * Static value MultipleConditionHeadersNotSupported for StorageErrorCode. + */ + public static final StorageErrorCode MULTIPLE_CONDITION_HEADERS_NOT_SUPPORTED = fromString("MultipleConditionHeadersNotSupported"); + + /** + * Static value OperationTimedOut for StorageErrorCode. + */ + public static final StorageErrorCode OPERATION_TIMED_OUT = fromString("OperationTimedOut"); + + /** + * Static value OutOfRangeInput for StorageErrorCode. + */ + public static final StorageErrorCode OUT_OF_RANGE_INPUT = fromString("OutOfRangeInput"); + + /** + * Static value OutOfRangeQueryParameterValue for StorageErrorCode. + */ + public static final StorageErrorCode OUT_OF_RANGE_QUERY_PARAMETER_VALUE = fromString("OutOfRangeQueryParameterValue"); + + /** + * Static value RequestBodyTooLarge for StorageErrorCode. + */ + public static final StorageErrorCode REQUEST_BODY_TOO_LARGE = fromString("RequestBodyTooLarge"); + + /** + * Static value ResourceTypeMismatch for StorageErrorCode. + */ + public static final StorageErrorCode RESOURCE_TYPE_MISMATCH = fromString("ResourceTypeMismatch"); + + /** + * Static value RequestUrlFailedToParse for StorageErrorCode. 
+ */ + public static final StorageErrorCode REQUEST_URL_FAILED_TO_PARSE = fromString("RequestUrlFailedToParse"); + + /** + * Static value ResourceAlreadyExists for StorageErrorCode. + */ + public static final StorageErrorCode RESOURCE_ALREADY_EXISTS = fromString("ResourceAlreadyExists"); + + /** + * Static value ResourceNotFound for StorageErrorCode. + */ + public static final StorageErrorCode RESOURCE_NOT_FOUND = fromString("ResourceNotFound"); + + /** + * Static value ServerBusy for StorageErrorCode. + */ + public static final StorageErrorCode SERVER_BUSY = fromString("ServerBusy"); + + /** + * Static value UnsupportedHeader for StorageErrorCode. + */ + public static final StorageErrorCode UNSUPPORTED_HEADER = fromString("UnsupportedHeader"); + + /** + * Static value UnsupportedXmlNode for StorageErrorCode. + */ + public static final StorageErrorCode UNSUPPORTED_XML_NODE = fromString("UnsupportedXmlNode"); + + /** + * Static value UnsupportedQueryParameter for StorageErrorCode. + */ + public static final StorageErrorCode UNSUPPORTED_QUERY_PARAMETER = fromString("UnsupportedQueryParameter"); + + /** + * Static value UnsupportedHttpVerb for StorageErrorCode. + */ + public static final StorageErrorCode UNSUPPORTED_HTTP_VERB = fromString("UnsupportedHttpVerb"); + + /** + * Static value AppendPositionConditionNotMet for StorageErrorCode. + */ + public static final StorageErrorCode APPEND_POSITION_CONDITION_NOT_MET = fromString("AppendPositionConditionNotMet"); + + /** + * Static value BlobAlreadyExists for StorageErrorCode. + */ + public static final StorageErrorCode BLOB_ALREADY_EXISTS = fromString("BlobAlreadyExists"); + + /** + * Static value BlobNotFound for StorageErrorCode. + */ + public static final StorageErrorCode BLOB_NOT_FOUND = fromString("BlobNotFound"); + + /** + * Static value BlobOverwritten for StorageErrorCode. 
+ */ + public static final StorageErrorCode BLOB_OVERWRITTEN = fromString("BlobOverwritten"); + + /** + * Static value BlobTierInadequateForContentLength for StorageErrorCode. + */ + public static final StorageErrorCode BLOB_TIER_INADEQUATE_FOR_CONTENT_LENGTH = fromString("BlobTierInadequateForContentLength"); + + /** + * Static value BlockCountExceedsLimit for StorageErrorCode. + */ + public static final StorageErrorCode BLOCK_COUNT_EXCEEDS_LIMIT = fromString("BlockCountExceedsLimit"); + + /** + * Static value BlockListTooLong for StorageErrorCode. + */ + public static final StorageErrorCode BLOCK_LIST_TOO_LONG = fromString("BlockListTooLong"); + + /** + * Static value CannotChangeToLowerTier for StorageErrorCode. + */ + public static final StorageErrorCode CANNOT_CHANGE_TO_LOWER_TIER = fromString("CannotChangeToLowerTier"); + + /** + * Static value CannotVerifyCopySource for StorageErrorCode. + */ + public static final StorageErrorCode CANNOT_VERIFY_COPY_SOURCE = fromString("CannotVerifyCopySource"); + + /** + * Static value ContainerAlreadyExists for StorageErrorCode. + */ + public static final StorageErrorCode CONTAINER_ALREADY_EXISTS = fromString("ContainerAlreadyExists"); + + /** + * Static value ContainerBeingDeleted for StorageErrorCode. + */ + public static final StorageErrorCode CONTAINER_BEING_DELETED = fromString("ContainerBeingDeleted"); + + /** + * Static value ContainerDisabled for StorageErrorCode. + */ + public static final StorageErrorCode CONTAINER_DISABLED = fromString("ContainerDisabled"); + + /** + * Static value ContainerNotFound for StorageErrorCode. + */ + public static final StorageErrorCode CONTAINER_NOT_FOUND = fromString("ContainerNotFound"); + + /** + * Static value ContentLengthLargerThanTierLimit for StorageErrorCode. + */ + public static final StorageErrorCode CONTENT_LENGTH_LARGER_THAN_TIER_LIMIT = fromString("ContentLengthLargerThanTierLimit"); + + /** + * Static value CopyAcrossAccountsNotSupported for StorageErrorCode. 
+ */ + public static final StorageErrorCode COPY_ACROSS_ACCOUNTS_NOT_SUPPORTED = fromString("CopyAcrossAccountsNotSupported"); + + /** + * Static value CopyIdMismatch for StorageErrorCode. + */ + public static final StorageErrorCode COPY_ID_MISMATCH = fromString("CopyIdMismatch"); + + /** + * Static value FeatureVersionMismatch for StorageErrorCode. + */ + public static final StorageErrorCode FEATURE_VERSION_MISMATCH = fromString("FeatureVersionMismatch"); + + /** + * Static value IncrementalCopyBlobMismatch for StorageErrorCode. + */ + public static final StorageErrorCode INCREMENTAL_COPY_BLOB_MISMATCH = fromString("IncrementalCopyBlobMismatch"); + + /** + * Static value IncrementalCopyOfEralierVersionSnapshotNotAllowed for StorageErrorCode. + */ + public static final StorageErrorCode INCREMENTAL_COPY_OF_ERALIER_VERSION_SNAPSHOT_NOT_ALLOWED = fromString("IncrementalCopyOfEralierVersionSnapshotNotAllowed"); + + /** + * Static value IncrementalCopySourceMustBeSnapshot for StorageErrorCode. + */ + public static final StorageErrorCode INCREMENTAL_COPY_SOURCE_MUST_BE_SNAPSHOT = fromString("IncrementalCopySourceMustBeSnapshot"); + + /** + * Static value InfiniteLeaseDurationRequired for StorageErrorCode. + */ + public static final StorageErrorCode INFINITE_LEASE_DURATION_REQUIRED = fromString("InfiniteLeaseDurationRequired"); + + /** + * Static value InvalidBlobOrBlock for StorageErrorCode. + */ + public static final StorageErrorCode INVALID_BLOB_OR_BLOCK = fromString("InvalidBlobOrBlock"); + + /** + * Static value InvalidBlobTier for StorageErrorCode. + */ + public static final StorageErrorCode INVALID_BLOB_TIER = fromString("InvalidBlobTier"); + + /** + * Static value InvalidBlobType for StorageErrorCode. + */ + public static final StorageErrorCode INVALID_BLOB_TYPE = fromString("InvalidBlobType"); + + /** + * Static value InvalidBlockId for StorageErrorCode. 
+ */ + public static final StorageErrorCode INVALID_BLOCK_ID = fromString("InvalidBlockId"); + + /** + * Static value InvalidBlockList for StorageErrorCode. + */ + public static final StorageErrorCode INVALID_BLOCK_LIST = fromString("InvalidBlockList"); + + /** + * Static value InvalidOperation for StorageErrorCode. + */ + public static final StorageErrorCode INVALID_OPERATION = fromString("InvalidOperation"); + + /** + * Static value InvalidPageRange for StorageErrorCode. + */ + public static final StorageErrorCode INVALID_PAGE_RANGE = fromString("InvalidPageRange"); + + /** + * Static value InvalidSourceBlobType for StorageErrorCode. + */ + public static final StorageErrorCode INVALID_SOURCE_BLOB_TYPE = fromString("InvalidSourceBlobType"); + + /** + * Static value InvalidSourceBlobUrl for StorageErrorCode. + */ + public static final StorageErrorCode INVALID_SOURCE_BLOB_URL = fromString("InvalidSourceBlobUrl"); + + /** + * Static value InvalidVersionForPageBlobOperation for StorageErrorCode. + */ + public static final StorageErrorCode INVALID_VERSION_FOR_PAGE_BLOB_OPERATION = fromString("InvalidVersionForPageBlobOperation"); + + /** + * Static value LeaseAlreadyPresent for StorageErrorCode. + */ + public static final StorageErrorCode LEASE_ALREADY_PRESENT = fromString("LeaseAlreadyPresent"); + + /** + * Static value LeaseAlreadyBroken for StorageErrorCode. + */ + public static final StorageErrorCode LEASE_ALREADY_BROKEN = fromString("LeaseAlreadyBroken"); + + /** + * Static value LeaseIdMismatchWithBlobOperation for StorageErrorCode. + */ + public static final StorageErrorCode LEASE_ID_MISMATCH_WITH_BLOB_OPERATION = fromString("LeaseIdMismatchWithBlobOperation"); + + /** + * Static value LeaseIdMismatchWithContainerOperation for StorageErrorCode. 
+ */ + public static final StorageErrorCode LEASE_ID_MISMATCH_WITH_CONTAINER_OPERATION = fromString("LeaseIdMismatchWithContainerOperation"); + + /** + * Static value LeaseIdMismatchWithLeaseOperation for StorageErrorCode. + */ + public static final StorageErrorCode LEASE_ID_MISMATCH_WITH_LEASE_OPERATION = fromString("LeaseIdMismatchWithLeaseOperation"); + + /** + * Static value LeaseIdMissing for StorageErrorCode. + */ + public static final StorageErrorCode LEASE_ID_MISSING = fromString("LeaseIdMissing"); + + /** + * Static value LeaseIsBreakingAndCannotBeAcquired for StorageErrorCode. + */ + public static final StorageErrorCode LEASE_IS_BREAKING_AND_CANNOT_BE_ACQUIRED = fromString("LeaseIsBreakingAndCannotBeAcquired"); + + /** + * Static value LeaseIsBreakingAndCannotBeChanged for StorageErrorCode. + */ + public static final StorageErrorCode LEASE_IS_BREAKING_AND_CANNOT_BE_CHANGED = fromString("LeaseIsBreakingAndCannotBeChanged"); + + /** + * Static value LeaseIsBrokenAndCannotBeRenewed for StorageErrorCode. + */ + public static final StorageErrorCode LEASE_IS_BROKEN_AND_CANNOT_BE_RENEWED = fromString("LeaseIsBrokenAndCannotBeRenewed"); + + /** + * Static value LeaseLost for StorageErrorCode. + */ + public static final StorageErrorCode LEASE_LOST = fromString("LeaseLost"); + + /** + * Static value LeaseNotPresentWithBlobOperation for StorageErrorCode. + */ + public static final StorageErrorCode LEASE_NOT_PRESENT_WITH_BLOB_OPERATION = fromString("LeaseNotPresentWithBlobOperation"); + + /** + * Static value LeaseNotPresentWithContainerOperation for StorageErrorCode. + */ + public static final StorageErrorCode LEASE_NOT_PRESENT_WITH_CONTAINER_OPERATION = fromString("LeaseNotPresentWithContainerOperation"); + + /** + * Static value LeaseNotPresentWithLeaseOperation for StorageErrorCode. 
+ */ + public static final StorageErrorCode LEASE_NOT_PRESENT_WITH_LEASE_OPERATION = fromString("LeaseNotPresentWithLeaseOperation"); + + /** + * Static value MaxBlobSizeConditionNotMet for StorageErrorCode. + */ + public static final StorageErrorCode MAX_BLOB_SIZE_CONDITION_NOT_MET = fromString("MaxBlobSizeConditionNotMet"); + + /** + * Static value NoPendingCopyOperation for StorageErrorCode. + */ + public static final StorageErrorCode NO_PENDING_COPY_OPERATION = fromString("NoPendingCopyOperation"); + + /** + * Static value OperationNotAllowedOnIncrementalCopyBlob for StorageErrorCode. + */ + public static final StorageErrorCode OPERATION_NOT_ALLOWED_ON_INCREMENTAL_COPY_BLOB = fromString("OperationNotAllowedOnIncrementalCopyBlob"); + + /** + * Static value PendingCopyOperation for StorageErrorCode. + */ + public static final StorageErrorCode PENDING_COPY_OPERATION = fromString("PendingCopyOperation"); + + /** + * Static value PreviousSnapshotCannotBeNewer for StorageErrorCode. + */ + public static final StorageErrorCode PREVIOUS_SNAPSHOT_CANNOT_BE_NEWER = fromString("PreviousSnapshotCannotBeNewer"); + + /** + * Static value PreviousSnapshotNotFound for StorageErrorCode. + */ + public static final StorageErrorCode PREVIOUS_SNAPSHOT_NOT_FOUND = fromString("PreviousSnapshotNotFound"); + + /** + * Static value PreviousSnapshotOperationNotSupported for StorageErrorCode. + */ + public static final StorageErrorCode PREVIOUS_SNAPSHOT_OPERATION_NOT_SUPPORTED = fromString("PreviousSnapshotOperationNotSupported"); + + /** + * Static value SequenceNumberConditionNotMet for StorageErrorCode. + */ + public static final StorageErrorCode SEQUENCE_NUMBER_CONDITION_NOT_MET = fromString("SequenceNumberConditionNotMet"); + + /** + * Static value SequenceNumberIncrementTooLarge for StorageErrorCode. 
+ */ + public static final StorageErrorCode SEQUENCE_NUMBER_INCREMENT_TOO_LARGE = fromString("SequenceNumberIncrementTooLarge"); + + /** + * Static value SnapshotCountExceeded for StorageErrorCode. + */ + public static final StorageErrorCode SNAPSHOT_COUNT_EXCEEDED = fromString("SnapshotCountExceeded"); + + /** + * Static value SnaphotOperationRateExceeded for StorageErrorCode. + */ + public static final StorageErrorCode SNAPHOT_OPERATION_RATE_EXCEEDED = fromString("SnaphotOperationRateExceeded"); + + /** + * Static value SnapshotsPresent for StorageErrorCode. + */ + public static final StorageErrorCode SNAPSHOTS_PRESENT = fromString("SnapshotsPresent"); + + /** + * Static value SourceConditionNotMet for StorageErrorCode. + */ + public static final StorageErrorCode SOURCE_CONDITION_NOT_MET = fromString("SourceConditionNotMet"); + + /** + * Static value SystemInUse for StorageErrorCode. + */ + public static final StorageErrorCode SYSTEM_IN_USE = fromString("SystemInUse"); + + /** + * Static value TargetConditionNotMet for StorageErrorCode. + */ + public static final StorageErrorCode TARGET_CONDITION_NOT_MET = fromString("TargetConditionNotMet"); + + /** + * Static value UnauthorizedBlobOverwrite for StorageErrorCode. + */ + public static final StorageErrorCode UNAUTHORIZED_BLOB_OVERWRITE = fromString("UnauthorizedBlobOverwrite"); + + /** + * Static value BlobBeingRehydrated for StorageErrorCode. + */ + public static final StorageErrorCode BLOB_BEING_REHYDRATED = fromString("BlobBeingRehydrated"); + + /** + * Static value BlobArchived for StorageErrorCode. + */ + public static final StorageErrorCode BLOB_ARCHIVED = fromString("BlobArchived"); + + /** + * Static value BlobNotArchived for StorageErrorCode. + */ + public static final StorageErrorCode BLOB_NOT_ARCHIVED = fromString("BlobNotArchived"); + + /** + * Creates or finds a StorageErrorCode from its string representation. + * + * @param name a name to look for. + * @return the corresponding StorageErrorCode. 
+ */ + @JsonCreator + public static StorageErrorCode fromString(String name) { + return fromString(name, StorageErrorCode.class); + } + + /** + * @return known StorageErrorCode values. + */ + public static Collection values() { + return values(StorageErrorCode.class); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/StorageErrorException.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/StorageErrorException.java new file mode 100644 index 0000000000000..935fc05c4944a --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/StorageErrorException.java @@ -0,0 +1,45 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.microsoft.rest.v2.RestException; +import com.microsoft.rest.v2.http.HttpResponse; + +/** + * Exception thrown for an invalid response with StorageError information. + */ +public final class StorageErrorException extends RestException { + /** + * Initializes a new instance of the StorageErrorException class. + * + * @param message the exception message or the response content if a message is not available. + * @param response the HTTP response. + */ + public StorageErrorException(String message, HttpResponse response) { + super(message, response); + } + + /** + * Initializes a new instance of the StorageErrorException class. + * + * @param message the exception message or the response content if a message is not available. + * @param response the HTTP response. + * @param body the deserialized response body. 
+ */ + public StorageErrorException(String message, HttpResponse response, StorageError body) { + super(message, response, body); + } + + @Override + public StorageError body() { + return (StorageError) super.body(); + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/StorageServiceProperties.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/StorageServiceProperties.java new file mode 100644 index 0000000000000..5b3467c7e4a19 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/StorageServiceProperties.java @@ -0,0 +1,221 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; +import java.util.ArrayList; +import java.util.List; + +/** + * Storage Service Properties. + */ +@JacksonXmlRootElement(localName = "StorageServiceProperties") +public final class StorageServiceProperties { + /** + * The logging property. + */ + @JsonProperty(value = "Logging") + private Logging logging; + + /** + * The hourMetrics property. + */ + @JsonProperty(value = "HourMetrics") + private Metrics hourMetrics; + + /** + * The minuteMetrics property. 
+ */ + @JsonProperty(value = "MinuteMetrics") + private Metrics minuteMetrics; + + private static final class CorsWrapper { + @JacksonXmlProperty(localName = "CorsRule") + private final List items; + + @JsonCreator + private CorsWrapper(@JacksonXmlProperty(localName = "CorsRule") List items) { + this.items = items; + } + } + + /** + * The set of CORS rules. + */ + @JsonProperty(value = "Cors") + private CorsWrapper cors; + + /** + * The default version to use for requests to the Blob service if an + * incoming request's version is not specified. Possible values include + * version 2008-10-27 and all more recent versions. + */ + @JsonProperty(value = "DefaultServiceVersion") + private String defaultServiceVersion; + + /** + * The deleteRetentionPolicy property. + */ + @JsonProperty(value = "DeleteRetentionPolicy") + private RetentionPolicy deleteRetentionPolicy; + + /** + * The staticWebsite property. + */ + @JsonProperty(value = "StaticWebsite") + private StaticWebsite staticWebsite; + + /** + * Get the logging value. + * + * @return the logging value. + */ + public Logging logging() { + return this.logging; + } + + /** + * Set the logging value. + * + * @param logging the logging value to set. + * @return the StorageServiceProperties object itself. + */ + public StorageServiceProperties withLogging(Logging logging) { + this.logging = logging; + return this; + } + + /** + * Get the hourMetrics value. + * + * @return the hourMetrics value. + */ + public Metrics hourMetrics() { + return this.hourMetrics; + } + + /** + * Set the hourMetrics value. + * + * @param hourMetrics the hourMetrics value to set. + * @return the StorageServiceProperties object itself. + */ + public StorageServiceProperties withHourMetrics(Metrics hourMetrics) { + this.hourMetrics = hourMetrics; + return this; + } + + /** + * Get the minuteMetrics value. + * + * @return the minuteMetrics value. 
+ */ + public Metrics minuteMetrics() { + return this.minuteMetrics; + } + + /** + * Set the minuteMetrics value. + * + * @param minuteMetrics the minuteMetrics value to set. + * @return the StorageServiceProperties object itself. + */ + public StorageServiceProperties withMinuteMetrics(Metrics minuteMetrics) { + this.minuteMetrics = minuteMetrics; + return this; + } + + /** + * Get the cors value. + * + * @return the cors value. + */ + public List cors() { + if (this.cors == null) { + this.cors = new CorsWrapper(new ArrayList()); + } + return this.cors.items; + } + + /** + * Set the cors value. + * + * @param cors the cors value to set. + * @return the StorageServiceProperties object itself. + */ + public StorageServiceProperties withCors(List cors) { + this.cors = new CorsWrapper(cors); + return this; + } + + /** + * Get the defaultServiceVersion value. + * + * @return the defaultServiceVersion value. + */ + public String defaultServiceVersion() { + return this.defaultServiceVersion; + } + + /** + * Set the defaultServiceVersion value. + * + * @param defaultServiceVersion the defaultServiceVersion value to set. + * @return the StorageServiceProperties object itself. + */ + public StorageServiceProperties withDefaultServiceVersion(String defaultServiceVersion) { + this.defaultServiceVersion = defaultServiceVersion; + return this; + } + + /** + * Get the deleteRetentionPolicy value. + * + * @return the deleteRetentionPolicy value. + */ + public RetentionPolicy deleteRetentionPolicy() { + return this.deleteRetentionPolicy; + } + + /** + * Set the deleteRetentionPolicy value. + * + * @param deleteRetentionPolicy the deleteRetentionPolicy value to set. + * @return the StorageServiceProperties object itself. + */ + public StorageServiceProperties withDeleteRetentionPolicy(RetentionPolicy deleteRetentionPolicy) { + this.deleteRetentionPolicy = deleteRetentionPolicy; + return this; + } + + /** + * Get the staticWebsite value. + * + * @return the staticWebsite value. 
+ */ + public StaticWebsite staticWebsite() { + return this.staticWebsite; + } + + /** + * Set the staticWebsite value. + * + * @param staticWebsite the staticWebsite value to set. + * @return the StorageServiceProperties object itself. + */ + public StorageServiceProperties withStaticWebsite(StaticWebsite staticWebsite) { + this.staticWebsite = staticWebsite; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/StorageServiceStats.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/StorageServiceStats.java new file mode 100644 index 0000000000000..9b0800077701c --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/StorageServiceStats.java @@ -0,0 +1,47 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; +import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement; + +/** + * Stats for the storage service. + */ +@JacksonXmlRootElement(localName = "StorageServiceStats") +public final class StorageServiceStats { + /** + * The geoReplication property. + */ + @JsonProperty(value = "GeoReplication") + private GeoReplication geoReplication; + + /** + * Get the geoReplication value. + * + * @return the geoReplication value. + */ + public GeoReplication geoReplication() { + return this.geoReplication; + } + + /** + * Set the geoReplication value. + * + * @param geoReplication the geoReplication value to set. + * @return the StorageServiceStats object itself. 
+ */ + public StorageServiceStats withGeoReplication(GeoReplication geoReplication) { + this.geoReplication = geoReplication; + return this; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/SyncCopyStatusType.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/SyncCopyStatusType.java new file mode 100644 index 0000000000000..fe6fc7acd923c --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/SyncCopyStatusType.java @@ -0,0 +1,56 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +package com.microsoft.azure.storage.blob.models; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; + +/** + * Defines values for SyncCopyStatusType. + */ +public enum SyncCopyStatusType { + /** + * Enum value success. + */ + SUCCESS("success"); + + /** + * The actual serialized value for a SyncCopyStatusType instance. + */ + private final String value; + + private SyncCopyStatusType(String value) { + this.value = value; + } + + /** + * Parses a serialized value to a SyncCopyStatusType instance. + * + * @param value the serialized value to parse. + * @return the parsed SyncCopyStatusType object, or null if unable to parse. 
+ */ + @JsonCreator + public static SyncCopyStatusType fromString(String value) { + SyncCopyStatusType[] items = SyncCopyStatusType.values(); + for (SyncCopyStatusType item : items) { + if (item.toString().equalsIgnoreCase(value)) { + return item; + } + } + return null; + } + + @JsonValue + @Override + public String toString() { + return this.value; + } +} diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/package-info.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/package-info.java new file mode 100644 index 0000000000000..275a622143ada --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/models/package-info.java @@ -0,0 +1,13 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for +// license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +/** + * This package contains the blob.models classes for StorageClient. + * Storage Client. + */ +package com.microsoft.azure.storage.blob.models; diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/package-info.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/package-info.java new file mode 100644 index 0000000000000..62ca93b2bb0eb --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/blob/package-info.java @@ -0,0 +1,8 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for +// license information. + +/** + * This package contains the core types for interacting with Azure Storage; start here. 
+ */ +package com.microsoft.azure.storage.blob; diff --git a/storage/data-plane/src/main/java/com/microsoft/azure/storage/package-info.java b/storage/data-plane/src/main/java/com/microsoft/azure/storage/package-info.java new file mode 100644 index 0000000000000..b558d1e46a019 --- /dev/null +++ b/storage/data-plane/src/main/java/com/microsoft/azure/storage/package-info.java @@ -0,0 +1,13 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for +// license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +/** + * This package contains the classes for StorageClient. + * Storage Client. + */ +package com.microsoft.azure.storage; diff --git a/storage/data-plane/src/test/java/com/microsoft/azure/storage/APISpec.groovy b/storage/data-plane/src/test/java/com/microsoft/azure/storage/APISpec.groovy new file mode 100644 index 0000000000000..16c1dd3709aac --- /dev/null +++ b/storage/data-plane/src/test/java/com/microsoft/azure/storage/APISpec.groovy @@ -0,0 +1,564 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.microsoft.azure.storage + +import com.microsoft.azure.storage.blob.* +import com.microsoft.azure.storage.blob.models.* +import com.microsoft.rest.v2.Context +import com.microsoft.rest.v2.http.* +import com.microsoft.rest.v2.policy.RequestPolicy +import com.microsoft.rest.v2.policy.RequestPolicyFactory +import io.reactivex.Flowable +import io.reactivex.Single +import org.spockframework.lang.ISpecificationContext +import spock.lang.Shared +import spock.lang.Specification + +import java.nio.ByteBuffer +import java.time.OffsetDateTime + +class APISpec extends Specification { + @Shared + Integer iterationNo = 0 // Used to generate stable container names for recording tests with multiple iterations. + + Integer entityNo = 0 // Used to generate stable container names for recording tests requiring multiple containers. + + @Shared + ContainerURL cu + + // Fields used for conveniently creating blobs with data. + static final String defaultText = "default" + + static final ByteBuffer defaultData = ByteBuffer.wrap(defaultText.bytes) + + static final Flowable defaultFlowable = Flowable.just(defaultData) + + static defaultDataSize = defaultData.remaining() + + // If debugging is enabled, recordings cannot run as there can only be one proxy at a time. + static boolean enableDebugging = false + + // Prefixes for blobs and containers + static String containerPrefix = "jtc" // java test container + + static String blobPrefix = "javablob" + + /* + The values below are used to create data-driven tests for access conditions. + */ + static final OffsetDateTime oldDate = OffsetDateTime.now().minusDays(1) + + static final OffsetDateTime newDate = OffsetDateTime.now().plusDays(1) + + /* + Note that this value is only used to check if we are depending on the received etag. This value will not actually + be used. 
+ */ + static final String receivedEtag = "received" + + static final String garbageEtag = "garbage" + + /* + Note that this value is only used to check if we are depending on the received etag. This value will not actually + be used. + */ + static final String receivedLeaseID = "received" + + static final String garbageLeaseID = UUID.randomUUID().toString() + + /* + Credentials for various kinds of accounts. + */ + static SharedKeyCredentials primaryCreds = getGenericCreds("") + + static ServiceURL primaryServiceURL = getGenericServiceURL(primaryCreds) + + static SharedKeyCredentials alternateCreds = getGenericCreds("SECONDARY_") + + /* + URLs to various kinds of accounts. + */ + static ServiceURL alternateServiceURL = getGenericServiceURL(alternateCreds) + + static ServiceURL blobStorageServiceURL = getGenericServiceURL(getGenericCreds("BLOB_STORAGE_")) + + static ServiceURL premiumServiceURL = getGenericServiceURL(getGenericCreds("PREMIUM_")) + + /* + Constants for testing that the context parameter is properly passed to the pipeline. + */ + static final String defaultContextKey = "Key" + + static final String defaultContextValue = "Value" + + static final Context defaultContext = new Context(defaultContextKey, defaultContextValue) + + static String getTestName(ISpecificationContext ctx) { + return ctx.getCurrentFeature().name.replace(' ', '').toLowerCase() + } + + def generateContainerName() { + generateContainerName(specificationContext, iterationNo, entityNo++) + } + + def generateBlobName() { + generateBlobName(specificationContext, iterationNo, entityNo++) + } + + /** + * This function generates an entity name by concatenating the passed prefix, the name of the test requesting the + * entity name, and some unique suffix. This ensures that the entity name is unique for each test so there are + * no conflicts on the service. If we are not recording, we can just use the time. If we are recording, the suffix + * must always be the same so we can match requests. 
To solve this, we use the entityNo for how many entities have + * already been created by this test so far. This would sufficiently distinguish entities within a recording, but + * could still yield duplicates on the service for data-driven tests. Therefore, we also add the iteration number + * of the data driven tests. + * + * @param specificationContext + * Used to obtain the name of the test running. + * @param prefix + * Used to group all entities created by these tests under common prefixes. Useful for listing. + * @param iterationNo + * Indicates which iteration of a data-driven test is being executed. + * @param entityNo + * Indicates how many entities have been created by the test so far. This distinguishes multiple containers + * or multiple blobs created by the same test. Only used when dealing with recordings. + * @return + */ + static String generateResourceName(ISpecificationContext specificationContext, String prefix, int iterationNo, + int entityNo) { + String suffix = "" + suffix += System.currentTimeMillis() // For uniqueness between runs. + suffix += entityNo // For easy identification of which call created this resource. 
+ return prefix + getTestName(specificationContext) + suffix + } + + static int updateIterationNo(ISpecificationContext specificationContext, int iterationNo) { + if (specificationContext.currentIteration.estimatedNumIterations > 1) { + return iterationNo + 1 + } else { + return 0 + } + } + + static String generateContainerName(ISpecificationContext specificationContext, int iterationNo, int entityNo) { + return generateResourceName(specificationContext, containerPrefix, iterationNo, entityNo) + } + + static String generateBlobName(ISpecificationContext specificationContext, int iterationNo, int entityNo) { + return generateResourceName(specificationContext, blobPrefix, iterationNo, entityNo) + } + + static void setupFeatureRecording(String sceneName) { + + } + + static void scrubAuthHeader(String sceneName) { + + } + + static getGenericCreds(String accountType) { + String accountName = System.getenv().get(accountType + "ACCOUNT_NAME") + String accountKey = System.getenv().get(accountType + "ACCOUNT_KEY") + if (accountName == null || accountKey == null) { + System.out.println("Account name or key for the " + accountType + " account was null. Test's requiring " + + "these credentials will fail.") + return null + } + return new SharedKeyCredentials(accountName, accountKey) + } + + static HttpClient getHttpClient() { + if (enableDebugging) { + HttpClientConfiguration configuration = new HttpClientConfiguration( + new Proxy(Proxy.Type.HTTP, new InetSocketAddress("localhost", 8888))) + return HttpClient.createDefault(configuration) + } else return HttpClient.createDefault() + } + + static ServiceURL getGenericServiceURL(SharedKeyCredentials creds) { + PipelineOptions po = new PipelineOptions() + po.withClient(getHttpClient()) + + // Logging errors can be helpful for debugging in Travis. 
+ po.withLogger(new HttpPipelineLogger() { + @Override + HttpPipelineLogLevel minimumLogLevel() { + HttpPipelineLogLevel.ERROR + } + + @Override + void log(HttpPipelineLogLevel httpPipelineLogLevel, String s, Object... objects) { + System.out.println(String.format(s, objects)) + } + }) + + HttpPipeline pipeline = StorageURL.createPipeline(creds, po) + + return new ServiceURL(new URL("http://" + creds.getAccountName() + ".blob.core.windows.net"), pipeline) + } + + static void cleanupContainers() throws MalformedURLException { + // Create a new pipeline without any proxies + HttpPipeline pipeline = StorageURL.createPipeline(primaryCreds, new PipelineOptions()) + + ServiceURL serviceURL = new ServiceURL( + new URL("http://" + System.getenv().get("ACCOUNT_NAME") + ".blob.core.windows.net"), pipeline) + // There should not be more than 5000 containers from these tests + for (ContainerItem c : serviceURL.listContainersSegment(null, + new ListContainersOptions().withPrefix(containerPrefix), null).blockingGet() + .body().containerItems()) { + ContainerURL containerURL = serviceURL.createContainerURL(c.name()) + if (c.properties().leaseState().equals(LeaseStateType.LEASED)) { + containerURL.breakLease(0, null, null).blockingGet() + } + containerURL.delete(null, null).blockingGet() + } + } + + /* + Size must be an int because ByteBuffer sizes can only be an int. Long is not supported. + */ + static ByteBuffer getRandomData(int size) { + Random rand = new Random(getRandomSeed()) + byte[] data = new byte[size] + rand.nextBytes(data) + return ByteBuffer.wrap(data) + } + + /* + We only allow int because anything larger than 2GB (which would require a long) is left to stress/perf. 
+ */ + static File getRandomFile(int size) { + File file = File.createTempFile(UUID.randomUUID().toString(), ".txt") + file.deleteOnExit() + FileOutputStream fos = new FileOutputStream(file) + fos.write(getRandomData(size).array()) + fos.close() + return file + } + + static long getRandomSeed() { + return System.currentTimeMillis() + } + + def setupSpec() { + } + + def cleanupSpec() { + cleanupContainers() + } + + def setup() { + /* + We'll let primary creds throw and crash if there are no credentials specified because everything else will fail. + */ + primaryCreds = getGenericCreds("") + primaryServiceURL = getGenericServiceURL(primaryCreds) + + /* + It's feasible someone wants to test a specific subset of tests, so we'll still attempt to create each of the + ServiceURLs separately. We don't really need to take any action here, as we've already reported to the user, + so we just swallow the exception and let the relevant tests fail later. Perhaps we can add annotations or + something in the future. + */ + try { + alternateCreds = getGenericCreds("SECONDARY_") + alternateServiceURL = getGenericServiceURL(alternateCreds) + } + catch (Exception e) { + } + try { + blobStorageServiceURL = getGenericServiceURL(getGenericCreds("BLOB_STORAGE_")) + } + catch (Exception e) { + } + try { + premiumServiceURL = getGenericServiceURL(getGenericCreds("PREMIUM_")) + } + catch (Exception e) { + } + + cu = primaryServiceURL.createContainerURL(generateContainerName()) + cu.create(null, null, null).blockingGet() + } + + def cleanup() { + // TODO: Scrub auth header here? + iterationNo = updateIterationNo(specificationContext, iterationNo) + } + + /** + * This will retrieve the etag to be used in testing match conditions. The result will typically be assigned to + * the ifMatch condition when testing success and the ifNoneMatch condition when testing failure. + * + * @param bu + * The URL to the blob to get the etag on. + * @param match + * The ETag value for this test. 
If {@code receivedEtag} is passed, that will signal that the test is expecting + * the blob's actual etag for this test, so it is retrieved. + * @return + * The appropriate etag value to run the current test. + */ + def setupBlobMatchCondition(BlobURL bu, String match) { + if (match == receivedEtag) { + BlobGetPropertiesHeaders headers = bu.getProperties(null, null).blockingGet().headers() + return headers.eTag() + } else { + return match + } + } + + /** + * This helper method will acquire a lease on a blob to prepare for testing leaseAccessConditions. We want to test + * against a valid lease in both the success and failure cases to guarantee that the results actually indicate + * proper setting of the header. If we pass null, though, we don't want to acquire a lease, as that will interfere + * with other AC tests. + * + * @param bu + * The blob on which to acquire a lease. + * @param leaseID + * The signalID. Values should only ever be {@code receivedLeaseID}, {@code garbageLeaseID}, or {@code null}. + * @return + * The actual leaseAccessConditions of the blob if recievedLeaseID is passed, otherwise whatever was passed will be + * returned. 
+ */ + def setupBlobLeaseCondition(BlobURL bu, String leaseID) { + BlobAcquireLeaseHeaders headers = null + if (leaseID == receivedLeaseID || leaseID == garbageLeaseID) { + headers = bu.acquireLease(null, -1, null, null).blockingGet().headers() + } + if (leaseID == receivedLeaseID) { + return headers.leaseId() + } else { + return leaseID + } + } + + def setupContainerMatchCondition(ContainerURL cu, String match) { + if (match == receivedEtag) { + ContainerGetPropertiesHeaders headers = cu.getProperties(null, null).blockingGet().headers() + return headers.eTag() + } else { + return match + } + } + + def setupContainerLeaseCondition(ContainerURL cu, String leaseID) { + if (leaseID == receivedLeaseID) { + ContainerAcquireLeaseHeaders headers = + cu.acquireLease(null, -1, null, null).blockingGet().headers() + return headers.leaseId() + } else { + return leaseID + } + } + + def getMockRequest() { + HttpHeaders headers = new HttpHeaders() + headers.set(Constants.HeaderConstants.CONTENT_ENCODING, "en-US") + URL url = new URL("http://devtest.blob.core.windows.net/test-container/test-blob") + HttpRequest request = new HttpRequest(null, HttpMethod.POST, url, headers, null, null) + return request + } + + def waitForCopy(BlobURL bu, CopyStatusType status) { + OffsetDateTime start = OffsetDateTime.now() + while (status != CopyStatusType.SUCCESS) { + status = bu.getProperties(null, null).blockingGet().headers().copyStatus() + OffsetDateTime currentTime = OffsetDateTime.now() + if (status == CopyStatusType.FAILED || currentTime.minusMinutes(1) == start) { + throw new Exception("Copy failed or took too long") + } + sleep(1000) + } + } + + /** + * Validates the presence of headers that are present on a large number of responses. These headers are generally + * random and can really only be checked as not null. + * @param headers + * The object (may be headers object or response object) that has properties which expose these common headers. 
+ * @return + * Whether or not the header values are appropriate. + */ + def validateBasicHeaders(Object headers) { + return headers.class.getMethod("eTag").invoke(headers) != null && + headers.class.getMethod("lastModified").invoke(headers) != null && + headers.class.getMethod("requestId").invoke(headers) != null && + headers.class.getMethod("version").invoke(headers) != null && + headers.class.getMethod("date").invoke(headers) != null + } + + def validateBlobHeaders(Object headers, String cacheControl, String contentDisposition, String contentEncoding, + String contentLangauge, byte[] contentMD5, String contentType) { + return headers.class.getMethod("cacheControl").invoke(headers) == cacheControl && + headers.class.getMethod("contentDisposition").invoke(headers) == contentDisposition && + headers.class.getMethod("contentEncoding").invoke(headers) == contentEncoding && + headers.class.getMethod("contentLanguage").invoke(headers) == contentLangauge && + headers.class.getMethod("contentMD5").invoke(headers) == contentMD5 && + headers.class.getMethod("contentType").invoke(headers) == contentType + + } + + def enableSoftDelete() { + primaryServiceURL.setProperties(new StorageServiceProperties() + .withDeleteRetentionPolicy(new RetentionPolicy().withEnabled(true).withDays(2)), null) + .blockingGet() + sleep(30000) // Wait for the policy to take effect. + } + + def disableSoftDelete() { + primaryServiceURL.setProperties(new StorageServiceProperties() + .withDeleteRetentionPolicy(new RetentionPolicy().withEnabled(false)), null).blockingGet() + + sleep(30000) // Wait for the policy to take effect. + } + + + + /* + This method returns a stub of an HttpResponse. This is for when we want to test policies in isolation but don't care + about the status code, so we stub a response that always returns a given value for the status code. We never care + about the number or nature of interactions with this stub. 
+ */ + + def getStubResponse(int code) { + return Stub(HttpResponse) { + statusCode() >> code + } + } + + /* + This is for stubbing responses that will actually go through the pipeline and autorest code. Autorest does not seem + to play too nicely with mocked objects and the complex reflection stuff on both ends made it more difficult to work + with than was worth it. + */ + def getStubResponse(int code, Class responseHeadersType) { + return new HttpResponse() { + + @Override + int statusCode() { + return code + } + + @Override + String headerValue(String s) { + return null + } + + @Override + HttpHeaders headers() { + return new HttpHeaders() + } + + @Override + Flowable body() { + return Flowable.empty() + } + + @Override + Single bodyAsByteArray() { + return null + } + + @Override + Single bodyAsString() { + return null + } + + @Override + Object deserializedHeaders() { + return responseHeadersType.getConstructor().newInstance() + } + + @Override + boolean isDecoded() { + return true + } + } + } + + /* + This is for stubbing responses that will actually go through the pipeline and autorest code. Autorest does not seem + to play too nicely with mocked objects and the complex reflection stuff on both ends made it more difficult to work + with than was worth it. Because this type is just for BlobDownload, we don't need to accept a header type. 
+ */ + def getStubResponseForBlobDownload(int code, Flowable body, String etag) { + return new HttpResponse() { + + @Override + int statusCode() { + return code + } + + @Override + String headerValue(String s) { + return null + } + + @Override + HttpHeaders headers() { + return new HttpHeaders() + } + + @Override + Flowable body() { + return body + } + + @Override + Single bodyAsByteArray() { + return null + } + + @Override + Single bodyAsString() { + return null + } + + @Override + Object deserializedHeaders() { + def headers = new BlobDownloadHeaders() + headers.withETag(etag) + return headers + } + + @Override + boolean isDecoded() { + return true + } + } + } + + def getContextStubPolicy(int successCode, Class responseHeadersType) { + return Mock(RequestPolicy) { + sendAsync(_) >> { HttpRequest request -> + if (!request.context().getData(defaultContextKey).isPresent()) { + return Single.error(new RuntimeException("Context key not present.")) + } else { + return Single.just(getStubResponse(successCode, responseHeadersType)) + } + } + } + } + + def getStubFactory(RequestPolicy policy) { + return Mock(RequestPolicyFactory) { + create(*_) >> policy + } + } +} diff --git a/storage/data-plane/src/test/java/com/microsoft/azure/storage/AppendBlobAPITest.groovy b/storage/data-plane/src/test/java/com/microsoft/azure/storage/AppendBlobAPITest.groovy new file mode 100644 index 0000000000000..96c79ecbeb47e --- /dev/null +++ b/storage/data-plane/src/test/java/com/microsoft/azure/storage/AppendBlobAPITest.groovy @@ -0,0 +1,309 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.microsoft.azure.storage + +import com.microsoft.azure.storage.blob.* +import com.microsoft.azure.storage.blob.models.* +import com.microsoft.rest.v2.http.HttpPipeline +import com.microsoft.rest.v2.http.UnexpectedLengthException +import com.microsoft.rest.v2.util.FlowableUtil +import io.reactivex.Flowable +import spock.lang.Unroll + +import java.nio.ByteBuffer +import java.security.MessageDigest + +class AppendBlobAPITest extends APISpec { + AppendBlobURL bu + + def setup() { + bu = cu.createAppendBlobURL(generateBlobName()) + bu.create(null, null, null, null).blockingGet() + } + + def "Create defaults"() { + when: + AppendBlobCreateResponse createResponse = + bu.create(null, null, null, null).blockingGet() + + then: + createResponse.statusCode() == 201 + validateBasicHeaders(createResponse.headers()) + createResponse.headers().contentMD5() == null + createResponse.headers().isServerEncrypted() + } + + def "Create min"() { + expect: + bu.create().blockingGet().statusCode() == 201 + } + + def "Create error"() { + when: + bu.create(null, null, new BlobAccessConditions().withModifiedAccessConditions(new ModifiedAccessConditions() + .withIfMatch("garbage")), null).blockingGet() + + then: + thrown(StorageException) + } + + @Unroll + def "Create headers"() { + setup: + BlobHTTPHeaders headers = new BlobHTTPHeaders().withBlobCacheControl(cacheControl) + .withBlobContentDisposition(contentDisposition) + .withBlobContentEncoding(contentEncoding) + .withBlobContentLanguage(contentLanguage) + .withBlobContentMD5(contentMD5) + 
.withBlobContentType(contentType) + + when: + bu.create(headers, null, null, null).blockingGet() + BlobGetPropertiesResponse response = bu.getProperties(null, null).blockingGet() + + then: + validateBlobHeaders(response.headers(), cacheControl, contentDisposition, contentEncoding, contentLanguage, + contentMD5, contentType == null ? "application/octet-stream" : contentType) + // HTTP default content type is application/octet-stream + + where: + cacheControl | contentDisposition | contentEncoding | contentLanguage | contentMD5 | contentType + null | null | null | null | null | null + "control" | "disposition" | "encoding" | "language" | Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(defaultData.array())) | "type" + } + + @Unroll + def "Create metadata"() { + setup: + Metadata metadata = new Metadata() + if (key1 != null) { + metadata.put(key1, value1) + } + if (key2 != null) { + metadata.put(key2, value2) + } + + when: + bu.create(null, metadata, null, null).blockingGet() + BlobGetPropertiesResponse response = bu.getProperties(null, null).blockingGet() + + then: + response.headers().metadata() == metadata + + where: + key1 | value1 | key2 | value2 + null | null | null | null + "foo" | "bar" | "fizz" | "buzz" + } + + @Unroll + def "Create AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + leaseID = setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + + expect: + bu.create(null, null, bac, null).blockingGet().statusCode() == 201 + + where: + modified | unmodified | match | noneMatch | leaseID + null | null | null | null | null + oldDate | null | null | null | null + null | newDate | null | null | null + null | null | receivedEtag | null | 
null + null | null | null | garbageEtag | null + null | null | null | null | receivedLeaseID + } + + @Unroll + def "Create AC fail"() { + setup: + noneMatch = setupBlobMatchCondition(bu, noneMatch) + setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + when: + bu.create(null, null, bac, null).blockingGet() + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch | leaseID + newDate | null | null | null | null + null | oldDate | null | null | null + null | null | garbageEtag | null | null + null | null | null | receivedEtag | null + null | null | null | null | garbageLeaseID + } + + def "Create context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(201, AppendBlobCreateHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + bu.create(null, null, null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "Append block defaults"() { + setup: + AppendBlobAppendBlockHeaders headers = + bu.appendBlock(defaultFlowable, defaultDataSize, + null, null).blockingGet().headers() + + expect: + FlowableUtil.collectBytesInBuffer(bu.download(null, null, false, null) + .blockingGet().body(null)).blockingGet().compareTo(defaultData) == 0 + validateBasicHeaders(headers) + headers.contentMD5() != null + headers.blobAppendOffset() != null + headers.blobCommittedBlockCount() != null + bu.getProperties(null, null).blockingGet().headers().blobCommittedBlockCount() == 1 + } + + def "Append block min"() { + expect: + bu.appendBlock(defaultFlowable, defaultDataSize).blockingGet().statusCode() == 201 + } + + @Unroll + def "Append block IA"() { + when: + bu.appendBlock(data, dataSize, null, 
null).blockingGet() + + then: + def e = thrown(Exception) + exceptionType.isInstance(e) + + where: + data | dataSize | exceptionType + null | defaultDataSize | IllegalArgumentException + defaultFlowable | defaultDataSize + 1 | UnexpectedLengthException + defaultFlowable | defaultDataSize - 1 | UnexpectedLengthException + } + + def "Append block empty body"() { + when: + bu.appendBlock(Flowable.just(ByteBuffer.wrap(new byte[0])), 0, null, null).blockingGet() + + then: + thrown(StorageException) + } + + def "Append block null body"() { + when: + bu.appendBlock(Flowable.just(null), 0, null, null).blockingGet() + + then: + thrown(NullPointerException) // Thrown by Flowable. + } + + @Unroll + def "Append block AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + leaseID = setupBlobLeaseCondition(bu, leaseID) + AppendBlobAccessConditions bac = new AppendBlobAccessConditions() + .withModifiedAccessConditions(new ModifiedAccessConditions().withIfModifiedSince(modified) + .withIfUnmodifiedSince(unmodified).withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + .withAppendPositionAccessConditions(new AppendPositionAccessConditions() + .withAppendPosition(appendPosE).withMaxSize(maxSizeLTE)) + + expect: + bu.appendBlock(defaultFlowable, defaultDataSize, bac, null) + .blockingGet().statusCode() == 201 + + where: + modified | unmodified | match | noneMatch | leaseID | appendPosE | maxSizeLTE + null | null | null | null | null | null | null + oldDate | null | null | null | null | null | null + null | newDate | null | null | null | null | null + null | null | receivedEtag | null | null | null | null + null | null | null | garbageEtag | null | null | null + null | null | null | null | receivedLeaseID | null | null + null | null | null | null | null | 0 | null + null | null | null | null | null | null | 100 + } + + @Unroll + def "Append block AC fail"() { + setup: + noneMatch = 
setupBlobMatchCondition(bu, noneMatch) + setupBlobLeaseCondition(bu, leaseID) + + AppendBlobAccessConditions bac = new AppendBlobAccessConditions() + .withModifiedAccessConditions(new ModifiedAccessConditions().withIfModifiedSince(modified) + .withIfUnmodifiedSince(unmodified).withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + .withAppendPositionAccessConditions(new AppendPositionAccessConditions() + .withAppendPosition(appendPosE).withMaxSize(maxSizeLTE)) + + when: + bu.appendBlock(defaultFlowable, defaultDataSize, bac, null) + .blockingGet().statusCode() + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch | leaseID | appendPosE | maxSizeLTE + newDate | null | null | null | null | null | null + null | oldDate | null | null | null | null | null + null | null | garbageEtag | null | null | null | null + null | null | null | receivedEtag | null | null | null + null | null | null | null | garbageLeaseID | null | null + null | null | null | null | null | 1 | null + null | null | null | null | null | null | 1 + } + + def "Append block error"() { + setup: + bu = cu.createAppendBlobURL(generateBlobName()) + + when: + bu.appendBlock(defaultFlowable, defaultDataSize, null, null).blockingGet() + + then: + thrown(StorageException) + } + + def "Append block context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(201, AppendBlobAppendBlockHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + bu.appendBlock(defaultFlowable, defaultDataSize, null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } +} diff --git a/storage/data-plane/src/test/java/com/microsoft/azure/storage/BlobAPITest.groovy b/storage/data-plane/src/test/java/com/microsoft/azure/storage/BlobAPITest.groovy new file mode 100644 index 0000000000000..a6729a91caa50 --- /dev/null +++ 
b/storage/data-plane/src/test/java/com/microsoft/azure/storage/BlobAPITest.groovy @@ -0,0 +1,2011 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.microsoft.azure.storage + +import com.microsoft.azure.storage.blob.* +import com.microsoft.azure.storage.blob.models.* +import com.microsoft.rest.v2.http.HttpPipeline +import com.microsoft.rest.v2.http.HttpRequest +import com.microsoft.rest.v2.policy.RequestPolicy +import com.microsoft.rest.v2.util.FlowableUtil +import io.reactivex.Flowable +import io.reactivex.Single +import spock.lang.Unroll + +import java.nio.ByteBuffer +import java.security.MessageDigest + +class BlobAPITest extends APISpec { + BlobURL bu + + def setup() { + bu = cu.createBlockBlobURL(generateBlobName()) + bu.upload(defaultFlowable, defaultDataSize, null, null, + null, null).blockingGet() + } + + def "Download all null"() { + when: + DownloadResponse response = bu.download(null, null, false, null) + .blockingGet() + ByteBuffer body = FlowableUtil.collectBytesInBuffer(response.body(null)).blockingGet() + BlobDownloadHeaders headers = response.headers() + + then: + validateBasicHeaders(headers) + body == defaultData + headers.metadata().isEmpty() + headers.contentLength() != null + headers.contentType() != null + headers.contentRange() != null + headers.contentMD5() == null + headers.contentEncoding() == null + headers.cacheControl() == null + headers.contentDisposition() == null + 
headers.contentLanguage() == null + headers.blobSequenceNumber() == null + headers.blobType() == BlobType.BLOCK_BLOB + headers.copyCompletionTime() == null + headers.copyStatusDescription() == null + headers.copyId() == null + headers.copyProgress() == null + headers.copySource() == null + headers.copyStatus() == null + headers.leaseDuration() == null + headers.leaseState() == LeaseStateType.AVAILABLE + headers.leaseStatus() == LeaseStatusType.UNLOCKED + headers.acceptRanges() == "bytes" + headers.blobCommittedBlockCount() == null + headers.serverEncrypted + headers.blobContentMD5() != null + } + + /* + This is to test the appropriate integration of DownloadResponse, including setting the correct range values on + HTTPGetterInfo. + */ + def "Download with retry range"() { + /* + We are going to make a request for some range on a blob. The Flowable returned will throw an exception, forcing + a retry per the ReliableDownloadOptions. The next request should have the same range header, which was generated + from the count and offset values in HTTPGetterInfo that was constructed on the initial call to download. We + don't need to check the data here, but we want to ensure that the correct range is set each time. This will + test the correction of a bug that was found which caused HTTPGetterInfo to have an incorrect offset when it was + constructed in BlobURL.download(). + */ + setup: + def mockPolicy = Mock(RequestPolicy) { + sendAsync(_) >> { HttpRequest request -> + if (request.headers().value("x-ms-range") != "bytes=2-6") { + return Single.error(new IllegalArgumentException("The range header was not set correctly on retry.")) + } + else { + // ETag can be a dummy value. 
It's not validated, but DownloadResponse requires one + return Single.just(getStubResponseForBlobDownload(206, Flowable.error(new IOException()), "etag")) + } + } + } + def pipeline = HttpPipeline.build(getStubFactory(mockPolicy)) + bu = bu.withPipeline(pipeline) + + when: + def range = new BlobRange().withOffset(2).withCount(5) + bu.download(range, null, false, null).blockingGet().body(new ReliableDownloadOptions().withMaxRetryRequests(3)) + .blockingSubscribe() + + then: + /* + Because the dummy Flowable always throws an error. This will also validate that an IllegalArgumentException is + NOT thrown because the types would not match. + */ + def e = thrown(RuntimeException) + e.getCause() instanceof IOException + } + + def "Download min"() { + expect: + FlowableUtil.collectBytesInBuffer(bu.download().blockingGet().body(null)).blockingGet() == defaultData + } + + @Unroll + def "Download range"() { + setup: + BlobRange range = new BlobRange().withOffset(offset).withCount(count) + + when: + ByteBuffer body = FlowableUtil.collectBytesInBuffer( + bu.download(range, null, false, null).blockingGet().body(null)).blockingGet() + String bodyStr = new String(body.array()) + + then: + bodyStr == expectedData + + where: + offset | count || expectedData + 0 | null || defaultText + 0 | 5 || defaultText.substring(0, 5) + 3 | 2 || defaultText.substring(3, 3 + 2) + } + + @Unroll + def "Download AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + leaseID = setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + expect: + bu.download(null, bac, false, null).blockingGet().statusCode() == 206 + + where: + modified | unmodified | match | noneMatch | leaseID + null | null | null | 
null | null + oldDate | null | null | null | null + null | newDate | null | null | null + null | null | receivedEtag | null | null + null | null | null | garbageEtag | null + null | null | null | null | receivedLeaseID + } + + @Unroll + def "Download AC fail"() { + setup: + noneMatch = setupBlobMatchCondition(bu, noneMatch) + setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + when: + bu.download(null, bac, false, null).blockingGet().statusCode() == 206 + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch | leaseID + newDate | null | null | null | null + null | oldDate | null | null | null + null | null | garbageEtag | null | null + null | null | null | receivedEtag | null + null | null | null | null | garbageLeaseID + } + + def "Download md5"() { + expect: + bu.download(new BlobRange().withOffset(0).withCount(3), null, true, null).blockingGet() + .headers().contentMD5() == + MessageDigest.getInstance("MD5").digest(defaultText.substring(0, 3).getBytes()) + } + + def "Download error"() { + setup: + bu = cu.createBlockBlobURL(generateBlobName()) + + when: + bu.download(null, null, false, null).blockingGet() + + then: + thrown(StorageException) + } + + def "Download context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(206, BlobDownloadHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + bu.download(null, null, false, defaultContext).blockingGet() + + then: + /* + DownloadResponse requires that there be an etag present, but our mock response doesn't give back an etag. The + easiest way to validate this is to ensure the cause of the exception is in fact the absence of the etag. 
+ */ + def e = thrown(IllegalArgumentException) + e.getMessage().contains("eTag") + } + + def "Get properties default"() { + when: + BlobGetPropertiesHeaders headers = bu.getProperties(null, null).blockingGet().headers() + + then: + validateBasicHeaders(headers) + headers.metadata().isEmpty() + headers.blobType() == BlobType.BLOCK_BLOB + headers.copyCompletionTime() == null // tested in "copy" + headers.copyStatusDescription() == null // only returned when the service has errors; cannot validate. + headers.copyId() == null // tested in "abort copy" + headers.copyProgress() == null // tested in "copy" + headers.copySource() == null // tested in "copy" + headers.copyStatus() == null // tested in "copy" + headers.isIncrementalCopy() == null // tested in PageBlob."start incremental copy" + headers.destinationSnapshot() == null // tested in PageBlob."start incremental copy" + headers.leaseDuration() == null // tested in "acquire lease" + headers.leaseState() == LeaseStateType.AVAILABLE + headers.leaseStatus() == LeaseStatusType.UNLOCKED + headers.contentLength() != null + headers.contentType() != null + headers.contentMD5() != null + headers.contentEncoding() == null // tested in "set HTTP headers" + headers.contentDisposition() == null // tested in "set HTTP headers" + headers.contentLanguage() == null // tested in "set HTTP headers" + headers.cacheControl() == null // tested in "set HTTP headers" + headers.blobSequenceNumber() == null // tested in PageBlob."create sequence number" + headers.acceptRanges() == "bytes" + headers.blobCommittedBlockCount() == null // tested in AppendBlob."append block" + headers.isServerEncrypted() + headers.accessTier() == AccessTier.HOT.toString() + headers.accessTierInferred() + headers.archiveStatus() == null + headers.creationTime() != null + } + + def "Get properties min"() { + expect: + bu.getProperties().blockingGet().statusCode() == 200 + } + + @Unroll + def "Get properties AC"() { + setup: + match = setupBlobMatchCondition(bu, 
match) + leaseID = setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + expect: + bu.getProperties(bac, null).blockingGet().statusCode() == 200 + + where: + modified | unmodified | match | noneMatch | leaseID + null | null | null | null | null + oldDate | null | null | null | null + null | newDate | null | null | null + null | null | receivedEtag | null | null + null | null | null | garbageEtag | null + null | null | null | null | receivedLeaseID + } + + @Unroll + def "Get properties AC fail"() { + setup: + noneMatch = setupBlobMatchCondition(bu, noneMatch) + setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + when: + bu.getProperties(bac, null).blockingGet() + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch | leaseID + newDate | null | null | null | null + null | oldDate | null | null | null + null | null | garbageEtag | null | null + null | null | null | receivedEtag | null + null | null | null | null | garbageLeaseID + } + + def "Get properties error"() { + setup: + bu = cu.createBlockBlobURL(generateBlobName()) + + when: + bu.getProperties(null, null).blockingGet() + + then: + thrown(StorageException) + } + + def "Get properties context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(200, BlobGetPropertiesHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + bu.getProperties(null, 
defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "Set HTTP headers null"() { + setup: + BlobSetHTTPHeadersResponse response = bu.setHTTPHeaders(null, null, null).blockingGet() + + expect: + response.statusCode() == 200 + validateBasicHeaders(response.headers()) + response.headers().blobSequenceNumber() == null + } + + def "Set HTTP headers min"() { + when: + bu.setHTTPHeaders(new BlobHTTPHeaders().withBlobContentType("type")).blockingGet() + + then: + bu.getProperties().blockingGet().headers().contentType() == "type" + } + + @Unroll + def "Set HTTP headers headers"() { + setup: + BlobHTTPHeaders putHeaders = new BlobHTTPHeaders().withBlobCacheControl(cacheControl) + .withBlobContentDisposition(contentDisposition) + .withBlobContentEncoding(contentEncoding) + .withBlobContentLanguage(contentLanguage) + .withBlobContentMD5(contentMD5) + .withBlobContentType(contentType) + bu.setHTTPHeaders(putHeaders, null, null).blockingGet() + + BlobGetPropertiesHeaders receivedHeaders = + bu.getProperties(null, null).blockingGet().headers() + + expect: + validateBlobHeaders(receivedHeaders, cacheControl, contentDisposition, contentEncoding, contentLanguage, + contentMD5, contentType) + + where: + cacheControl | contentDisposition | contentEncoding | contentLanguage | contentMD5 | contentType + null | null | null | null | null | null + "control" | "disposition" | "encoding" | "language" | Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(defaultData.array())) | "type" + + } + + + @Unroll + def "Set HTTP headers AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + leaseID = setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + 
expect: + bu.setHTTPHeaders(null, bac, null).blockingGet().statusCode() == 200 + + where: + modified | unmodified | match | noneMatch | leaseID + null | null | null | null | null + oldDate | null | null | null | null + null | newDate | null | null | null + null | null | receivedEtag | null | null + null | null | null | garbageEtag | null + null | null | null | null | receivedLeaseID + } + + @Unroll + def "Set HTTP headers AC fail"() { + setup: + noneMatch = setupBlobMatchCondition(bu, noneMatch) + setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + when: + bu.setHTTPHeaders(null, bac, null).blockingGet() + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch | leaseID + newDate | null | null | null | null + null | oldDate | null | null | null + null | null | garbageEtag | null | null + null | null | null | receivedEtag | null + null | null | null | null | garbageLeaseID + } + + def "Set HTTP headers error"() { + setup: + bu = cu.createBlockBlobURL(generateBlobName()) + + when: + bu.setHTTPHeaders(null, null, null).blockingGet() + + then: + thrown(StorageException) + } + + def "Set HTTP headers context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(200, BlobSetHTTPHeadersHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + bu.setHTTPHeaders(null, null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "Set metadata all null"() { + setup: + BlobSetMetadataResponse response = bu.setMetadata(null, null, null).blockingGet() + + expect: + bu.getProperties(null, null).blockingGet().headers().metadata().size() == 0 + response.statusCode() == 200 + 
validateBasicHeaders(response.headers()) + response.headers().isServerEncrypted() + } + + def "Set metadata min"() { + setup: + Metadata metadata = new Metadata() + metadata.put("foo", "bar") + + when: + bu.setMetadata(metadata).blockingGet() + + then: + bu.getProperties().blockingGet().headers().metadata() == metadata + } + + @Unroll + def "Set metadata metadata"() { + setup: + Metadata metadata = new Metadata() + if (key1 != null && value1 != null) { + metadata.put(key1, value1) + } + if (key2 != null && value2 != null) { + metadata.put(key2, value2) + } + + expect: + bu.setMetadata(metadata, null, null).blockingGet().statusCode() == statusCode + bu.getProperties(null, null).blockingGet().headers().metadata() == metadata + + where: + key1 | value1 | key2 | value2 || statusCode + null | null | null | null || 200 + "foo" | "bar" | "fizz" | "buzz" || 200 + } + + @Unroll + def "Set metadata AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + leaseID = setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + expect: + bu.setMetadata(null, bac, null).blockingGet().statusCode() == 200 + + where: + modified | unmodified | match | noneMatch | leaseID + null | null | null | null | null + oldDate | null | null | null | null + null | newDate | null | null | null + null | null | receivedEtag | null | null + null | null | null | garbageEtag | null + null | null | null | null | receivedLeaseID + } + + @Unroll + def "Set metadata AC fail"() { + setup: + noneMatch = setupBlobMatchCondition(bu, noneMatch) + setupBlobLeaseCondition(bu, leaseID) + + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new 
ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + when: + bu.setMetadata(null, bac, null).blockingGet() + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch | leaseID + newDate | null | null | null | null + null | oldDate | null | null | null + null | null | garbageEtag | null | null + null | null | null | receivedEtag | null + null | null | null | null | garbageLeaseID + } + + def "Set metadata error"() { + setup: + bu = cu.createBlockBlobURL(generateBlobName()) + + when: + bu.setMetadata(null, null, null).blockingGet() + + then: + thrown(StorageException) + } + + def "Set metadata context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(200, BlobSetMetadataHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + bu.setMetadata(null, null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + @Unroll + def "Acquire lease"() { + setup: + BlobAcquireLeaseHeaders headers = + bu.acquireLease(proposedID, leaseTime, null, null) + .blockingGet().headers() + + when: + BlobGetPropertiesHeaders properties = bu.getProperties(null, null).blockingGet() + .headers() + + then: + properties.leaseState() == leaseState + properties.leaseDuration() == leaseDuration + headers.leaseId() != null + validateBasicHeaders(headers) + + where: + proposedID | leaseTime || leaseState | leaseDuration + null | -1 || LeaseStateType.LEASED | LeaseDurationType.INFINITE + null | 25 || LeaseStateType.LEASED | LeaseDurationType.FIXED + UUID.randomUUID().toString() | -1 || LeaseStateType.LEASED | LeaseDurationType.INFINITE + } + + def "Acquire lease min"() { + setup: + bu.acquireLease(null, -1).blockingGet().statusCode() == 201 + } + + @Unroll + def "Acquire lease AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + def 
mac = new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch) + + expect: + bu.acquireLease(null, -1, mac, null).blockingGet().statusCode() == 201 + + where: + modified | unmodified | match | noneMatch + null | null | null | null + oldDate | null | null | null + null | newDate | null | null + null | null | receivedEtag | null + null | null | null | garbageEtag + } + + @Unroll + def "Acquire lease AC fail"() { + setup: + noneMatch = setupBlobMatchCondition(bu, noneMatch) + def mac = new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch) + + when: + bu.acquireLease(null, -1, mac, null).blockingGet() + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch + newDate | null | null | null + null | oldDate | null | null + null | null | garbageEtag | null + null | null | null | receivedEtag + } + + def "Acquire lease error"() { + setup: + bu = cu.createBlockBlobURL(generateBlobName()) + + when: + bu.acquireLease(null, 20, null, null).blockingGet() + + then: + thrown(StorageException) + } + + def "Acquire lease context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(201, BlobAcquireLeaseHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + bu.acquireLease(null, 20, null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "Renew lease"() { + setup: + String leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) + + Thread.sleep(16000) // Wait for the lease to expire to ensure we are actually renewing it + BlobRenewLeaseHeaders headers = bu.renewLease(leaseID, null, null).blockingGet().headers() + + expect: + bu.getProperties(null, null).blockingGet().headers().leaseState() + .equals(LeaseStateType.LEASED) + validateBasicHeaders(headers) + headers.leaseId() != null + } + + def "Renew lease 
min"() { + setup: + String leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) + + expect: + bu.renewLease(leaseID).blockingGet().statusCode() == 200 + } + + @Unroll + def "Renew lease AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + String leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) + def mac = new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch) + + expect: + bu.renewLease(leaseID, mac, null).blockingGet().statusCode() == 200 + + where: + modified | unmodified | match | noneMatch + null | null | null | null + oldDate | null | null | null + null | newDate | null | null + null | null | receivedEtag | null + null | null | null | garbageEtag + } + + @Unroll + def "Renew lease AC fail"() { + noneMatch = setupBlobMatchCondition(bu, noneMatch) + String leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) + def mac = new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch) + + when: + bu.renewLease(leaseID, mac, null).blockingGet() + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch + newDate | null | null | null + null | oldDate | null | null + null | null | garbageEtag | null + null | null | null | receivedEtag + } + + def "Renew lease error"() { + setup: + bu = cu.createBlockBlobURL(generateBlobName()) + + when: + bu.renewLease("id", null, null).blockingGet() + + then: + thrown(StorageException) + } + + def "Renew lease context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(200, BlobRenewLeaseHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. 
+ bu.renewLease("id", null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "Release lease"() { + setup: + String leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) + + BlobReleaseLeaseHeaders headers = bu.releaseLease(leaseID, null, null).blockingGet().headers() + + expect: + bu.getProperties(null, null).blockingGet().headers().leaseState() == LeaseStateType.AVAILABLE + validateBasicHeaders(headers) + } + + def "Release lease min"() { + setup: + String leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) + + expect: + bu.releaseLease(leaseID).blockingGet().statusCode() == 200 + } + + @Unroll + def "Release lease AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + String leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) + def mac = new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch) + + expect: + bu.releaseLease(leaseID, mac, null).blockingGet().statusCode() == 200 + + where: + modified | unmodified | match | noneMatch + null | null | null | null + oldDate | null | null | null + null | newDate | null | null + null | null | receivedEtag | null + null | null | null | garbageEtag + } + + @Unroll + def "Release lease AC fail"() { + setup: + noneMatch = setupBlobMatchCondition(bu, noneMatch) + String leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) + def mac = new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch) + + when: + bu.releaseLease(leaseID, mac, null).blockingGet() + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch + newDate | null | null | null + null | oldDate | null | null + null | null | garbageEtag | null + null | null | null | receivedEtag + } + + def "Release lease error"() { + setup: + bu = cu.createBlockBlobURL(generateBlobName()) + + when: + bu.releaseLease("id", null, 
null).blockingGet() + + then: + thrown(StorageException) + } + + def "Release lease context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(200, BlobReleaseLeaseHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + bu.releaseLease("id", null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + @Unroll + def "Break lease"() { + setup: + bu.acquireLease(UUID.randomUUID().toString(), leaseTime, null, null).blockingGet() + + BlobBreakLeaseHeaders headers = bu.breakLease(breakPeriod, null, null).blockingGet().headers() + LeaseStateType state = bu.getProperties(null, null).blockingGet().headers().leaseState() + + expect: + state == LeaseStateType.BROKEN || state == LeaseStateType.BREAKING + headers.leaseTime() <= remainingTime + validateBasicHeaders(headers) + + where: + leaseTime | breakPeriod | remainingTime + -1 | null | 0 + -1 | 20 | 25 + 20 | 15 | 16 + } + + def "Break lease min"() { + setup: + setupBlobLeaseCondition(bu, receivedLeaseID) + + expect: + bu.breakLease().blockingGet().statusCode() == 202 + } + + @Unroll + def "Break lease AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + setupBlobLeaseCondition(bu, receivedLeaseID) + def mac = new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch) + + expect: + bu.breakLease(null, mac, null).blockingGet().statusCode() == 202 + + where: + modified | unmodified | match | noneMatch + null | null | null | null + oldDate | null | null | null + null | newDate | null | null + null | null | receivedEtag | null + null | null | null | garbageEtag + } + + @Unroll + def "Break lease AC fail"() { + setup: + noneMatch = setupBlobMatchCondition(bu, noneMatch) + setupBlobLeaseCondition(bu, receivedLeaseID) + def mac = new 
ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch) + + when: + bu.breakLease(null, mac, null).blockingGet() + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch + newDate | null | null | null + null | oldDate | null | null + null | null | garbageEtag | null + null | null | null | receivedEtag + } + + def "Break lease error"() { + setup: + bu = cu.createBlockBlobURL(generateBlobName()) + + when: + bu.breakLease(null, null, null).blockingGet() + + then: + thrown(StorageException) + } + + def "Break lease context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(202, BlobBreakLeaseHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + bu.breakLease(18, null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "Change lease"() { + setup: + String leaseID = + bu.acquireLease(UUID.randomUUID().toString(), 15, null, null).blockingGet() + .headers().leaseId() + BlobChangeLeaseHeaders headers = bu.changeLease(leaseID, UUID.randomUUID().toString(), null, null) + .blockingGet().headers() + leaseID = headers.leaseId() + + expect: + bu.releaseLease(leaseID, null, null).blockingGet().statusCode() == 200 + validateBasicHeaders(headers) + } + + def "Change lease min"() { + setup: + def leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) + + expect: + bu.changeLease(leaseID, UUID.randomUUID().toString()).blockingGet().statusCode() == 200 + } + + @Unroll + def "Change lease AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + String leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) + def mac = new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch) + + expect: + bu.changeLease(leaseID, UUID.randomUUID().toString(), mac, 
null).blockingGet().statusCode() == 200 + + where: + modified | unmodified | match | noneMatch + null | null | null | null + oldDate | null | null | null + null | newDate | null | null + null | null | receivedEtag | null + null | null | null | garbageEtag + } + + @Unroll + def "Change lease AC fail"() { + setup: + noneMatch = setupBlobMatchCondition(bu, noneMatch) + String leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) + def mac = new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch) + + when: + bu.changeLease(leaseID, UUID.randomUUID().toString(), mac, null).blockingGet() + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch + newDate | null | null | null + null | oldDate | null | null + null | null | garbageEtag | null + null | null | null | receivedEtag + } + + def "Change lease error"() { + setup: + bu = cu.createBlockBlobURL(generateBlobName()) + + when: + bu.changeLease("id", "id", null, null).blockingGet() + + then: + thrown(StorageException) + } + + def "Change lease context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(200, BlobChangeLeaseHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. 
+ bu.changeLease("id", "newId", null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "Snapshot"() { + when: + BlobCreateSnapshotHeaders headers = bu.createSnapshot(null, null, null) + .blockingGet().headers() + + then: + bu.withSnapshot(headers.snapshot()).getProperties(null, null).blockingGet().statusCode() == 200 + validateBasicHeaders(headers) + } + + def "Snapshot min"() { + expect: + bu.createSnapshot().blockingGet().statusCode() == 201 + } + + @Unroll + def "Snapshot metadata"() { + setup: + Metadata metadata = new Metadata() + if (key1 != null && value1 != null) { + metadata.put(key1, value1) + } + if (key2 != null && value2 != null) { + metadata.put(key2, value2) + } + + BlobCreateSnapshotResponse response = bu.createSnapshot(metadata, null, null).blockingGet() + + expect: + response.statusCode() == 201 + bu.withSnapshot(response.headers().snapshot()) + .getProperties(null, null).blockingGet().headers().metadata() == metadata + + where: + key1 | value1 | key2 | value2 + null | null | null | null + "foo" | "bar" | "fizz" | "buzz" + } + + @Unroll + def "Snapshot AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + leaseID = setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + expect: + bu.createSnapshot(null, bac, null).blockingGet().statusCode() == 201 + + where: + modified | unmodified | match | noneMatch | leaseID + null | null | null | null | null + oldDate | null | null | null | null + null | newDate | null | null | null + null | null | receivedEtag | null | null + null | null | null | garbageEtag | null + null | null | null | null | receivedLeaseID + } + + @Unroll + def "Snapshot AC fail"() { + setup: + noneMatch 
= setupBlobMatchCondition(bu, noneMatch) + setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + when: + bu.createSnapshot(null, bac, null).blockingGet() + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch | leaseID + newDate | null | null | null | null + null | oldDate | null | null | null + null | null | garbageEtag | null | null + null | null | null | receivedEtag | null + null | null | null | null | garbageLeaseID + } + + def "Snapshot error"() { + setup: + bu = cu.createBlockBlobURL(generateBlobName()) + + when: + bu.createSnapshot(null, null, null).blockingGet() + + then: + thrown(StorageException) + } + + def "Snapshot context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(201, BlobCreateSnapshotHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. 
+ bu.createSnapshot(null, null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "Copy"() { + setup: + BlobURL bu2 = cu.createBlockBlobURL(generateBlobName()) + BlobStartCopyFromURLHeaders headers = + bu2.startCopyFromURL(bu.toURL(), null, null, null, null) + .blockingGet().headers() + + when: + while (bu2.getProperties(null, null).blockingGet().headers().copyStatus() == CopyStatusType.PENDING) { + sleep(1000) + } + BlobGetPropertiesHeaders headers2 = bu2.getProperties(null, null).blockingGet().headers() + + then: + headers2.copyStatus() == CopyStatusType.SUCCESS + headers2.copyCompletionTime() != null + headers2.copyProgress() != null + headers2.copySource() != null + validateBasicHeaders(headers) + headers.copyId() != null + } + + def "Copy min"() { + expect: + bu.startCopyFromURL(bu.toURL()).blockingGet().statusCode() == 202 + } + + @Unroll + def "Copy metadata"() { + setup: + BlobURL bu2 = cu.createBlockBlobURL(generateBlobName()) + Metadata metadata = new Metadata() + if (key1 != null && value1 != null) { + metadata.put(key1, value1) + } + if (key2 != null && value2 != null) { + metadata.put(key2, value2) + } + + BlobStartCopyFromURLResponse response = + bu2.startCopyFromURL(bu.toURL(), metadata, null, null, null) + .blockingGet() + waitForCopy(bu2, response.headers().copyStatus()) + + expect: + bu2.getProperties(null, null).blockingGet().headers().metadata() == metadata + + where: + key1 | value1 | key2 | value2 + null | null | null | null + "foo" | "bar" | "fizz" | "buzz" + } + + @Unroll + def "Copy source AC"() { + setup: + BlobURL bu2 = cu.createBlockBlobURL(generateBlobName()) + match = setupBlobMatchCondition(bu, match) + def mac = new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch) + + expect: + bu2.startCopyFromURL(bu.toURL(), null, mac, null, null).blockingGet().statusCode() == 202 + + where: + modified | unmodified | match | 
noneMatch + null | null | null | null + oldDate | null | null | null + null | newDate | null | null + null | null | receivedEtag | null + null | null | null | garbageEtag + } + + @Unroll + def "Copy source AC fail"() { + setup: + BlobURL bu2 = cu.createBlockBlobURL(generateBlobName()) + noneMatch = setupBlobMatchCondition(bu, noneMatch) + def mac = new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch) + + when: + bu2.startCopyFromURL(bu.toURL(), null, mac, null, null).blockingGet() + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch + newDate | null | null | null + null | oldDate | null | null + null | null | garbageEtag | null + null | null | null | receivedEtag + } + + @Unroll + def "Copy dest AC"() { + setup: + BlobURL bu2 = cu.createBlockBlobURL(generateBlobName()) + bu2.upload(defaultFlowable, defaultDataSize, null, null, + null, null).blockingGet() + match = setupBlobMatchCondition(bu2, match) + leaseID = setupBlobLeaseCondition(bu2, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + expect: + bu2.startCopyFromURL(bu.toURL(), null, null, bac, null) + .blockingGet().statusCode() == 202 + + where: + modified | unmodified | match | noneMatch | leaseID + null | null | null | null | null + oldDate | null | null | null | null + null | newDate | null | null | null + null | null | receivedEtag | null | null + null | null | null | garbageEtag | null + null | null | null | null | receivedLeaseID + } + + @Unroll + def "Copy dest AC fail"() { + setup: + BlobURL bu2 = cu.createBlockBlobURL(generateBlobName()) + bu2.upload(defaultFlowable, defaultDataSize, null, null, + null, 
null).blockingGet() + noneMatch = setupBlobMatchCondition(bu2, noneMatch) + setupBlobLeaseCondition(bu2, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + when: + bu2.startCopyFromURL(bu.toURL(), null, null, bac, null).blockingGet() + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch | leaseID + newDate | null | null | null | null + null | oldDate | null | null | null + null | null | garbageEtag | null | null + null | null | null | receivedEtag | null + null | null | null | null | garbageLeaseID + } + + def "Copy error"() { + setup: + bu = cu.createBlockBlobURL(generateBlobName()) + + when: + bu.startCopyFromURL(new URL("http://www.error.com"), + null, null, null, null).blockingGet() + + then: + thrown(StorageException) + } + + def "Copy context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(202, BlobStartCopyFromURLHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + bu.startCopyFromURL(new URL("http://www.example.com"), null, null, null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "Abort copy"() { + setup: + // Data has to be large enough and copied between accounts to give us enough time to abort + ByteBuffer data = getRandomData(8 * 1024 * 1024) + bu.toBlockBlobURL() + .upload(Flowable.just(data), 8 * 1024 * 1024, null, null, null, null) + .blockingGet() + // So we don't have to create a SAS. 
+ cu.setAccessPolicy(PublicAccessType.BLOB, null, null, null).blockingGet() + + ContainerURL cu2 = alternateServiceURL.createContainerURL(generateBlobName()) + cu2.create(null, null, null).blockingGet() + BlobURL bu2 = cu2.createBlobURL(generateBlobName()) + + when: + String copyID = + bu2.startCopyFromURL(bu.toURL(), null, null, null, null) + .blockingGet().headers().copyId() + BlobAbortCopyFromURLResponse response = bu2.abortCopyFromURL(copyID, null, null).blockingGet() + BlobAbortCopyFromURLHeaders headers = response.headers() + + then: + response.statusCode() == 204 + headers.requestId() != null + headers.version() != null + headers.date() != null + // Normal test cleanup will not clean up containers in the alternate account. + cu2.delete(null, null).blockingGet().statusCode() == 202 + } + + def "Abort copy min"() { + setup: + // Data has to be large enough and copied between accounts to give us enough time to abort + ByteBuffer data = getRandomData(8 * 1024 * 1024) + bu.toBlockBlobURL() + .upload(Flowable.just(data), 8 * 1024 * 1024, null, null, null, null) + .blockingGet() + // So we don't have to create a SAS. + cu.setAccessPolicy(PublicAccessType.BLOB, null, null, null).blockingGet() + + ContainerURL cu2 = alternateServiceURL.createContainerURL(generateBlobName()) + cu2.create(null, null, null).blockingGet() + BlobURL bu2 = cu2.createBlobURL(generateBlobName()) + + when: + String copyID = + bu2.startCopyFromURL(bu.toURL(), null, null, null, null) + .blockingGet().headers().copyId() + + then: + bu2.abortCopyFromURL(copyID).blockingGet().statusCode() == 204 + } + + def "Abort copy lease"() { + setup: + // Data has to be large enough and copied between accounts to give us enough time to abort + ByteBuffer data = getRandomData(8 * 1024 * 1024) + bu.toBlockBlobURL() + .upload(Flowable.just(data), 8 * 1024 * 1024, null, null, null, null) + .blockingGet() + // So we don't have to create a SAS. 
+ cu.setAccessPolicy(PublicAccessType.BLOB, null, null, null).blockingGet() + + ContainerURL cu2 = alternateServiceURL.createContainerURL(generateBlobName()) + cu2.create(null, null, null).blockingGet() + BlockBlobURL bu2 = cu2.createBlockBlobURL(generateBlobName()) + bu2.upload(defaultFlowable, defaultDataSize, null, null, null, null) + .blockingGet() + String leaseID = setupBlobLeaseCondition(bu2, receivedLeaseID) + + when: + String copyID = + bu2.startCopyFromURL(bu.toURL(), null, null, + new BlobAccessConditions().withLeaseAccessConditions(new LeaseAccessConditions() + .withLeaseId(leaseID)), null) + .blockingGet().headers().copyId() + + then: + bu2.abortCopyFromURL(copyID, new LeaseAccessConditions().withLeaseId(leaseID), null) + .blockingGet().statusCode() == 204 + // Normal test cleanup will not clean up containers in the alternate account. + cu2.delete(null, null).blockingGet() + } + + def "Abort copy lease fail"() { + // Data has to be large enough and copied between accounts to give us enough time to abort + ByteBuffer data = getRandomData(8 * 1024 * 1024) + bu.toBlockBlobURL() + .upload(Flowable.just(data), 8 * 1024 * 1024, null, null, null, null) + .blockingGet() + // So we don't have to create a SAS. 
+ cu.setAccessPolicy(PublicAccessType.BLOB, null, null, null).blockingGet() + + ContainerURL cu2 = alternateServiceURL.createContainerURL(generateBlobName()) + cu2.create(null, null, null).blockingGet() + BlockBlobURL bu2 = cu2.createBlockBlobURL(generateBlobName()) + bu2.upload(defaultFlowable, defaultDataSize, null, null, null, null) + .blockingGet() + String leaseID = setupBlobLeaseCondition(bu2, receivedLeaseID) + + when: + String copyID = + bu2.startCopyFromURL(bu.toURL(), null, null, + new BlobAccessConditions().withLeaseAccessConditions(new LeaseAccessConditions() + .withLeaseId(leaseID)), null) + .blockingGet().headers().copyId() + bu2.abortCopyFromURL(copyID, new LeaseAccessConditions().withLeaseId(garbageLeaseID), null).blockingGet() + + then: + def e = thrown(StorageException) + e.statusCode() == 412 + cu2.delete(null, null).blockingGet() + } + + def "Abort copy error"() { + setup: + bu = cu.createBlockBlobURL(generateBlobName()) + + when: + bu.abortCopyFromURL("id", null, null).blockingGet() + + then: + thrown(StorageException) + } + + def "Abort copy context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(204, BlobAbortCopyFromURLHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + bu.abortCopyFromURL("id", null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "Sync copy"() { + setup: + // Sync copy is a deep copy, which requires either sas or public access. 
+ cu.setAccessPolicy(PublicAccessType.CONTAINER, null).blockingGet() + BlobURL bu2 = cu.createBlockBlobURL(generateBlobName()) + def headers = bu2.syncCopyFromURL(bu.toURL(), null, null,null, null).blockingGet().headers() + + expect: + headers.copyStatus() == SyncCopyStatusType.SUCCESS + headers.copyId() != null + validateBasicHeaders(headers) + } + + def "Sync copy min"() { + setup: + cu.setAccessPolicy(PublicAccessType.CONTAINER, null).blockingGet() + BlobURL bu2 = cu.createBlockBlobURL(generateBlobName()) + + expect: + bu2.syncCopyFromURL(bu.toURL()).blockingGet().statusCode() == 202 + } + + @Unroll + def "Sync copy metadata"() { + setup: + cu.setAccessPolicy(PublicAccessType.CONTAINER, null).blockingGet() + BlobURL bu2 = cu.createBlockBlobURL(generateBlobName()) + Metadata metadata = new Metadata() + if (key1 != null && value1 != null) { + metadata.put(key1, value1) + } + if (key2 != null && value2 != null) { + metadata.put(key2, value2) + } + + when: + bu2.syncCopyFromURL(bu.toURL(), metadata, null, null, null).blockingGet() + + then: + bu2.getProperties().blockingGet().headers().metadata() == metadata + + where: + key1 | value1 | key2 | value2 + null | null | null | null + "foo" | "bar" | "fizz" | "buzz" + } + + @Unroll + def "Sync copy source AC"() { + setup: + cu.setAccessPolicy(PublicAccessType.CONTAINER, null).blockingGet() + BlobURL bu2 = cu.createBlockBlobURL(generateBlobName()) + match = setupBlobMatchCondition(bu, match) + def mac = new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch) + + expect: + bu2.syncCopyFromURL(bu.toURL(), null, mac, null, null).blockingGet().statusCode() == 202 + + where: + modified | unmodified | match | noneMatch + null | null | null | null + oldDate | null | null | null + null | newDate | null | null + null | null | receivedEtag | null + null | null | null | garbageEtag + } + + @Unroll + def "Sync copy source AC fail"() { + setup: + 
cu.setAccessPolicy(PublicAccessType.CONTAINER, null).blockingGet() + BlobURL bu2 = cu.createBlockBlobURL(generateBlobName()) + noneMatch = setupBlobMatchCondition(bu, noneMatch) + def mac = new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch) + + when: + bu2.syncCopyFromURL(bu.toURL(), null, mac, null, null).blockingGet() + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch + newDate | null | null | null + null | oldDate | null | null + null | null | garbageEtag | null + null | null | null | receivedEtag + } + + @Unroll + def "Sync copy dest AC"() { + setup: + cu.setAccessPolicy(PublicAccessType.CONTAINER, null).blockingGet() + BlobURL bu2 = cu.createBlockBlobURL(generateBlobName()) + bu2.upload(defaultFlowable, defaultDataSize, null, null, + null, null).blockingGet() + match = setupBlobMatchCondition(bu2, match) + leaseID = setupBlobLeaseCondition(bu2, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + expect: + bu2.syncCopyFromURL(bu.toURL(), null, null, bac, null).blockingGet().statusCode() == 202 + + where: + modified | unmodified | match | noneMatch | leaseID + null | null | null | null | null + oldDate | null | null | null | null + null | newDate | null | null | null + null | null | receivedEtag | null | null + null | null | null | garbageEtag | null + null | null | null | null | receivedLeaseID + } + + @Unroll + def "Sync copy dest AC fail"() { + setup: + cu.setAccessPolicy(PublicAccessType.CONTAINER, null).blockingGet() + BlobURL bu2 = cu.createBlockBlobURL(generateBlobName()) + bu2.upload(defaultFlowable, defaultDataSize, null, null, + null, 
null).blockingGet() + noneMatch = setupBlobMatchCondition(bu2, noneMatch) + setupBlobLeaseCondition(bu2, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + when: + bu2.syncCopyFromURL(bu.toURL(), null, null, bac, null).blockingGet() + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch | leaseID + newDate | null | null | null | null + null | oldDate | null | null | null + null | null | garbageEtag | null | null + null | null | null | receivedEtag | null + null | null | null | null | garbageLeaseID + } + + def "Sync copy error"() { + setup: + def bu2 = cu.createBlockBlobURL(generateBlobName()) + + when: + bu2.syncCopyFromURL(bu.toURL(), null, null, null, null).blockingGet() + + then: + thrown(StorageException) + } + + def "Sync copy context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(202, BlobCopyFromURLHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. 
+ bu.syncCopyFromURL(new URL("http://www.example.com"), null, null, null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "Delete"() { + when: + BlobDeleteResponse response = bu.delete(null, null, null).blockingGet() + BlobDeleteHeaders headers = response.headers() + + then: + response.statusCode() == 202 + headers.requestId() != null + headers.version() != null + headers.date() != null + } + + def "Delete min"() { + expect: + bu.delete().blockingGet().statusCode() == 202 + } + + @Unroll + def "Delete options"() { + setup: + bu.createSnapshot(null, null, null).blockingGet() + // Create an extra blob so the list isn't empty (null) when we delete base blob, too + BlockBlobURL bu2 = cu.createBlockBlobURL(generateBlobName()) + bu2.upload(defaultFlowable, defaultDataSize, null, null, null, null) + .blockingGet() + + when: + bu.delete(option, null, null).blockingGet() + + then: + cu.listBlobsFlatSegment(null, null, null).blockingGet() + .body().segment().blobItems().size() == blobsRemaining + + where: + option | blobsRemaining + DeleteSnapshotsOptionType.INCLUDE | 1 + DeleteSnapshotsOptionType.ONLY | 2 + } + + @Unroll + def "Delete AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + leaseID = setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + expect: + bu.delete(DeleteSnapshotsOptionType.INCLUDE, bac, null).blockingGet().statusCode() == 202 + + where: + modified | unmodified | match | noneMatch | leaseID + null | null | null | null | null + oldDate | null | null | null | null + null | newDate | null | null | null + null | null | receivedEtag | null | null + null | null | null | garbageEtag | null + null | null | null | null | 
receivedLeaseID + } + + @Unroll + def "Delete AC fail"() { + setup: + noneMatch = setupBlobMatchCondition(bu, noneMatch) + setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + when: + bu.delete(DeleteSnapshotsOptionType.INCLUDE, bac, null).blockingGet() + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch | leaseID + newDate | null | null | null | null + null | oldDate | null | null | null + null | null | garbageEtag | null | null + null | null | null | receivedEtag | null + null | null | null | null | garbageLeaseID + } + + def "Blob delete error"() { + setup: + bu = cu.createBlockBlobURL(generateBlobName()) + + when: + bu.delete(null, null, null).blockingGet() + + then: + thrown(StorageException) + } + + def "Delete context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(202, BlobDeleteHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. 
+ bu.delete(null, null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + @Unroll + def "Set tier block blob"() { + setup: + ContainerURL cu = blobStorageServiceURL.createContainerURL(generateContainerName()) + BlockBlobURL bu = cu.createBlockBlobURL(generateBlobName()) + cu.create(null, null, null).blockingGet() + bu.upload(defaultFlowable, defaultData.remaining(), null, null, null, null) + .blockingGet() + + when: + BlobSetTierResponse initialResponse = bu.setTier(tier, null, null).blockingGet() + + then: + initialResponse.statusCode() == 200 || initialResponse.statusCode() == 202 + initialResponse.headers().version() != null + initialResponse.headers().requestId() != null + bu.getProperties(null, null).blockingGet().headers().accessTier() == tier.toString() + cu.listBlobsFlatSegment(null, null, null).blockingGet().body().segment().blobItems().get(0) + .properties().accessTier() == tier + + where: + tier | _ + AccessTier.HOT | _ + AccessTier.COOL | _ + AccessTier.ARCHIVE | _ + } + + @Unroll + def "Set tier page blob"() { + setup: + ContainerURL cu = premiumServiceURL.createContainerURL(generateContainerName()) + PageBlobURL bu = cu.createPageBlobURL(generateBlobName()) + cu.create(null, null, null).blockingGet() + bu.create(512, null, null, null, null, null).blockingGet() + + when: + bu.setTier(tier, null, null).blockingGet() + + then: + bu.getProperties(null, null).blockingGet().headers().accessTier() == tier.toString() + cu.listBlobsFlatSegment(null, null, null).blockingGet().body().segment().blobItems().get(0) + .properties().accessTier() == tier + cu.delete(null, null).blockingGet() + + where: + tier | _ + AccessTier.P4 | _ + AccessTier.P6 | _ + AccessTier.P10 | _ + AccessTier.P20 | _ + AccessTier.P30 | _ + AccessTier.P40 | _ + AccessTier.P50 | _ + } + + def "Set tier min"() { + setup: + ContainerURL cu = blobStorageServiceURL.createContainerURL(generateContainerName()) + BlockBlobURL bu = cu.createBlockBlobURL(generateBlobName()) 
+ cu.create(null, null, null).blockingGet() + bu.upload(defaultFlowable, defaultData.remaining(), null, null, null, null) + .blockingGet() + + when: + def statusCode = bu.setTier(AccessTier.HOT).blockingGet().statusCode() + + then: + statusCode == 200 || statusCode == 202 + } + + def "Set tier inferred"() { + setup: + ContainerURL cu = blobStorageServiceURL.createContainerURL(generateBlobName()) + BlockBlobURL bu = cu.createBlockBlobURL(generateBlobName()) + cu.create(null, null, null).blockingGet() + bu.upload(defaultFlowable, defaultDataSize, null, null, null, null).blockingGet() + + when: + boolean inferred1 = bu.getProperties(null, null).blockingGet().headers().accessTierInferred() + Boolean inferredList1 = cu.listBlobsFlatSegment(null, null, null).blockingGet().body().segment() + .blobItems().get(0).properties().accessTierInferred() + + bu.setTier(AccessTier.HOT, null, null).blockingGet() + + BlobGetPropertiesHeaders headers = bu.getProperties(null, null).blockingGet().headers() + Boolean inferred2 = headers.accessTierInferred() + Boolean inferredList2 = cu.listBlobsFlatSegment(null, null, null).blockingGet().body().segment() + .blobItems().get(0).properties().accessTierInferred() + + then: + inferred1 + inferredList1 + inferred2 == null + inferredList2 == null + } + + @Unroll + def "Set tier archive status"() { + setup: + ContainerURL cu = blobStorageServiceURL.createContainerURL(generateBlobName()) + BlockBlobURL bu = cu.createBlockBlobURL(generateBlobName()) + cu.create(null, null, null).blockingGet() + bu.upload(defaultFlowable, defaultDataSize, null, null, null, null).blockingGet() + + when: + bu.setTier(sourceTier, null, null).blockingGet() + bu.setTier(destTier, null, null).blockingGet() + + then: + bu.getProperties(null, null).blockingGet().headers().archiveStatus() == status.toString() + cu.listBlobsFlatSegment(null, null, null).blockingGet().body().segment().blobItems() + .get(0).properties().archiveStatus() + + where: + sourceTier | destTier | 
status + AccessTier.ARCHIVE | AccessTier.COOL | ArchiveStatus.REHYDRATE_PENDING_TO_COOL + AccessTier.ARCHIVE | AccessTier.HOT | ArchiveStatus.REHYDRATE_PENDING_TO_HOT + } + + def "Set tier error"() { + setup: + ContainerURL cu = blobStorageServiceURL.createContainerURL(generateBlobName()) + BlockBlobURL bu = cu.createBlockBlobURL(generateBlobName()) + cu.create(null, null, null).blockingGet() + bu.upload(defaultFlowable, defaultDataSize, null, null, null, null).blockingGet() + + when: + bu.setTier(AccessTier.fromString("garbage"), null, null).blockingGet() + + then: + def e = thrown(StorageException) + e.errorCode() == StorageErrorCode.INVALID_HEADER_VALUE + } + + def "Set tier illegal argument"() { + when: + bu.setTier(null, null, null) + + then: + thrown(IllegalArgumentException) + } + + def "Set tier lease"() { + setup: + ContainerURL cu = blobStorageServiceURL.createContainerURL(generateBlobName()) + BlockBlobURL bu = cu.createBlockBlobURL(generateBlobName()) + cu.create(null, null, null).blockingGet() + bu.upload(defaultFlowable, defaultDataSize, null, null, null, null).blockingGet() + def leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) + + when: + bu.setTier(AccessTier.HOT, new LeaseAccessConditions().withLeaseId(leaseID), null).blockingGet() + + then: + notThrown(StorageException) + } + + def "Set tier lease fail"() { + setup: + ContainerURL cu = blobStorageServiceURL.createContainerURL(generateBlobName()) + BlockBlobURL bu = cu.createBlockBlobURL(generateBlobName()) + cu.create(null, null, null).blockingGet() + bu.upload(defaultFlowable, defaultDataSize, null, null, null, null).blockingGet() + + when: + bu.setTier(AccessTier.HOT, new LeaseAccessConditions().withLeaseId("garbage"), null).blockingGet() + + then: + thrown(StorageException) + } + + def "Set tier context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(202, BlobSetTierHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. 
Just satisfy the parameters. + bu.setTier(AccessTier.HOT, null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "Undelete"() { + setup: + enableSoftDelete() + bu.delete(null, null, null).blockingGet() + + when: + def response = bu.undelete(null).blockingGet() + bu.getProperties(null, null).blockingGet() + + then: + notThrown(StorageException) + response.headers().requestId() != null + response.headers().version() != null + response.headers().date() != null + + disableSoftDelete() == null + } + + def "Undelete min"() { + setup: + enableSoftDelete() + bu.delete().blockingGet() + + expect: + bu.undelete().blockingGet().statusCode() == 200 + } + + def "Undelete error"() { + bu = cu.createBlockBlobURL(generateBlobName()) + + when: + bu.undelete(null).blockingGet() + + then: + thrown(StorageException) + } + + def "Undelete context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(200, BlobUndeleteHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. 
+ bu.undelete(defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "Get account info"() { + when: + def response = primaryServiceURL.getAccountInfo(null).blockingGet() + + then: + response.headers().date() != null + response.headers().version() != null + response.headers().requestId() != null + response.headers().accountKind() != null + response.headers().skuName() != null + } + + def "Get account info min"() { + expect: + bu.getAccountInfo().blockingGet().statusCode() == 200 + } + + def "Get account info error"() { + when: + ServiceURL serviceURL = new ServiceURL(primaryServiceURL.toURL(), + StorageURL.createPipeline(new AnonymousCredentials(), new PipelineOptions())) + serviceURL.createContainerURL(generateContainerName()).createBlobURL(generateBlobName()) + .getAccountInfo(null).blockingGet() + + then: + thrown(StorageException) + } + + def "Get account info context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(200, BlobGetAccountInfoHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + bu.getAccountInfo(defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } +} diff --git a/storage/data-plane/src/test/java/com/microsoft/azure/storage/BlockBlobAPITest.groovy b/storage/data-plane/src/test/java/com/microsoft/azure/storage/BlockBlobAPITest.groovy new file mode 100644 index 0000000000000..59dd7abbc2eca --- /dev/null +++ b/storage/data-plane/src/test/java/com/microsoft/azure/storage/BlockBlobAPITest.groovy @@ -0,0 +1,752 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.microsoft.azure.storage + +import com.microsoft.azure.storage.blob.* +import com.microsoft.azure.storage.blob.models.* +import com.microsoft.rest.v2.http.HttpPipeline +import com.microsoft.rest.v2.http.UnexpectedLengthException +import com.microsoft.rest.v2.util.FlowableUtil +import io.reactivex.Flowable +import spock.lang.Unroll + +import java.nio.ByteBuffer +import java.security.MessageDigest + +class BlockBlobAPITest extends APISpec { + BlockBlobURL bu + + def setup() { + bu = cu.createBlockBlobURL(generateBlobName()) + bu.upload(defaultFlowable, defaultDataSize, null, null, + null, null).blockingGet() + } + + def getBlockID() { + return new String(Base64.encoder.encode(UUID.randomUUID().toString().bytes)) + } + + def "Stage block"() { + setup: + BlockBlobStageBlockResponse response = bu.stageBlock(getBlockID(), defaultFlowable, defaultDataSize, + null, null).blockingGet() + BlockBlobStageBlockHeaders headers = response.headers() + + expect: + response.statusCode() == 201 + headers.contentMD5() != null + headers.requestId() != null + headers.version() != null + headers.date() != null + headers.isServerEncrypted() + } + + def "Stage block min"() { + expect: + bu.stageBlock(getBlockID(), defaultFlowable, defaultDataSize).blockingGet().statusCode() == 201 + } + + @Unroll + def "Stage block illegal arguments"() { + when: + bu.stageBlock(blockID, data, dataSize, null, null).blockingGet() + + then: + def e = thrown(Exception) + exceptionType.isInstance(e) + + where: + blockID | data | dataSize | exceptionType + null | defaultFlowable | 
defaultDataSize | IllegalArgumentException + getBlockID() | null | defaultDataSize | IllegalArgumentException + getBlockID() | defaultFlowable | defaultDataSize + 1 | UnexpectedLengthException + getBlockID() | defaultFlowable | defaultDataSize - 1 | UnexpectedLengthException + } + + def "Stage block empty body"() { + when: + bu.stageBlock(getBlockID(), Flowable.just(ByteBuffer.wrap(new byte[0])), 0, null, null) + .blockingGet() + + then: + thrown(StorageException) + } + + def "Stage block null body"() { + when: + bu.stageBlock(getBlockID(), Flowable.just(null), 0, null, null).blockingGet() + + then: + thrown(NullPointerException) // Thrown by Flowable.just(). + } + + def "Stage block lease"() { + setup: + String leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) + + expect: + bu.stageBlock(getBlockID(), defaultFlowable, defaultDataSize, new LeaseAccessConditions().withLeaseId(leaseID), + null).blockingGet().statusCode() == 201 + } + + def "Stage block lease fail"() { + setup: + setupBlobLeaseCondition(bu, receivedLeaseID) + + when: + bu.stageBlock(getBlockID(), defaultFlowable, defaultDataSize, new LeaseAccessConditions() + .withLeaseId(garbageLeaseID), null).blockingGet() + + then: + def e = thrown(StorageException) + e.errorCode() == StorageErrorCode.LEASE_ID_MISMATCH_WITH_BLOB_OPERATION + } + + def "Stage block error"() { + setup: + bu = cu.createBlockBlobURL(generateBlobName()) + + when: + bu.stageBlock("id", defaultFlowable, defaultDataSize, null, null) + .blockingGet() + + then: + thrown(StorageException) + } + + def "Stage block context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(201, BlockBlobStageBlockHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. 
+ bu.stageBlock("id", defaultFlowable, defaultDataSize, null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "Stage block from url"() { + setup: + cu.setAccessPolicy(PublicAccessType.CONTAINER, null, null, null).blockingGet() + def bu2 = cu.createBlockBlobURL(generateBlobName()) + def blockID = getBlockID() + + when: + def response = bu2.stageBlockFromURL(blockID, bu.toURL(), null, null, + null, null).blockingGet() + def listResponse = bu2.getBlockList(BlockListType.ALL, null, null).blockingGet() + bu2.commitBlockList(Arrays.asList(blockID), null, null, null, null).blockingGet() + + then: + response.headers().requestId() != null + response.headers().version() != null + response.headers().requestId() != null + response.headers().contentMD5() != null + response.headers().isServerEncrypted() != null + + listResponse.body().uncommittedBlocks().get(0).name() == blockID + listResponse.body().uncommittedBlocks().size() == 1 + + FlowableUtil.collectBytesInBuffer(bu2.download(null, null, false, null) + .blockingGet().body(null)).blockingGet() == defaultData + } + + def "Stage block from url min"() { + setup: + cu.setAccessPolicy(PublicAccessType.CONTAINER, null, null, null).blockingGet() + def bu2 = cu.createBlockBlobURL(generateBlobName()) + def blockID = getBlockID() + + expect: + bu2.stageBlockFromURL(blockID, bu.toURL(), null).blockingGet().statusCode() == 201 + } + + @Unroll + def "Stage block from URL IA"() { + when: + bu.stageBlockFromURL(blockID, sourceURL, null, null, null, null) + .blockingGet() + + then: + thrown(IllegalArgumentException) + + where: + blockID | sourceURL + null | new URL("http://www.example.com") + getBlockID() | null + } + + def "Stage block from URL range"() { + setup: + cu.setAccessPolicy(PublicAccessType.CONTAINER, null, null, null).blockingGet() + def destURL = cu.createBlockBlobURL(generateBlobName()) + + when: + destURL.stageBlockFromURL(getBlockID(), bu.toURL(), new 
BlobRange().withOffset(2).withCount(3), null, null, + null).blockingGet() + + then: + destURL.getBlockList(BlockListType.ALL, null, null).blockingGet().body().uncommittedBlocks().get(0) + .size() == 3 + } + + def "Stage block from URL MD5"() { + setup: + cu.setAccessPolicy(PublicAccessType.CONTAINER, null, null, null).blockingGet() + def destURL = cu.createBlockBlobURL(generateBlobName()) + + when: + destURL.stageBlockFromURL(getBlockID(), bu.toURL(), null, + MessageDigest.getInstance("MD5").digest(defaultData.array()), null, null).blockingGet() + + then: + notThrown(StorageException) + } + + def "Stage block from URL MD5 fail"() { + setup: + cu.setAccessPolicy(PublicAccessType.CONTAINER, null, null, null).blockingGet() + def destURL = cu.createBlockBlobURL(generateBlobName()) + + when: + destURL.stageBlockFromURL(getBlockID(), bu.toURL(), null, "garbage".getBytes(), + null, null).blockingGet() + + then: + thrown(StorageException) + } + + def "Stage block from URL lease"() { + setup: + cu.setAccessPolicy(PublicAccessType.CONTAINER, null, null, null).blockingGet() + def lease = new LeaseAccessConditions().withLeaseId(setupBlobLeaseCondition(bu, receivedLeaseID)) + + when: + bu.stageBlockFromURL(getBlockID(), bu.toURL(), null, null, lease, null).blockingGet() + + then: + notThrown(StorageException) + } + + def "Stage block from URL lease fail"() { + setup: + cu.setAccessPolicy(PublicAccessType.CONTAINER, null, null, null).blockingGet() + def lease = new LeaseAccessConditions().withLeaseId("garbage") + + when: + bu.stageBlockFromURL(getBlockID(), bu.toURL(), null, null, lease, null).blockingGet() + + then: + thrown(StorageException) + } + + def "Stage block from URL error"() { + setup: + cu = primaryServiceURL.createContainerURL(generateContainerName()) + bu = cu.createBlockBlobURL(generateBlobName()) + + when: + bu.stageBlockFromURL(getBlockID(), bu.toURL(), null, null, null, null) + .blockingGet() + + then: + thrown(StorageException) + } + + def "Stage block from 
URL context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(201, BlockBlobStageBlockFromURLHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + bu.stageBlockFromURL("id", bu.toURL(), null, null, null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "Commit block list"() { + setup: + String blockID = getBlockID() + bu.stageBlock(blockID, defaultFlowable, defaultDataSize, + null, null).blockingGet() + ArrayList ids = new ArrayList<>() + ids.add(blockID) + + when: + BlockBlobCommitBlockListResponse response = + bu.commitBlockList(ids, null, null, null, null).blockingGet() + BlockBlobCommitBlockListHeaders headers = response.headers() + + then: + response.statusCode() == 201 + validateBasicHeaders(headers) + headers.contentMD5() + headers.isServerEncrypted() + } + + def "Commit block list min"() { + setup: + String blockID = getBlockID() + bu.stageBlock(blockID, defaultFlowable, defaultDataSize, + null, null).blockingGet() + ArrayList ids = new ArrayList<>() + ids.add(blockID) + + expect: + bu.commitBlockList(ids).blockingGet().statusCode() == 201 + } + + def "Commit block list null"() { + expect: + bu.commitBlockList(null, null, null, null, null) + .blockingGet().statusCode() == 201 + } + + @Unroll + def "Commit block list headers"() { + setup: + String blockID = getBlockID() + bu.stageBlock(blockID, defaultFlowable, defaultDataSize, + null, null).blockingGet() + ArrayList ids = new ArrayList<>() + ids.add(blockID) + BlobHTTPHeaders headers = new BlobHTTPHeaders().withBlobCacheControl(cacheControl) + .withBlobContentDisposition(contentDisposition) + .withBlobContentEncoding(contentEncoding) + .withBlobContentLanguage(contentLanguage) + .withBlobContentMD5(contentMD5) + .withBlobContentType(contentType) + + when: + bu.commitBlockList(ids, headers, null, null, null).blockingGet() + BlobGetPropertiesResponse response = 
bu.getProperties(null, null).blockingGet() + + then: + response.statusCode() == 200 + validateBlobHeaders(response.headers(), cacheControl, contentDisposition, contentEncoding, contentLanguage, + contentMD5, contentType == null ? "application/octet-stream" : contentType) + // HTTP default content type is application/octet-stream + + where: + cacheControl | contentDisposition | contentEncoding | contentLanguage | contentMD5 | contentType + null | null | null | null | null | null + "control" | "disposition" | "encoding" | "language" | MessageDigest.getInstance("MD5").digest(defaultData.array()) | "type" + } + + @Unroll + def "Commit block list metadata"() { + setup: + Metadata metadata = new Metadata() + if (key1 != null) { + metadata.put(key1, value1) + } + if (key2 != null) { + metadata.put(key2, value2) + } + + when: + bu.commitBlockList(null, null, metadata, null, null).blockingGet() + BlobGetPropertiesResponse response = bu.getProperties(null, null).blockingGet() + + then: + response.statusCode() == 200 + response.headers().metadata() == metadata + + where: + key1 | value1 | key2 | value2 + null | null | null | null + "foo" | "bar" | "fizz" | "buzz" + } + + @Unroll + def "Commit block list AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + leaseID = setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + expect: + bu.commitBlockList(null, null, null, bac, null).blockingGet().statusCode() == 201 + + where: + modified | unmodified | match | noneMatch | leaseID + null | null | null | null | null + oldDate | null | null | null | null + null | newDate | null | null | null + null | null | receivedEtag | null | null + null | null | null | garbageEtag | null + null 
| null | null | null | receivedLeaseID + } + + @Unroll + def "Commit block list AC fail"() { + setup: + noneMatch = setupBlobMatchCondition(bu, noneMatch) + setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + when: + bu.commitBlockList(null, null, null, bac, null).blockingGet() + + then: + def e = thrown(StorageException) + e.errorCode() == StorageErrorCode.CONDITION_NOT_MET || + e.errorCode() == StorageErrorCode.LEASE_ID_MISMATCH_WITH_BLOB_OPERATION + + where: + modified | unmodified | match | noneMatch | leaseID + newDate | null | null | null | null + null | oldDate | null | null | null + null | null | garbageEtag | null | null + null | null | null | receivedEtag | null + null | null | null | null | garbageLeaseID + } + + def "Commit block list error"() { + setup: + bu = cu.createBlockBlobURL(generateBlobName()) + + when: + bu.commitBlockList(new ArrayList(), null, null, new BlobAccessConditions().withLeaseAccessConditions( + new LeaseAccessConditions().withLeaseId("garbage")), null).blockingGet() + + then: + thrown(StorageException) + } + + def "Commit block list info context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(201, BlockBlobCommitBlockListHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. 
+ bu.commitBlockList(new ArrayList(), null, null, null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "Get block list"() { + setup: + List committedBlocks = Arrays.asList(getBlockID(), getBlockID()) + bu.stageBlock(committedBlocks.get(0), defaultFlowable, defaultDataSize, null, null).blockingGet() + bu.stageBlock(committedBlocks.get(1), defaultFlowable, defaultDataSize, null, null).blockingGet() + bu.commitBlockList(committedBlocks, null, null, null, null).blockingGet() + + List uncommittedBlocks = Arrays.asList(getBlockID(), getBlockID()) + bu.stageBlock(uncommittedBlocks.get(0), defaultFlowable, defaultDataSize, null, null).blockingGet() + bu.stageBlock(uncommittedBlocks.get(1), defaultFlowable, defaultDataSize, null, null).blockingGet() + uncommittedBlocks.sort(true) + + when: + BlockBlobGetBlockListResponse response = bu.getBlockList(BlockListType.ALL, null, null) + .blockingGet() + + then: + for (int i = 0; i < committedBlocks.size(); i++) { + assert response.body().committedBlocks().get(i).name() == committedBlocks.get(i) + assert response.body().committedBlocks().get(i).size() == defaultDataSize + assert response.body().uncommittedBlocks().get(i).name() == uncommittedBlocks.get(i) + assert response.body().uncommittedBlocks().get(i).size() == defaultDataSize + } + validateBasicHeaders(response.headers()) + response.headers().contentType() != null + response.headers().blobContentLength() == defaultDataSize * 2L + } + + def "Get block list min"() { + expect: + bu.getBlockList(BlockListType.ALL).blockingGet().statusCode() == 200 + } + + @Unroll + def "Get block list type"() { + setup: + String blockID = getBlockID() + bu.stageBlock(blockID, defaultFlowable, defaultDataSize, + null, null).blockingGet() + ArrayList ids = new ArrayList<>() + ids.add(blockID) + bu.commitBlockList(ids, null, null, null, null).blockingGet() + blockID = new String(getBlockID()) + bu.stageBlock(blockID, defaultFlowable, defaultDataSize, + null, 
null).blockingGet() + + when: + BlockBlobGetBlockListResponse response = bu.getBlockList(type, null, null).blockingGet() + + then: + response.body().committedBlocks().size() == committedCount + response.body().uncommittedBlocks().size() == uncommittedCount + + where: + type | committedCount | uncommittedCount + BlockListType.ALL | 1 | 1 + BlockListType.COMMITTED | 1 | 0 + BlockListType.UNCOMMITTED | 0 | 1 + } + + def "Get block list type null"() { + when: + bu.getBlockList(null, null, null).blockingGet() + + then: + thrown(IllegalArgumentException) + } + + def "Get block list lease"() { + setup: + String leaseID = setupBlobLeaseCondition(bu, receivedLeaseID) + + expect: + bu.getBlockList(BlockListType.ALL, new LeaseAccessConditions().withLeaseId(leaseID), null) + .blockingGet().statusCode() == 200 + } + + def "Get block list lease fail"() { + setup: + setupBlobLeaseCondition(bu, garbageLeaseID) + + when: + bu.getBlockList(BlockListType.ALL, new LeaseAccessConditions().withLeaseId(garbageLeaseID), null).blockingGet() + + then: + def e = thrown(StorageException) + e.errorCode() == StorageErrorCode.LEASE_ID_MISMATCH_WITH_BLOB_OPERATION + } + + def "Get block list error"() { + setup: + bu = cu.createBlockBlobURL(generateBlobName()) + + when: + bu.getBlockList(BlockListType.ALL, null, null).blockingGet() + + then: + thrown(StorageException) + } + + def "Get block list context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(200, BlockBlobGetBlockListHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. 
+ bu.getBlockList(BlockListType.ALL, null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "Upload"() { + when: + BlockBlobUploadResponse response = bu.upload(defaultFlowable, defaultDataSize, + null, null, null, null).blockingGet() + BlockBlobUploadHeaders headers = response.headers() + + then: + response.statusCode() == 201 + FlowableUtil.collectBytesInBuffer( + bu.download(null, null, false, null).blockingGet().body(null)) + .blockingGet() == defaultData + validateBasicHeaders(headers) + headers.contentMD5() != null + headers.isServerEncrypted() + } + + def "Upload min"() { + expect: + bu.upload(defaultFlowable, defaultDataSize).blockingGet().statusCode() == 201 + } + + @Unroll + def "Upload illegal argument"() { + when: + bu.upload(data, dataSize, null, null, null, null).blockingGet() + + then: + def e = thrown(Exception) + exceptionType.isInstance(e) + + where: + data | dataSize | exceptionType + null | defaultDataSize | IllegalArgumentException + defaultFlowable | defaultDataSize + 1 | UnexpectedLengthException + defaultFlowable | defaultDataSize - 1 | UnexpectedLengthException + } + + def "Upload empty body"() { + expect: + bu.upload(Flowable.just(ByteBuffer.wrap(new byte[0])), 0, null, null, + null, null).blockingGet().statusCode() == 201 + } + + def "Upload null body"() { + when: + bu.upload(Flowable.just(null), 0, null, null, null, null).blockingGet() + + then: + thrown(NullPointerException) // Thrown by Flowable.just(). 
+ } + + @Unroll + def "Upload headers"() { + setup: + BlobHTTPHeaders headers = new BlobHTTPHeaders().withBlobCacheControl(cacheControl) + .withBlobContentDisposition(contentDisposition) + .withBlobContentEncoding(contentEncoding) + .withBlobContentLanguage(contentLanguage) + .withBlobContentMD5(contentMD5) + .withBlobContentType(contentType) + + when: + bu.upload(defaultFlowable, defaultDataSize, + headers, null, null, null).blockingGet() + BlobGetPropertiesResponse response = bu.getProperties(null, null).blockingGet() + + then: + validateBlobHeaders(response.headers(), cacheControl, contentDisposition, contentEncoding, contentLanguage, + MessageDigest.getInstance("MD5").digest(defaultData.array()), + contentType == null ? "application/octet-stream" : contentType) + // For uploading a block blob, the service will auto calculate an MD5 hash if not present + // HTTP default content type is application/octet-stream + + where: + cacheControl | contentDisposition | contentEncoding | contentLanguage | contentMD5 | contentType + null | null | null | null | null | null + "control" | "disposition" | "encoding" | "language" | MessageDigest.getInstance("MD5").digest(defaultData.array()) | "type" + } + + @Unroll + def "Upload metadata"() { + setup: + Metadata metadata = new Metadata() + if (key1 != null) { + metadata.put(key1, value1) + } + if (key2 != null) { + metadata.put(key2, value2) + } + + when: + bu.upload(defaultFlowable, defaultDataSize, + null, metadata, null, null).blockingGet() + BlobGetPropertiesResponse response = bu.getProperties(null, null).blockingGet() + + then: + response.statusCode() == 200 + response.headers().metadata() == metadata + + where: + key1 | value1 | key2 | value2 + null | null | null | null + "foo" | "bar" | "fizz" | "buzz" + } + + @Unroll + def "Upload AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + leaseID = setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new 
BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + expect: + bu.upload(defaultFlowable, defaultDataSize, + null, null, bac, null).blockingGet().statusCode() == 201 + + where: + modified | unmodified | match | noneMatch | leaseID + null | null | null | null | null + oldDate | null | null | null | null + null | newDate | null | null | null + null | null | receivedEtag | null | null + null | null | null | garbageEtag | null + null | null | null | null | receivedLeaseID + } + + @Unroll + def "Upload AC fail"() { + setup: + noneMatch = setupBlobMatchCondition(bu, noneMatch) + setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + when: + bu.upload(defaultFlowable, defaultDataSize, null, null, bac, null).blockingGet() + + then: + def e = thrown(StorageException) + e.errorCode() == StorageErrorCode.CONDITION_NOT_MET || + e.errorCode() == StorageErrorCode.LEASE_ID_MISMATCH_WITH_BLOB_OPERATION + + where: + modified | unmodified | match | noneMatch | leaseID + newDate | null | null | null | null + null | oldDate | null | null | null + null | null | garbageEtag | null | null + null | null | null | receivedEtag | null + null | null | null | null | garbageLeaseID + } + + def "Upload error"() { + setup: + bu = cu.createBlockBlobURL(generateBlobName()) + + when: + bu.upload(defaultFlowable, defaultDataSize, null, null, + new BlobAccessConditions().withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId("id")), + null).blockingGet() + + then: + 
thrown(StorageException) + } + + def "Upload context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(201, BlockBlobUploadHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + bu.upload(defaultFlowable, defaultDataSize, null, null, null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } +} diff --git a/storage/data-plane/src/test/java/com/microsoft/azure/storage/ContainerAPITest.groovy b/storage/data-plane/src/test/java/com/microsoft/azure/storage/ContainerAPITest.groovy new file mode 100644 index 0000000000000..bf56e023d19ed --- /dev/null +++ b/storage/data-plane/src/test/java/com/microsoft/azure/storage/ContainerAPITest.groovy @@ -0,0 +1,1816 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.microsoft.azure.storage + +import com.microsoft.azure.storage.blob.* +import com.microsoft.azure.storage.blob.models.* +import com.microsoft.rest.v2.http.HttpPipeline +import com.microsoft.rest.v2.http.HttpRequest +import com.microsoft.rest.v2.http.HttpResponse +import com.microsoft.rest.v2.policy.RequestPolicy +import com.microsoft.rest.v2.policy.RequestPolicyFactory +import com.microsoft.rest.v2.policy.RequestPolicyOptions +import io.reactivex.Flowable +import io.reactivex.Single +import spock.lang.Unroll + +import java.time.OffsetDateTime +import java.time.ZoneId + +class ContainerAPITest extends APISpec { + + def "Create all null"() { + setup: + // Overwrite the existing cu, which has already been created + cu = primaryServiceURL.createContainerURL(generateContainerName()) + + when: + ContainerCreateResponse response = cu.create(null, null, null).blockingGet() + + then: + response.statusCode() == 201 + validateBasicHeaders(response.headers()) + } + + def "Create min"() { + expect: + primaryServiceURL.createContainerURL(generateContainerName()).create().blockingGet().statusCode() == 201 + } + + @Unroll + def "Create metadata"() { + setup: + cu = primaryServiceURL.createContainerURL(generateContainerName()) + Metadata metadata = new Metadata() + if (key1 != null) { + metadata.put(key1, value1) + } + if (key2 != null) { + metadata.put(key2, value2) + } + + when: + cu.create(metadata, null, null).blockingGet() + ContainerGetPropertiesResponse response = cu.getProperties(null, null).blockingGet() + + then: + response.headers().metadata() == metadata + + where: + key1 | value1 | key2 | value2 + null | null | null | null + "foo" | "bar" | "fizz" | "buzz" + } + + @Unroll + def "Create publicAccess"() { + setup: + cu = primaryServiceURL.createContainerURL(generateContainerName()) + + when: + cu.create(null, publicAccess, null).blockingGet() + PublicAccessType access = + cu.getProperties(null, null).blockingGet().headers().blobPublicAccess() + + then: 
+ access.toString() == publicAccess.toString() + + where: + publicAccess | _ + PublicAccessType.BLOB | _ + PublicAccessType.CONTAINER | _ + null | _ + } + + def "Create error"() { + when: + cu.create(null, null, null).blockingGet() + + then: + def e = thrown(StorageException) + e.response().statusCode() == 409 + e.errorCode() == StorageErrorCode.CONTAINER_ALREADY_EXISTS + e.message().contains("The specified container already exists.") + } + + def "Create context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(201, ContainerCreateHeaders))) + + cu = cu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + cu.create(null, null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "Get properties null"() { + when: + ContainerGetPropertiesHeaders headers = + cu.getProperties(null, null).blockingGet().headers() + + then: + validateBasicHeaders(headers) + headers.blobPublicAccess() == null + headers.leaseDuration() == null + headers.leaseState() == LeaseStateType.AVAILABLE + headers.leaseStatus() == LeaseStatusType.UNLOCKED + headers.metadata().size() == 0 + !headers.hasImmutabilityPolicy() + !headers.hasLegalHold() + } + + def "Get properties min"() { + expect: + cu.getProperties().blockingGet().statusCode() == 200 + } + + def "Get properties lease"() { + setup: + String leaseID = setupContainerLeaseCondition(cu, receivedLeaseID) + + expect: + cu.getProperties(new LeaseAccessConditions().withLeaseId(leaseID), null).blockingGet().statusCode() == 200 + } + + def "Get properties lease fail"() { + when: + cu.getProperties(new LeaseAccessConditions().withLeaseId("garbage"), null).blockingGet() + + then: + thrown(StorageException) + } + + def "Get properties error"() { + setup: + cu = primaryServiceURL.createContainerURL(generateContainerName()) + + when: + cu.getProperties(null, null).blockingGet() + + then: + thrown(StorageException) + } + + def "Get properties 
context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(200, ContainerGetPropertiesHeaders))) + + cu = cu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + cu.getProperties(null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "Set metadata"() { + setup: + cu = primaryServiceURL.createContainerURL(generateContainerName()) + Metadata metadata = new Metadata() + metadata.put("key", "value") + cu.create(metadata, null, null).blockingGet() + ContainerSetMetadataResponse response = cu.setMetadata(null, null, null).blockingGet() + + expect: + response.statusCode() == 200 + validateBasicHeaders(response.headers()) + cu.getProperties(null, null).blockingGet().headers().metadata().size() == 0 + } + + def "Set metadata min"() { + setup: + Metadata metadata = new Metadata() + metadata.put("foo", "bar") + + when: + cu.setMetadata(metadata).blockingGet() + + then: + cu.getProperties().blockingGet().headers().metadata() == metadata + } + + @Unroll + def "Set metadata metadata"() { + setup: + Metadata metadata = new Metadata() + if (key1 != null) { + metadata.put(key1, value1) + } + if (key2 != null) { + metadata.put(key2, value2) + } + + expect: + cu.setMetadata(metadata, null, null).blockingGet().statusCode() == 200 + cu.getProperties(null, null).blockingGet().headers().metadata() == metadata + + where: + key1 | value1 | key2 | value2 + null | null | null | null + "foo" | "bar" | "fizz" | "buzz" + } + + @Unroll + def "Set metadata AC"() { + setup: + leaseID = setupContainerLeaseCondition(cu, leaseID) + ContainerAccessConditions cac = new ContainerAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + expect: + cu.setMetadata(null, cac, null).blockingGet().statusCode() == 200 + + where: + modified | leaseID + null | null + 
oldDate | null + null | receivedLeaseID + } + + @Unroll + def "Set metadata AC fail"() { + setup: + ContainerAccessConditions cac = new ContainerAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + when: + cu.setMetadata(null, cac, null).blockingGet() + + then: + thrown(StorageException) + + where: + modified | leaseID + newDate | null + null | garbageLeaseID + } + + @Unroll + def "Set metadata AC illegal"() { + setup: + ModifiedAccessConditions mac = new ModifiedAccessConditions().withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch) + + when: + cu.setMetadata(null, new ContainerAccessConditions().withModifiedAccessConditions(mac), null) + + then: + thrown(UnsupportedOperationException) + + where: + unmodified | match | noneMatch + newDate | null | null + null | receivedEtag | null + null | null | garbageEtag + } + + def "Set metadata error"() { + setup: + cu = primaryServiceURL.createContainerURL(generateContainerName()) + + when: + cu.setMetadata(null, null, null).blockingGet() + + then: + thrown(StorageException) + } + + def "Set metadata context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(200, ContainerSetMetadataHeaders))) + + cu = cu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. 
+ cu.setMetadata(null, null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + @Unroll + def "Set access policy"() { + setup: + def response = cu.setAccessPolicy(access, null, null, null).blockingGet() + + expect: + validateBasicHeaders(response.headers()) + cu.getProperties(null, null).blockingGet() + .headers().blobPublicAccess() == access + + where: + access | _ + PublicAccessType.BLOB | _ + PublicAccessType.CONTAINER | _ + null | _ + } + + def "Set access policy min access"() { + when: + cu.setAccessPolicy(PublicAccessType.CONTAINER, null).blockingGet() + + then: + cu.getProperties().blockingGet().headers().blobPublicAccess() == PublicAccessType.CONTAINER + } + + def "Set access policy min ids"() { + setup: + SignedIdentifier identifier = new SignedIdentifier() + .withId("0000") + .withAccessPolicy(new AccessPolicy() + .withStart(OffsetDateTime.now().atZoneSameInstant(ZoneId.of("UTC")).toOffsetDateTime()) + .withExpiry(OffsetDateTime.now().atZoneSameInstant(ZoneId.of("UTC")).toOffsetDateTime() + .plusDays(1)) + .withPermission("r")) + + List ids = new ArrayList<>() + ids.push(identifier) + + when: + cu.setAccessPolicy(null, ids).blockingGet() + + then: + cu.getAccessPolicy(null, null).blockingGet().body().get(0).id() == "0000" + } + + def "Set access policy ids"() { + setup: + SignedIdentifier identifier = new SignedIdentifier() + .withId("0000") + .withAccessPolicy(new AccessPolicy() + .withStart(OffsetDateTime.now().atZoneSameInstant(ZoneId.of("UTC")).toOffsetDateTime()) + .withExpiry(OffsetDateTime.now().atZoneSameInstant(ZoneId.of("UTC")).toOffsetDateTime() + .plusDays(1)) + .withPermission("r")) + SignedIdentifier identifier2 = new SignedIdentifier() + .withId("0001") + .withAccessPolicy(new AccessPolicy() + .withStart(OffsetDateTime.now().atZoneSameInstant(ZoneId.of("UTC")).toOffsetDateTime()) + .withExpiry(OffsetDateTime.now().atZoneSameInstant(ZoneId.of("UTC")).toOffsetDateTime() + .plusDays(2)) + .withPermission("w")) + List 
ids = new ArrayList<>() + ids.push(identifier) + ids.push(identifier2) + + when: + ContainerSetAccessPolicyResponse response = + cu.setAccessPolicy(null, ids, null, null).blockingGet() + List receivedIdentifiers = cu.getAccessPolicy(null, null).blockingGet().body() + + then: + response.statusCode() == 200 + validateBasicHeaders(response.headers()) + receivedIdentifiers.get(0).accessPolicy().expiry() == identifier.accessPolicy().expiry() + receivedIdentifiers.get(0).accessPolicy().start() == identifier.accessPolicy().start() + receivedIdentifiers.get(0).accessPolicy().permission() == identifier.accessPolicy().permission() + receivedIdentifiers.get(1).accessPolicy().expiry() == identifier2.accessPolicy().expiry() + receivedIdentifiers.get(1).accessPolicy().start() == identifier2.accessPolicy().start() + receivedIdentifiers.get(1).accessPolicy().permission() == identifier2.accessPolicy().permission() + } + + @Unroll + def "Set access policy AC"() { + setup: + leaseID = setupContainerLeaseCondition(cu, leaseID) + ContainerAccessConditions cac = new ContainerAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + expect: + cu.setAccessPolicy(null, null, cac, null).blockingGet().statusCode() == 200 + + where: + modified | unmodified | leaseID + null | null | null + oldDate | null | null + null | newDate | null + null | null | receivedLeaseID + } + + @Unroll + def "Set access policy AC fail"() { + setup: + ContainerAccessConditions cac = new ContainerAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + when: + cu.setAccessPolicy(null, null, cac, null).blockingGet() + + then: + thrown(StorageException) + + where: + modified | 
unmodified | leaseID + newDate | null | null + null | oldDate | null + null | null | garbageLeaseID + } + + @Unroll + def "Set access policy AC illegal"() { + setup: + ModifiedAccessConditions mac = new ModifiedAccessConditions().withIfMatch(match).withIfNoneMatch(noneMatch) + + when: + cu.setAccessPolicy(null, null, new ContainerAccessConditions().withModifiedAccessConditions(mac), null) + + then: + thrown(UnsupportedOperationException) + + where: + match | noneMatch + receivedEtag | null + null | garbageEtag + } + + def "Set access policy error"() { + setup: + cu = primaryServiceURL.createContainerURL(generateContainerName()) + + when: + cu.setAccessPolicy(null, null, null, null).blockingGet() + + then: + thrown(StorageException) + } + + def "Set access policy context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(200, ContainerSetAccessPolicyHeaders))) + + cu = cu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. 
+ cu.setAccessPolicy(null, null, null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "Get access policy"() { + setup: + SignedIdentifier identifier = new SignedIdentifier() + .withId("0000") + .withAccessPolicy(new AccessPolicy() + .withStart(OffsetDateTime.now().atZoneSameInstant(ZoneId.of("UTC")).toOffsetDateTime()) + .withExpiry(OffsetDateTime.now().atZoneSameInstant(ZoneId.of("UTC")).toOffsetDateTime() + .plusDays(1)) + .withPermission("r")) + List ids = new ArrayList<>() + ids.push(identifier) + cu.setAccessPolicy(PublicAccessType.BLOB, ids, null, null).blockingGet() + ContainerGetAccessPolicyResponse response = cu.getAccessPolicy(null, null).blockingGet() + + expect: + response.statusCode() == 200 + response.headers().blobPublicAccess() == PublicAccessType.BLOB + validateBasicHeaders(response.headers()) + response.body().get(0).accessPolicy().expiry() == identifier.accessPolicy().expiry() + response.body().get(0).accessPolicy().start() == identifier.accessPolicy().start() + response.body().get(0).accessPolicy().permission() == identifier.accessPolicy().permission() + } + + def "Get access policy lease"() { + setup: + String leaseID = setupContainerLeaseCondition(cu, receivedLeaseID) + + expect: + cu.getAccessPolicy(new LeaseAccessConditions().withLeaseId(leaseID), null).blockingGet().statusCode() == 200 + } + + def "Get access policy lease fail"() { + when: + cu.getAccessPolicy(new LeaseAccessConditions().withLeaseId(garbageLeaseID), null).blockingGet() + + then: + thrown(StorageException) + } + + def "Get access policy error"() { + setup: + cu = primaryServiceURL.createContainerURL(generateContainerName()) + + when: + cu.getAccessPolicy(null, null).blockingGet() + + then: + thrown(StorageException) + } + + def "Get access policy context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(200, ContainerGetAccessPolicyHeaders))) + + cu = cu.withPipeline(pipeline) + + when: + // No service 
call is made. Just satisfy the parameters. + cu.getAccessPolicy(null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "Delete"() { + when: + ContainerDeleteResponse response = cu.delete(null, null).blockingGet() + + then: + response.statusCode() == 202 + response.headers().requestId() != null + response.headers().version() != null + response.headers().date() != null + } + + def "Delete min"() { + expect: + cu.delete().blockingGet().statusCode() == 202 + } + + @Unroll + def "Delete AC"() { + setup: + leaseID = setupContainerLeaseCondition(cu, leaseID) + ContainerAccessConditions cac = new ContainerAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + + expect: + cu.delete(cac, null).blockingGet().statusCode() == 202 + + where: + modified | unmodified | leaseID + null | null | null + oldDate | null | null + null | newDate | null + null | null | receivedLeaseID + } + + @Unroll + def "Delete AC fail"() { + setup: + ContainerAccessConditions cac = new ContainerAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + when: + cu.delete(cac, null).blockingGet() + + then: + thrown(StorageException) + + where: + modified | unmodified | leaseID + newDate | null | null + null | oldDate | null + null | null | garbageLeaseID + } + + @Unroll + def "Delete AC illegal"() { + setup: + ModifiedAccessConditions mac = new ModifiedAccessConditions().withIfMatch(match).withIfNoneMatch(noneMatch) + + when: + cu.delete(new ContainerAccessConditions().withModifiedAccessConditions(mac), null) + + then: + thrown(UnsupportedOperationException) + + where: + match | noneMatch + receivedEtag | null + null | garbageEtag + 
} + + def "Delete error"() { + setup: + cu = primaryServiceURL.createContainerURL(generateContainerName()) + + when: + cu.delete(null, null).blockingGet() + + then: + thrown(StorageException) + } + + def "Delete context"() { + setup: + def pipeline = HttpPipeline.build(getStubFactory(getContextStubPolicy(202, ContainerDeleteHeaders))) + + cu = cu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + cu.delete(null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "List blobs flat"() { + setup: + String name = generateBlobName() + PageBlobURL bu = cu.createPageBlobURL(name) + bu.create(512, null, null, null, null, null).blockingGet() + + when: + ContainerListBlobFlatSegmentResponse response = cu.listBlobsFlatSegment(null, null, null) + .blockingGet() + ContainerListBlobFlatSegmentHeaders headers = response.headers() + List blobs = response.body().segment().blobItems() + + then: + response.statusCode() == 200 + headers.contentType() != null + headers.requestId() != null + headers.version() != null + headers.date() != null + blobs.size() == 1 + blobs.get(0).name() == name + blobs.get(0).properties().blobType() == BlobType.PAGE_BLOB + blobs.get(0).properties().copyCompletionTime() == null + blobs.get(0).properties().copyStatusDescription() == null + blobs.get(0).properties().copyId() == null + blobs.get(0).properties().copyProgress() == null + blobs.get(0).properties().copySource() == null + blobs.get(0).properties().copyStatus() == null + blobs.get(0).properties().incrementalCopy() == null + blobs.get(0).properties().destinationSnapshot() == null + blobs.get(0).properties().leaseDuration() == null + blobs.get(0).properties().leaseState() == LeaseStateType.AVAILABLE + blobs.get(0).properties().leaseStatus() == LeaseStatusType.UNLOCKED + blobs.get(0).properties().contentLength() != null + blobs.get(0).properties().contentType() != null + blobs.get(0).properties().contentMD5() == null + 
blobs.get(0).properties().contentEncoding() == null + blobs.get(0).properties().contentDisposition() == null + blobs.get(0).properties().contentLanguage() == null + blobs.get(0).properties().cacheControl() == null + blobs.get(0).properties().blobSequenceNumber() == 0 + blobs.get(0).properties().serverEncrypted() + blobs.get(0).properties().accessTierInferred() + blobs.get(0).properties().accessTier() == AccessTier.HOT + blobs.get(0).properties().archiveStatus() == null + blobs.get(0).properties().creationTime() != null + } + + def "List blobs flat min"() { + expect: + cu.listBlobsFlatSegment(null, null).blockingGet().statusCode() == 200 + } + + def setupListBlobsTest(String normalName, String copyName, String metadataName, String uncommittedName) { + PageBlobURL normal = cu.createPageBlobURL(normalName) + normal.create(512, null, null, null, null, null).blockingGet() + + PageBlobURL copyBlob = cu.createPageBlobURL(copyName) + waitForCopy(copyBlob, copyBlob.startCopyFromURL(normal.toURL(), + null, null, null, null).blockingGet().headers().copyStatus()) + + PageBlobURL metadataBlob = cu.createPageBlobURL(metadataName) + Metadata values = new Metadata() + values.put("foo", "bar") + metadataBlob.create(512, null, null, values, null, null).blockingGet() + + String snapshotTime = normal.createSnapshot(null, null, null) + .blockingGet().headers().snapshot() + + BlockBlobURL uncommittedBlob = cu.createBlockBlobURL(uncommittedName) + + uncommittedBlob.stageBlock("0000", Flowable.just(defaultData), defaultData.remaining(), + null, null).blockingGet() + + return snapshotTime + } + + def "List blobs flat options copy"() { + setup: + ListBlobsOptions options = new ListBlobsOptions().withDetails(new BlobListingDetails().withCopy(true)) + String normalName = "a" + generateBlobName() + String copyName = "c" + generateBlobName() + String metadataName = "m" + generateBlobName() + String uncommittedName = "u" + generateBlobName() + setupListBlobsTest(normalName, copyName, 
metadataName, uncommittedName) + + when: + List blobs = cu.listBlobsFlatSegment(null, options, null).blockingGet().body().segment().blobItems() + + then: + blobs.get(0).name() == normalName + blobs.get(1).name() == copyName + blobs.get(1).properties().copyId() != null + // Comparing the urls isn't reliable because the service may use https. + blobs.get(1).properties().copySource().contains(normalName) + blobs.get(1).properties().copyStatus() == CopyStatusType.SUCCESS // We waited for the copy to complete. + blobs.get(1).properties().copyProgress() != null + blobs.get(1).properties().copyCompletionTime() != null + blobs.size() == 3 // Normal, copy, metadata + } + + def "List blobs flat options metadata"() { + setup: + ListBlobsOptions options = new ListBlobsOptions().withDetails(new BlobListingDetails().withMetadata(true)) + String normalName = "a" + generateBlobName() + String copyName = "c" + generateBlobName() + String metadataName = "m" + generateBlobName() + String uncommittedName = "u" + generateBlobName() + setupListBlobsTest(normalName, copyName, metadataName, uncommittedName) + + when: + List blobs = cu.listBlobsFlatSegment(null, options, null).blockingGet().body().segment().blobItems() + + then: + blobs.get(0).name() == normalName + blobs.get(1).name() == copyName + blobs.get(1).properties().copyCompletionTime() == null + blobs.get(2).name() == metadataName + blobs.get(2).metadata().get("foo") == "bar" + blobs.size() == 3 // Normal, copy, metadata + } + + def "List blobs flat options snapshots"() { + setup: + ListBlobsOptions options = new ListBlobsOptions().withDetails(new BlobListingDetails().withSnapshots(true)) + String normalName = "a" + generateBlobName() + String copyName = "c" + generateBlobName() + String metadataName = "m" + generateBlobName() + String uncommittedName = "u" + generateBlobName() + String snapshotTime = setupListBlobsTest(normalName, copyName, metadataName, uncommittedName) + + when: + List blobs = cu.listBlobsFlatSegment(null, 
options, null).blockingGet().body().segment().blobItems() + + then: + blobs.get(0).name() == normalName + blobs.get(0).snapshot() == snapshotTime + blobs.get(1).name() == normalName + blobs.size() == 4 // Normal, snapshot, copy, metadata + } + + def "List blobs flat options uncommitted"() { + setup: + ListBlobsOptions options = new ListBlobsOptions().withDetails(new BlobListingDetails() + .withUncommittedBlobs(true)) + String normalName = "a" + generateBlobName() + String copyName = "c" + generateBlobName() + String metadataName = "m" + generateBlobName() + String uncommittedName = "u" + generateBlobName() + setupListBlobsTest(normalName, copyName, metadataName, uncommittedName) + + when: + List blobs = cu.listBlobsFlatSegment(null, options, null).blockingGet().body().segment().blobItems() + + then: + blobs.get(0).name() == normalName + blobs.get(3).name() == uncommittedName + blobs.size() == 4 // Normal, copy, metadata, uncommitted + } + + def "List blobs flat options deleted"() { + setup: + enableSoftDelete() + String name = generateBlobName() + AppendBlobURL bu = cu.createAppendBlobURL(name) + bu.create(null, null, null, null).blockingGet() + bu.delete(null, null, null).blockingGet() + + when: + List blobs = cu.listBlobsFlatSegment(null, new ListBlobsOptions().withDetails(new BlobListingDetails() + .withDeletedBlobs(true)), null).blockingGet().body().segment().blobItems() + + then: + blobs.get(0).name() == name + blobs.size() == 1 + + disableSoftDelete() == null // Must produce a true value or test will fail. 
+ } + + def "List blobs flat options prefix"() { + setup: + ListBlobsOptions options = new ListBlobsOptions().withPrefix("a") + String normalName = "a" + generateBlobName() + String copyName = "c" + generateBlobName() + String metadataName = "m" + generateBlobName() + String uncommittedName = "u" + generateBlobName() + setupListBlobsTest(normalName, copyName, metadataName, uncommittedName) + + when: + List blobs = cu.listBlobsFlatSegment(null, options, null).blockingGet().body().segment().blobItems() + + then: + blobs.get(0).name() == normalName + blobs.size() == 1 // Normal + } + + def "List blobs flat options maxResults"() { + setup: + ListBlobsOptions options = new ListBlobsOptions().withDetails(new BlobListingDetails().withCopy(true) + .withSnapshots(true).withUncommittedBlobs(true)).withMaxResults(2) + String normalName = "a" + generateBlobName() + String copyName = "c" + generateBlobName() + String metadataName = "m" + generateBlobName() + String uncommittedName = "u" + generateBlobName() + setupListBlobsTest(normalName, copyName, metadataName, uncommittedName) + + when: + List blobs = cu.listBlobsFlatSegment(null, options, null).blockingGet().body().segment().blobItems() + + then: + blobs.size() == 2 + } + + def "List blobs flat options fail"() { + when: + new ListBlobsOptions().withMaxResults(0) + + then: + thrown(IllegalArgumentException) + } + + def "List blobs flat marker"() { + setup: + for (int i = 0; i < 10; i++) { + PageBlobURL bu = cu.createPageBlobURL(generateBlobName()) + bu.create(512, null, null, null, null, null).blockingGet() + } + + ContainerListBlobFlatSegmentResponse response = cu.listBlobsFlatSegment(null, + new ListBlobsOptions().withMaxResults(6), null) + .blockingGet() + String marker = response.body().nextMarker() + int firstSegmentSize = response.body().segment().blobItems().size() + response = cu.listBlobsFlatSegment(marker, null, null).blockingGet() + + expect: + firstSegmentSize == 6 + response.body().nextMarker() == null + 
response.body().segment().blobItems().size() == 4 + } + + def "List blobs flat error"() { + setup: + cu = primaryServiceURL.createContainerURL(generateContainerName()) + + when: + cu.listBlobsFlatSegment(null, null, null).blockingGet() + + then: + thrown(StorageException) + } + + def "List blobs flat context"() { + setup: + def pipeline = + HttpPipeline.build(getStubFactory(getContextStubPolicy(200, ContainerListBlobFlatSegmentHeaders))) + + cu = cu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + cu.listBlobsFlatSegment(null, null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "List blobs hierarchy"() { + setup: + String name = generateBlobName() + PageBlobURL bu = cu.createPageBlobURL(name) + bu.create(512, null, null, null, null, null).blockingGet() + + when: + ContainerListBlobHierarchySegmentResponse response = + cu.listBlobsHierarchySegment(null, "/", null, null) + .blockingGet() + ContainerListBlobHierarchySegmentHeaders headers = response.headers() + List blobs = response.body().segment().blobItems() + + then: + response.statusCode() == 200 + headers.contentType() != null + headers.requestId() != null + headers.version() != null + headers.date() != null + blobs.size() == 1 + blobs.get(0).name() == name + } + + def "List blobs hierarchy min"() { + expect: + cu.listBlobsHierarchySegment(null, "/", null).blockingGet().statusCode() == 200 + } + + def "List blobs hier options copy"() { + setup: + ListBlobsOptions options = new ListBlobsOptions().withDetails(new BlobListingDetails().withCopy(true)) + String normalName = "a" + generateBlobName() + String copyName = "c" + generateBlobName() + String metadataName = "m" + generateBlobName() + String uncommittedName = "u" + generateBlobName() + setupListBlobsTest(normalName, copyName, metadataName, uncommittedName) + + when: + List blobs = cu.listBlobsHierarchySegment(null, "", options, null) + .blockingGet().body().segment().blobItems() 
+ + then: + blobs.get(0).name() == normalName + blobs.get(1).name() == copyName + blobs.get(1).properties().copyId() != null + // Comparing the urls isn't reliable because the service may use https. + blobs.get(1).properties().copySource().contains(normalName) + blobs.get(1).properties().copyStatus() == CopyStatusType.SUCCESS // We waited for the copy to complete. + blobs.get(1).properties().copyProgress() != null + blobs.get(1).properties().copyCompletionTime() != null + blobs.size() == 3 // Normal, copy, metadata + } + + def "List blobs hier options metadata"() { + setup: + ListBlobsOptions options = new ListBlobsOptions().withDetails(new BlobListingDetails().withMetadata(true)) + String normalName = "a" + generateBlobName() + String copyName = "c" + generateBlobName() + String metadataName = "m" + generateBlobName() + String uncommittedName = "u" + generateBlobName() + setupListBlobsTest(normalName, copyName, metadataName, uncommittedName) + + when: + List blobs = cu.listBlobsHierarchySegment(null, "", options, null) + .blockingGet().body().segment().blobItems() + + then: + blobs.get(0).name() == normalName + blobs.get(1).name() == copyName + blobs.get(1).properties().copyCompletionTime() == null + blobs.get(2).name() == metadataName + blobs.get(2).metadata().get("foo") == "bar" + blobs.size() == 3 // Normal, copy, metadata + } + + def "List blobs hier options uncommitted"() { + setup: + ListBlobsOptions options = new ListBlobsOptions().withDetails(new BlobListingDetails() + .withUncommittedBlobs(true)) + String normalName = "a" + generateBlobName() + String copyName = "c" + generateBlobName() + String metadataName = "m" + generateBlobName() + String uncommittedName = "u" + generateBlobName() + setupListBlobsTest(normalName, copyName, metadataName, uncommittedName) + + when: + List blobs = cu.listBlobsHierarchySegment(null, "", options, null) + .blockingGet().body().segment().blobItems() + + then: + blobs.get(0).name() == normalName + blobs.get(3).name() == 
uncommittedName + blobs.size() == 4 // Normal, copy, metadata, uncommitted + } + + def "List blobs hier options deleted"() { + setup: + enableSoftDelete() + String name = generateBlobName() + AppendBlobURL bu = cu.createAppendBlobURL(name) + bu.create(null, null, null, null).blockingGet() + bu.delete(null, null, null).blockingGet() + + when: + List blobs = cu.listBlobsHierarchySegment(null, "", + new ListBlobsOptions().withDetails(new BlobListingDetails().withDeletedBlobs(true)), null).blockingGet() + .body().segment().blobItems() + + then: + blobs.get(0).name() == name + blobs.size() == 1 + + disableSoftDelete() == null + } + + def "List blobs hier options prefix"() { + setup: + ListBlobsOptions options = new ListBlobsOptions().withPrefix("a") + String normalName = "a" + generateBlobName() + String copyName = "c" + generateBlobName() + String metadataName = "m" + generateBlobName() + String uncommittedName = "u" + generateBlobName() + setupListBlobsTest(normalName, copyName, metadataName, uncommittedName) + + when: + List blobs = cu.listBlobsHierarchySegment(null, "", options, null) + .blockingGet().body().segment().blobItems() + + then: + blobs.get(0).name() == normalName + blobs.size() == 1 // Normal + } + + def "List blobs hier options maxResults"() { + setup: + ListBlobsOptions options = new ListBlobsOptions().withDetails(new BlobListingDetails().withCopy(true) + .withUncommittedBlobs(true)).withMaxResults(1) + String normalName = "a" + generateBlobName() + String copyName = "c" + generateBlobName() + String metadataName = "m" + generateBlobName() + String uncommittedName = "u" + generateBlobName() + setupListBlobsTest(normalName, copyName, metadataName, uncommittedName) + + when: + List blobs = cu.listBlobsHierarchySegment(null, "", options, null) + .blockingGet().body().segment().blobItems() + + then: + blobs.size() == 1 + } + + @Unroll + def "List blobs hier options fail"() { + when: + def options = new ListBlobsOptions().withDetails(new 
BlobListingDetails().withSnapshots(snapshots)) + .withMaxResults(maxResults) + cu.listBlobsHierarchySegment(null, null, options, null) + + then: + def e = thrown(Exception) + exceptionType.isInstance(e) + + where: + snapshots | maxResults | exceptionType + true | 5 | UnsupportedOperationException + false | 0 | IllegalArgumentException + } + + def "List blobs hier delim"() { + setup: + def blobNames = Arrays.asList("a", "b/a", "c", "d/a", "e", "f", "g/a") + for (String blobName : blobNames) { + def bu = cu.createAppendBlobURL(blobName) + bu.create().blockingGet() + } + + when: + ContainerListBlobHierarchySegmentResponse response = + cu.listBlobsHierarchySegment(null, "/", null, null).blockingGet() + + and: + def expectedBlobs = Arrays.asList("a", "c", "e", "f") + def expectedPrefixes = Arrays.asList("b/", "d/", "g/") + + then: + response.body().segment().blobItems().size() == 4 + for (int i=0; i blobs = cu.listBlobsFlatSegment(null, null, null).blockingGet() + .body().segment().blobItems() + + then: + blobs.get(0).name() == name + blobs.get(1).name() == name + "2" + blobs.get(2).name() == name + "3" + + where: + name | _ + "中文" | _ + "az[]" | _ + "hello world" | _ + "hello/world" | _ + "hello&world" | _ + "!*'();:@&=+\$,/?#[]" | _ + } + + def "Root explicit"() { + setup: + cu = primaryServiceURL.createContainerURL(ContainerURL.ROOT_CONTAINER_NAME) + // Create root container if not exist. + try { + cu.create(null, null, null).blockingGet() + } + catch (StorageException se) { + if (se.errorCode() != StorageErrorCode.CONTAINER_ALREADY_EXISTS) { + throw se + } + } + BlobURL bu = cu.createAppendBlobURL("rootblob") + + expect: + bu.create(null, null, null, null).blockingGet().statusCode() == 201 + } + + def "Root implicit"() { + setup: + cu = primaryServiceURL.createContainerURL(ContainerURL.ROOT_CONTAINER_NAME) + // Create root container if not exist. 
+ try { + cu.create(null, null, null).blockingGet() + } + catch (StorageException se) { + if (se.errorCode() != StorageErrorCode.CONTAINER_ALREADY_EXISTS) { + throw se + } + } + PipelineOptions po = new PipelineOptions() + po.withClient(getHttpClient()) + HttpPipeline pipeline = StorageURL.createPipeline(primaryCreds, po) + AppendBlobURL bu = new AppendBlobURL(new URL("http://" + primaryCreds.getAccountName() + ".blob.core.windows.net/rootblob"), + pipeline) + + when: + AppendBlobCreateResponse createResponse = bu.create(null, null, null, null) + .blockingGet() + BlobGetPropertiesResponse propsResponse = bu.getProperties(null, null).blockingGet() + + then: + createResponse.statusCode() == 201 + propsResponse.statusCode() == 200 + propsResponse.headers().blobType() == BlobType.APPEND_BLOB + } + + def "Web container"() { + setup: + cu = primaryServiceURL.createContainerURL(ContainerURL.STATIC_WEBSITE_CONTAINER_NAME) + // Create the static website ($web) container if it does not exist. + try { + cu.create(null, null, null).blockingGet() + } + catch (StorageException se) { + if (se.errorCode() != StorageErrorCode.CONTAINER_ALREADY_EXISTS) { + throw se + } + } + def webContainer = primaryServiceURL.createContainerURL(ContainerURL.STATIC_WEBSITE_CONTAINER_NAME) + + when: + // Validate some basic operation. 
+ webContainer.setAccessPolicy(null, null, null, null).blockingGet() + + then: + notThrown(StorageException) + } + + def "With pipeline"() { + setup: + ContainerURL withPipeline = cu.withPipeline(HttpPipeline.build(new RequestPolicyFactory() { + @Override + RequestPolicy create(RequestPolicy requestPolicy, RequestPolicyOptions requestPolicyOptions) { + return new RequestPolicy() { + @Override + Single sendAsync(HttpRequest httpRequest) { + return Single.error(new Exception("Expected error")) + } + } + } + })) + + when: + withPipeline.create(null, null, null).blockingGet() + + then: + def e = thrown(Exception) + e.getMessage().contains("Expected error") + } + + def "Get account info"() { + when: + def response = primaryServiceURL.getAccountInfo(null).blockingGet() + + then: + response.headers().date() != null + response.headers().version() != null + response.headers().requestId() != null + response.headers().accountKind() != null + response.headers().skuName() != null + } + + def "Get account info min"() { + expect: + primaryServiceURL.getAccountInfo().blockingGet().statusCode() == 200 + } + + def "Get account info error"() { + when: + ServiceURL serviceURL = new ServiceURL(primaryServiceURL.toURL(), + StorageURL.createPipeline(new AnonymousCredentials(), new PipelineOptions())) + serviceURL.createContainerURL(generateContainerName()).getAccountInfo(null).blockingGet() + + then: + thrown(StorageException) + } + + def "Get account info context"() { + setup: + def pipeline = + HttpPipeline.build(getStubFactory(getContextStubPolicy(200, ContainerGetAccountInfoHeaders))) + + cu = cu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. 
+ cu.getAccountInfo(defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } +} diff --git a/storage/data-plane/src/test/java/com/microsoft/azure/storage/PageBlobAPITest.groovy b/storage/data-plane/src/test/java/com/microsoft/azure/storage/PageBlobAPITest.groovy new file mode 100644 index 0000000000000..62dd21067f93f --- /dev/null +++ b/storage/data-plane/src/test/java/com/microsoft/azure/storage/PageBlobAPITest.groovy @@ -0,0 +1,950 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.microsoft.azure.storage + +import com.microsoft.azure.storage.blob.* +import com.microsoft.azure.storage.blob.models.* +import com.microsoft.rest.v2.http.HttpPipeline +import com.microsoft.rest.v2.http.UnexpectedLengthException +import io.reactivex.Flowable +import spock.lang.Unroll + +import java.security.MessageDigest + +class PageBlobAPITest extends APISpec { + PageBlobURL bu + + def setup() { + bu = cu.createPageBlobURL(generateBlobName()) + bu.create(PageBlobURL.PAGE_BYTES, null, null, null, null, null).blockingGet() + } + + def "Create all null"() { + setup: + bu = cu.createPageBlobURL(generateBlobName()) + + when: + PageBlobCreateResponse response = + bu.create(PageBlobURL.PAGE_BYTES, null, null, null, + null, null).blockingGet() + + then: + response.statusCode() == 201 + validateBasicHeaders(response.headers()) + response.headers().contentMD5() == null + response.headers().isServerEncrypted() + } + + def "Create min"() { + expect: + bu.create(PageBlobURL.PAGE_BYTES).blockingGet().statusCode() == 201 + } + + def "Create sequence number"() { + when: + bu.create(PageBlobURL.PAGE_BYTES, 2, null, null, + null, null).blockingGet() + + then: + bu.getProperties(null, null).blockingGet().headers().blobSequenceNumber() == 2 + } + + @Unroll + def "Create headers"() { + setup: + BlobHTTPHeaders headers = new BlobHTTPHeaders().withBlobCacheControl(cacheControl) + .withBlobContentDisposition(contentDisposition) + .withBlobContentEncoding(contentEncoding) + .withBlobContentLanguage(contentLanguage) + .withBlobContentMD5(contentMD5) + .withBlobContentType(contentType) + + when: + bu.create(PageBlobURL.PAGE_BYTES, null, headers, null, null, null) + .blockingGet() + BlobGetPropertiesResponse response = bu.getProperties(null, null).blockingGet() + + then: + validateBlobHeaders(response.headers(), cacheControl, contentDisposition, contentEncoding, contentLanguage, + contentMD5, contentType == null ? 
"application/octet-stream" : contentType) + // HTTP default content type is application/octet-stream + + where: + cacheControl | contentDisposition | contentEncoding | contentLanguage | contentMD5 | contentType + null | null | null | null | null | null + "control" | "disposition" | "encoding" | "language" | Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(defaultData.array())) | "type" + } + + @Unroll + def "Create metadata"() { + setup: + Metadata metadata = new Metadata() + if (key1 != null) { + metadata.put(key1, value1) + } + if (key2 != null) { + metadata.put(key2, value2) + } + + when: + bu.create(PageBlobURL.PAGE_BYTES, null, null, metadata, null, null) + .blockingGet() + BlobGetPropertiesResponse response = bu.getProperties(null, null).blockingGet() + + then: + response.statusCode() == 200 + response.headers().metadata() == metadata + + where: + key1 | value1 | key2 | value2 + null | null | null | null + "foo" | "bar" | "fizz" | "buzz" + } + + @Unroll + def "Create AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + leaseID = setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + + expect: + bu.create(PageBlobURL.PAGE_BYTES, null, null, null, bac, null).blockingGet() + .statusCode() == 201 + + where: + modified | unmodified | match | noneMatch | leaseID + null | null | null | null | null + oldDate | null | null | null | null + null | newDate | null | null | null + null | null | receivedEtag | null | null + null | null | null | garbageEtag | null + null | null | null | null | receivedLeaseID + } + + @Unroll + def "Create AC fail"() { + + where: + modified | unmodified | match | noneMatch | leaseID + newDate | null | null | null | null + 
null | oldDate | null | null | null + null | null | garbageEtag | null | null + null | null | null | receivedEtag | null + null | null | null | null | garbageLeaseID + } + + def "Create error"() { + when: + bu.create(PageBlobURL.PAGE_BYTES, null, null, null, new BlobAccessConditions().withLeaseAccessConditions( + new LeaseAccessConditions().withLeaseId("id")), null).blockingGet() + + + then: + thrown(StorageException) + } + + def "Create context"() { + setup: + def pipeline = + HttpPipeline.build(getStubFactory(getContextStubPolicy(201, PageBlobCreateHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + bu.create(512, null, null, null, null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "Upload page"() { + when: + PageBlobUploadPagesResponse response = bu.uploadPages( + new PageRange().withStart(0).withEnd(PageBlobURL.PAGE_BYTES - 1), + Flowable.just(getRandomData(PageBlobURL.PAGE_BYTES)), null, null).blockingGet() + PageBlobUploadPagesHeaders headers = response.headers() + + then: + response.statusCode() == 201 + validateBasicHeaders(headers) + headers.contentMD5() != null + headers.blobSequenceNumber() == 0 + headers.isServerEncrypted() + } + + def "Upload page min"() { + expect: + bu.uploadPages(new PageRange().withStart(0).withEnd(PageBlobURL.PAGE_BYTES - 1), + Flowable.just(getRandomData(PageBlobURL.PAGE_BYTES))) + } + + @Unroll + def "Upload page IA"() { + when: + bu.uploadPages(new PageRange().withStart(0).withEnd(PageBlobURL.PAGE_BYTES * 2 - 1), data, + null, null).blockingGet() + + then: + def e = thrown(Exception) + exceptionType.isInstance(e) + + where: + data | exceptionType + null | IllegalArgumentException + Flowable.just(getRandomData(PageBlobURL.PAGE_BYTES)) | UnexpectedLengthException + Flowable.just(getRandomData(PageBlobURL.PAGE_BYTES * 3)) | UnexpectedLengthException + } + + @Unroll + def "Upload page AC"() { + setup: + match = 
setupBlobMatchCondition(bu, match) + leaseID = setupBlobLeaseCondition(bu, leaseID) + PageBlobAccessConditions pac = new PageBlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + .withSequenceNumberAccessConditions(new SequenceNumberAccessConditions() + .withIfSequenceNumberLessThan(sequenceNumberLT).withIfSequenceNumberLessThanOrEqualTo(sequenceNumberLTE) + .withIfSequenceNumberEqualTo(sequenceNumberEqual)) + + expect: + bu.uploadPages(new PageRange().withStart(0).withEnd(PageBlobURL.PAGE_BYTES - 1), + Flowable.just(getRandomData(PageBlobURL.PAGE_BYTES)), pac, null).blockingGet().statusCode() == 201 + + where: + modified | unmodified | match | noneMatch | leaseID | sequenceNumberLT | sequenceNumberLTE | sequenceNumberEqual + null | null | null | null | null | null | null | null + oldDate | null | null | null | null | null | null | null + null | newDate | null | null | null | null | null | null + null | null | receivedEtag | null | null | null | null | null + null | null | null | garbageEtag | null | null | null | null + null | null | null | null | receivedLeaseID | null | null | null + null | null | null | null | null | 5 | null | null + null | null | null | null | null | null | 3 | null + null | null | null | null | null | null | null | 0 + } + + @Unroll + def "Upload page AC fail"() { + setup: + noneMatch = setupBlobMatchCondition(bu, noneMatch) + setupBlobLeaseCondition(bu, leaseID) + PageBlobAccessConditions pac = new PageBlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + .withSequenceNumberAccessConditions(new 
SequenceNumberAccessConditions() + .withIfSequenceNumberLessThan(sequenceNumberLT).withIfSequenceNumberLessThanOrEqualTo(sequenceNumberLTE) + .withIfSequenceNumberEqualTo(sequenceNumberEqual)) + + when: + bu.uploadPages(new PageRange().withStart(0).withEnd(PageBlobURL.PAGE_BYTES - 1), + Flowable.just(getRandomData(PageBlobURL.PAGE_BYTES)), pac, null).blockingGet() + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch | leaseID | sequenceNumberLT | sequenceNumberLTE | sequenceNumberEqual + newDate | null | null | null | null | null | null | null + null | oldDate | null | null | null | null | null | null + null | null | garbageEtag | null | null | null | null | null + null | null | null | receivedEtag | null | null | null | null + null | null | null | null | garbageLeaseID | null | null | null + null | null | null | null | null | -1 | null | null + null | null | null | null | null | null | -1 | null + null | null | null | null | null | null | null | 100 + } + + def "Upload page error"() { + setup: + bu = cu.createPageBlobURL(generateBlobName()) + + when: + bu.uploadPages(new PageRange().withStart(0).withEnd(PageBlobURL.PAGE_BYTES - 1), + Flowable.just(getRandomData(PageBlobURL.PAGE_BYTES)), + new PageBlobAccessConditions().withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId("id")), + null).blockingGet() + + then: + thrown(StorageException) + } + + def "Upload page context"() { + setup: + def pipeline = + HttpPipeline.build(getStubFactory(getContextStubPolicy(201, PageBlobUploadPagesHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. 
+ bu.uploadPages(new PageRange().withStart(0).withEnd(511), defaultFlowable, null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "Clear page"() { + setup: + bu.uploadPages(new PageRange().withStart(0).withEnd(PageBlobURL.PAGE_BYTES - 1), + Flowable.just(getRandomData(PageBlobURL.PAGE_BYTES)), null, null).blockingGet() + + when: + PageBlobClearPagesHeaders headers = + bu.clearPages(new PageRange().withStart(0).withEnd(PageBlobURL.PAGE_BYTES - 1), + null, null) + .blockingGet().headers() + + then: + bu.getPageRanges(null, null, null).blockingGet().body().pageRange().size() == 0 + validateBasicHeaders(headers) + headers.contentMD5() == null + headers.blobSequenceNumber() == 0 + } + + def "Clear page min"() { + expect: + bu.clearPages(new PageRange().withStart(0).withEnd(PageBlobURL.PAGE_BYTES - 1)) + } + + @Unroll + def "Clear pages AC"() { + setup: + bu.uploadPages(new PageRange().withStart(0).withEnd(PageBlobURL.PAGE_BYTES - 1), + Flowable.just(getRandomData(PageBlobURL.PAGE_BYTES)), null, null).blockingGet() + match = setupBlobMatchCondition(bu, match) + leaseID = setupBlobLeaseCondition(bu, leaseID) + PageBlobAccessConditions pac = new PageBlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + .withSequenceNumberAccessConditions(new SequenceNumberAccessConditions() + .withIfSequenceNumberLessThan(sequenceNumberLT).withIfSequenceNumberLessThanOrEqualTo(sequenceNumberLTE) + .withIfSequenceNumberEqualTo(sequenceNumberEqual)) + + expect: + bu.clearPages(new PageRange().withStart(0).withEnd(PageBlobURL.PAGE_BYTES - 1), pac, null).blockingGet() + .statusCode() == 201 + + where: + modified | unmodified | match | noneMatch | leaseID | sequenceNumberLT | sequenceNumberLTE | sequenceNumberEqual + null | null | null | 
null | null | null | null | null + oldDate | null | null | null | null | null | null | null + null | newDate | null | null | null | null | null | null + null | null | receivedEtag | null | null | null | null | null + null | null | null | garbageEtag | null | null | null | null + null | null | null | null | receivedLeaseID | null | null | null + null | null | null | null | null | 5 | null | null + null | null | null | null | null | null | 3 | null + null | null | null | null | null | null | null | 0 + } + + @Unroll + def "Clear pages AC fail"() { + setup: + bu.uploadPages(new PageRange().withStart(0).withEnd(PageBlobURL.PAGE_BYTES - 1), + Flowable.just(getRandomData(PageBlobURL.PAGE_BYTES)), null, null).blockingGet() + noneMatch = setupBlobMatchCondition(bu, noneMatch) + setupBlobLeaseCondition(bu, leaseID) + PageBlobAccessConditions pac = new PageBlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + .withSequenceNumberAccessConditions(new SequenceNumberAccessConditions() + .withIfSequenceNumberLessThan(sequenceNumberLT).withIfSequenceNumberLessThanOrEqualTo(sequenceNumberLTE) + .withIfSequenceNumberEqualTo(sequenceNumberEqual)) + + + when: + bu.clearPages(new PageRange().withStart(0).withEnd(PageBlobURL.PAGE_BYTES - 1), pac, null).blockingGet() + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch | leaseID | sequenceNumberLT | sequenceNumberLTE | sequenceNumberEqual + newDate | null | null | null | null | null | null | null + null | oldDate | null | null | null | null | null | null + null | null | garbageEtag | null | null | null | null | null + null | null | null | receivedEtag | null | null | null | null + null | null | null | null | garbageLeaseID | null | null | null + null | null | null | null | 
null | -1 | null | null + null | null | null | null | null | null | -1 | null + null | null | null | null | null | null | null | 100 + } + + def "Clear page error"() { + setup: + bu = cu.createPageBlobURL(generateBlobName()) + + when: + bu.clearPages(new PageRange().withStart(0).withEnd(PageBlobURL.PAGE_BYTES - 1), null, null) + .blockingGet() + + then: + thrown(StorageException) + } + + def "Clear page context"() { + setup: + def pipeline = + HttpPipeline.build(getStubFactory(getContextStubPolicy(201, PageBlobClearPagesHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + bu.clearPages(new PageRange().withStart(0).withEnd(511), null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "Get page ranges"() { + setup: + bu.uploadPages(new PageRange().withStart(0).withEnd(PageBlobURL.PAGE_BYTES - 1), + Flowable.just(getRandomData(PageBlobURL.PAGE_BYTES)), null, null).blockingGet() + + when: + PageBlobGetPageRangesResponse response = + bu.getPageRanges(new BlobRange().withCount(PageBlobURL.PAGE_BYTES), null, null).blockingGet() + PageBlobGetPageRangesHeaders headers = response.headers() + + then: + response.statusCode() == 200 + response.body().pageRange().size() == 1 + validateBasicHeaders(headers) + headers.blobContentLength() == (long) PageBlobURL.PAGE_BYTES + } + + def "Get page ranges min"() { + expect: + bu.getPageRanges(null).blockingGet().statusCode() == 200 + } + + @Unroll + def "Get page ranges AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + leaseID = setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + expect: + bu.getPageRanges(new 
BlobRange().withCount(PageBlobURL.PAGE_BYTES), bac, null).blockingGet().statusCode() == 200 + + where: + modified | unmodified | match | noneMatch | leaseID + null | null | null | null | null + oldDate | null | null | null | null + null | newDate | null | null | null + null | null | receivedEtag | null | null + null | null | null | garbageEtag | null + null | null | null | null | receivedLeaseID + } + + @Unroll + def "Get page ranges AC fail"() { + setup: + noneMatch = setupBlobMatchCondition(bu, noneMatch) + setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + when: + bu.getPageRanges(new BlobRange().withCount(PageBlobURL.PAGE_BYTES), bac, null).blockingGet().statusCode() + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch | leaseID + newDate | null | null | null | null + null | oldDate | null | null | null + null | null | garbageEtag | null | null + null | null | null | receivedEtag | null + null | null | null | null | garbageLeaseID + } + + def "Get page ranges error"() { + setup: + bu = cu.createPageBlobURL(generateBlobName()) + + when: + bu.getPageRanges(null, null, null).blockingGet() + + then: + thrown(StorageException) + } + + def "Get page ranges context"() { + setup: + def pipeline = + HttpPipeline.build(getStubFactory(getContextStubPolicy(200, PageBlobGetPageRangesHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. 
+ bu.getPageRanges(null, null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "Get page ranges diff"() { + setup: + bu.create(PageBlobURL.PAGE_BYTES * 2, null, null, null, null, null) + .blockingGet() + bu.uploadPages(new PageRange().withStart(PageBlobURL.PAGE_BYTES).withEnd(PageBlobURL.PAGE_BYTES * 2 - 1), + Flowable.just(getRandomData(PageBlobURL.PAGE_BYTES)), + null, null).blockingGet() + String snapshot = bu.createSnapshot(null, null, null).blockingGet().headers().snapshot() + bu.uploadPages(new PageRange().withStart(0).withEnd(PageBlobURL.PAGE_BYTES - 1), + Flowable.just(getRandomData(PageBlobURL.PAGE_BYTES)), null, null).blockingGet() + bu.clearPages(new PageRange().withStart(PageBlobURL.PAGE_BYTES).withEnd(PageBlobURL.PAGE_BYTES * 2 - 1), + null, null).blockingGet() + + when: + PageBlobGetPageRangesDiffResponse response = + bu.getPageRangesDiff(new BlobRange().withCount(PageBlobURL.PAGE_BYTES * 2), snapshot, + null, null).blockingGet() + PageBlobGetPageRangesDiffHeaders headers = response.headers() + + then: + response.body().pageRange().size() == 1 + response.body().pageRange().get(0).start() == 0 + response.body().pageRange().get(0).end() == PageBlobURL.PAGE_BYTES - 1 + response.body().clearRange().size() == 1 + response.body().clearRange().get(0).start() == PageBlobURL.PAGE_BYTES + response.body().clearRange().get(0).end() == PageBlobURL.PAGE_BYTES * 2 - 1 + validateBasicHeaders(headers) + headers.blobContentLength() == PageBlobURL.PAGE_BYTES * 2 + } + + def "Get page ranges diff min"() { + setup: + def snapshot = bu.createSnapshot().blockingGet().headers().snapshot() + + expect: + bu.getPageRangesDiff(null, snapshot).blockingGet().statusCode() == 200 + } + + @Unroll + def "Get page ranges diff AC"() { + setup: + String snapshot = bu.createSnapshot(null, null, null).blockingGet().headers().snapshot() + match = setupBlobMatchCondition(bu, match) + leaseID = setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = 
new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + expect: + bu.getPageRangesDiff(new BlobRange().withCount(PageBlobURL.PAGE_BYTES), snapshot, bac, null) + .blockingGet().statusCode() == 200 + + where: + modified | unmodified | match | noneMatch | leaseID + null | null | null | null | null + oldDate | null | null | null | null + null | newDate | null | null | null + null | null | receivedEtag | null | null + null | null | null | garbageEtag | null + null | null | null | null | receivedLeaseID + } + + @Unroll + def "Get page ranges diff AC fail"() { + setup: + String snapshot = bu.createSnapshot(null, null, null).blockingGet().headers().snapshot() + noneMatch = setupBlobMatchCondition(bu, noneMatch) + setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + when: + bu.getPageRangesDiff(new BlobRange().withCount(PageBlobURL.PAGE_BYTES), snapshot, bac, null).blockingGet() + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch | leaseID + newDate | null | null | null | null + null | oldDate | null | null | null + null | null | garbageEtag | null | null + null | null | null | receivedEtag | null + null | null | null | null | garbageLeaseID + } + + def "Get page ranges diff error"() { + setup: + bu = cu.createPageBlobURL(generateBlobName()) + + when: + bu.getPageRangesDiff(null, "snapshot", null, null).blockingGet() + + then: + thrown(StorageException) + } + + def "Get page ranges diff context"() { + setup: + def 
pipeline = + HttpPipeline.build(getStubFactory(getContextStubPolicy(200, PageBlobGetPageRangesDiffHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + bu.getPageRangesDiff(null, "snapshot", null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + // Test the serialization of PageRange with illegal bounds + @Unroll + def "PageRange IA"() { + setup: + def range = new PageRange().withStart(start).withEnd(end) + + when: + bu.clearPages(range, null, null) + + then: + thrown(IllegalArgumentException) + + where: + start | end + 1 | 1 + -PageBlobURL.PAGE_BYTES | PageBlobURL.PAGE_BYTES - 1 + 0 | 0 + 1 | PageBlobURL.PAGE_BYTES - 1 + 0 | PageBlobURL.PAGE_BYTES + PageBlobURL.PAGE_BYTES * 2 | PageBlobURL.PAGE_BYTES - 1 + } + + def "Resize"() { + setup: + PageBlobResizeHeaders headers = bu.resize(PageBlobURL.PAGE_BYTES * 2, null, null).blockingGet() + .headers() + + expect: + bu.getProperties(null, null).blockingGet().headers().contentLength() == PageBlobURL.PAGE_BYTES * 2 + validateBasicHeaders(headers) + headers.blobSequenceNumber() != null + } + + def "Resize min"() { + expect: + bu.resize(PageBlobURL.PAGE_BYTES).blockingGet().statusCode() == 200 + } + + @Unroll + def "Resize AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + leaseID = setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + expect: + bu.resize(PageBlobURL.PAGE_BYTES * 2, bac, null).blockingGet().statusCode() == 200 + + where: + modified | unmodified | match | noneMatch | leaseID + null | null | null | null | null + oldDate | null | null | null | null + null | newDate | null | null | null + null | null | receivedEtag 
| null | null + null | null | null | garbageEtag | null + null | null | null | null | receivedLeaseID + } + + @Unroll + def "Resize AC fail"() { + setup: + noneMatch = setupBlobMatchCondition(bu, noneMatch) + setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + when: + bu.resize(PageBlobURL.PAGE_BYTES * 2, bac, null).blockingGet() + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch | leaseID + newDate | null | null | null | null + null | oldDate | null | null | null + null | null | garbageEtag | null | null + null | null | null | receivedEtag | null + null | null | null | null | garbageLeaseID + } + + def "Resize error"() { + setup: + bu = cu.createPageBlobURL(generateBlobName()) + + when: + bu.resize(0, null, null).blockingGet() + + then: + thrown(StorageException) + } + + def "Resize context"() { + setup: + def pipeline = + HttpPipeline.build(getStubFactory(getContextStubPolicy(200, PageBlobResizeHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. 
+ bu.resize(512, null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + @Unroll + def "Sequence number"() { + setup: + PageBlobUpdateSequenceNumberHeaders headers = + bu.updateSequenceNumber(action, number, null, null) + .blockingGet().headers() + + expect: + bu.getProperties(null, null).blockingGet().headers().blobSequenceNumber() == result + validateBasicHeaders(headers) + headers.blobSequenceNumber() == result + + where: + action | number || result + SequenceNumberActionType.UPDATE | 5 || 5 + SequenceNumberActionType.INCREMENT | null || 1 + SequenceNumberActionType.MAX | 2 || 2 + } + + def "Sequence number min"() { + expect: + bu.updateSequenceNumber(SequenceNumberActionType.INCREMENT, null).blockingGet().statusCode() == 200 + } + + @Unroll + def "Sequence number AC"() { + setup: + match = setupBlobMatchCondition(bu, match) + leaseID = setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + expect: + bu.updateSequenceNumber(SequenceNumberActionType.UPDATE, 1, bac, null).blockingGet() + .statusCode() == 200 + + where: + modified | unmodified | match | noneMatch | leaseID + null | null | null | null | null + oldDate | null | null | null | null + null | newDate | null | null | null + null | null | receivedEtag | null | null + null | null | null | garbageEtag | null + null | null | null | null | receivedLeaseID + } + + @Unroll + def "Sequence number AC fail"() { + setup: + noneMatch = setupBlobMatchCondition(bu, noneMatch) + setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + 
.withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + when: + bu.updateSequenceNumber(SequenceNumberActionType.UPDATE, 1, bac, null).blockingGet() + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch | leaseID + newDate | null | null | null | null + null | oldDate | null | null | null + null | null | garbageEtag | null | null + null | null | null | receivedEtag | null + null | null | null | null | garbageLeaseID + } + + def "Sequence number error"() { + setup: + bu = cu.createPageBlobURL(generateBlobName()) + + when: + bu.updateSequenceNumber(SequenceNumberActionType.UPDATE, 0, null, null).blockingGet() + + then: + thrown(StorageException) + } + + def "Sequence number context"() { + setup: + def pipeline = + HttpPipeline.build(getStubFactory(getContextStubPolicy(200, PageBlobUpdateSequenceNumberHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. 
+ bu.updateSequenceNumber(SequenceNumberActionType.UPDATE, 3, null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } + + def "Start incremental copy"() { + setup: + cu.setAccessPolicy(PublicAccessType.BLOB, null, null, null).blockingGet() + PageBlobURL bu2 = cu.createPageBlobURL(generateBlobName()) + String snapshot = bu.createSnapshot(null, null, null).blockingGet().headers().snapshot() + PageBlobCopyIncrementalHeaders headers = bu2.copyIncremental(bu.toURL(), snapshot, null, null) + .blockingGet().headers() + waitForCopy(bu2, headers.copyStatus()) + + expect: + bu2.getProperties(null, null).blockingGet().headers().isIncrementalCopy() + bu2.getProperties(null, null).blockingGet().headers().destinationSnapshot() != null + validateBasicHeaders(headers) + headers.copyId() != null + headers.copyStatus() != null + } + + def "Start incremental copy min"() { + setup: + cu.setAccessPolicy(PublicAccessType.BLOB, null, null, null).blockingGet() + PageBlobURL bu2 = cu.createPageBlobURL(generateBlobName()) + String snapshot = bu.createSnapshot(null, null, null).blockingGet().headers().snapshot() + + expect: + bu2.copyIncremental(bu.toURL(), snapshot, null, null).blockingGet().statusCode() == 202 + } + + @Unroll + def "Start incremental copy AC"() { + setup: + cu.setAccessPolicy(PublicAccessType.BLOB, null, null, null).blockingGet() + PageBlobURL bu2 = cu.createPageBlobURL(generateBlobName()) + String snapshot = bu.createSnapshot(null, null, null).blockingGet().headers().snapshot() + def response = bu2.copyIncremental(bu.toURL(), snapshot, null, null).blockingGet() + waitForCopy(bu2, response.headers().copyStatus()) + snapshot = bu.createSnapshot(null, null, null).blockingGet().headers().snapshot() + match = setupBlobMatchCondition(bu2, match) + def mac = new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch) + + expect: + bu2.copyIncremental(bu.toURL(), snapshot, 
mac, null).blockingGet().statusCode() == 202 + + where: + modified | unmodified | match | noneMatch + null | null | null | null + oldDate | null | null | null + null | newDate | null | null + null | null | receivedEtag | null + null | null | null | garbageEtag + } + + @Unroll + def "Start incremental copy AC fail"() { + setup: + cu.setAccessPolicy(PublicAccessType.BLOB, null, null, null).blockingGet() + PageBlobURL bu2 = cu.createPageBlobURL(generateBlobName()) + String snapshot = bu.createSnapshot(null, null, null).blockingGet().headers().snapshot() + bu2.copyIncremental(bu.toURL(), snapshot, null, null).blockingGet() + snapshot = bu.createSnapshot(null, null, null).blockingGet().headers().snapshot() + noneMatch = setupBlobMatchCondition(bu2, noneMatch) + def mac = new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch) + + when: + bu2.copyIncremental(bu.toURL(), snapshot, mac, null).blockingGet().statusCode() + + then: + thrown(StorageException) + + where: + modified | unmodified | match | noneMatch + newDate | null | null | null + null | oldDate | null | null + null | null | garbageEtag | null + null | null | null | receivedEtag + } + + def "Start incremental copy error"() { + setup: + bu = cu.createPageBlobURL(generateBlobName()) + + when: + bu.copyIncremental(new URL("https://www.error.com"), "snapshot", null, null) + .blockingGet() + + then: + thrown(StorageException) + } + + def "Start incremental copy context"() { + setup: + def pipeline = + HttpPipeline.build(getStubFactory(getContextStubPolicy(202, PageBlobCopyIncrementalHeaders))) + + bu = bu.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. 
+ bu.copyIncremental(bu.toURL(), "snapshot", null, defaultContext).blockingGet() + + then: + notThrown(RuntimeException) + } +} diff --git a/storage/data-plane/src/test/java/com/microsoft/azure/storage/Samples.java b/storage/data-plane/src/test/java/com/microsoft/azure/storage/Samples.java new file mode 100644 index 0000000000000..aa7535563a063 --- /dev/null +++ b/storage/data-plane/src/test/java/com/microsoft/azure/storage/Samples.java @@ -0,0 +1,2274 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.microsoft.azure.storage; + +import com.microsoft.azure.storage.blob.*; +import com.microsoft.azure.storage.blob.models.*; +import com.microsoft.rest.v2.RestException; +import com.microsoft.rest.v2.http.HttpPipeline; +import com.microsoft.rest.v2.http.HttpPipelineLogLevel; +import com.microsoft.rest.v2.http.HttpPipelineLogger; +import com.microsoft.rest.v2.http.HttpResponse; +import com.microsoft.rest.v2.util.FlowableUtil; +import io.reactivex.Completable; +import io.reactivex.Flowable; +import io.reactivex.Observable; +import io.reactivex.Single; +import io.reactivex.functions.BiConsumer; +import org.junit.Test; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URL; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; +import java.nio.channels.AsynchronousFileChannel; +import java.nio.file.StandardOpenOption; +import java.security.InvalidKeyException; +import java.time.OffsetDateTime; +import java.util.*; +import java.util.concurrent.TimeUnit; +import java.util.logging.Level; +import java.util.logging.Logger; + +public class Samples { + public static Single createContainerIfNotExists(ContainerURL containerURL) { + return containerURL.create(null, null, null).map((r) -> true).onErrorResumeNext((e) -> { + if (e instanceof RestException) { + RestException re = (RestException) e; + if (re.getMessage().contains("ContainerAlreadyExists")) { + return Single.just(false); + } + } + + return Single.error(e); + }); + } + + public static Single deleteContainerIfExists(ContainerURL containerURL) { + return containerURL.delete(null, null).map((r) -> true).onErrorResumeNext((e) -> { + if (e instanceof RestException) { + RestException re = (RestException) e; + if (re.getMessage().contains("ContainerNotFound")) { + return Single.just(false); + } + } + + return Single.error(e); + }); + } + + public static Observable listBlobsLazy(ContainerURL containerURL, 
ListBlobsOptions listBlobsOptions) { + return containerURL.listBlobsFlatSegment(null, listBlobsOptions, null) + .flatMapObservable((r) -> listContainersResultToContainerObservable(containerURL, listBlobsOptions, r)); + } + + private static Observable listContainersResultToContainerObservable( + ContainerURL containerURL, ListBlobsOptions listBlobsOptions, + ContainerListBlobFlatSegmentResponse response) { + Observable result = Observable.fromIterable(response.body().segment().blobItems()); + + System.out.println("!!! count: " + response.body().segment().blobItems()); + + if (response.body().nextMarker() != null) { + System.out.println("Hit continuation in listing at " + response.body().segment().blobItems().get( + response.body().segment().blobItems().size() - 1).name()); + // Recursively add the continuation items to the observable. + result = result.concatWith(containerURL.listBlobsFlatSegment(response.body().nextMarker(), listBlobsOptions, + null) + .flatMapObservable((r) -> + listContainersResultToContainerObservable(containerURL, listBlobsOptions, r))); + } + + return result; + } + + private String getAccountName() { + return System.getenv("ACCOUNT_NAME"); + } + + private String getAccountKey() { + return System.getenv("ACCOUNT_KEY"); + } + + /** + * This example shows how to start using the Azure Storage Blob SDK for Java. + */ + @Test + public void basicExample() throws InvalidKeyException, MalformedURLException { + // From the Azure portal, get your Storage account's name and account key. + String accountName = getAccountName(); + String accountKey = getAccountKey(); + + // Use your Storage account's name and key to create a credential object; this is used to access your account. + SharedKeyCredentials credential = new SharedKeyCredentials(accountName, accountKey); + + /* + Create a request pipeline that is used to process HTTP(S) requests and responses. It requires your account + credentials. 
In more advanced scenarios, you can configure telemetry, retry policies, logging, and other + options. Also you can configure multiple pipelines for different scenarios. + */ + HttpPipeline pipeline = StorageURL.createPipeline(credential, new PipelineOptions()); + + /* + From the Azure portal, get your Storage account blob service URL endpoint. + The URL typically looks like this: + */ + URL u = new URL(String.format(Locale.ROOT, "https://%s.blob.core.windows.net", accountName)); + + // Create a ServiceURL objet that wraps the service URL and a request pipeline. + ServiceURL serviceURL = new ServiceURL(u, pipeline); + + // Now you can use the ServiceURL to perform various container and blob operations. + + // This example shows several common operations just to get you started. + + /* + Create a URL that references a to-be-created container in your Azure Storage account. This returns a + ContainerURL object that wraps the container's URL and a request pipeline (inherited from serviceURL). + Note that container names require lowercase. + */ + ContainerURL containerURL = serviceURL.createContainerURL("myjavacontainerbasic" + System.currentTimeMillis()); + + /* + Create a URL that references a to-be-created blob in your Azure Storage account's container. + This returns a BlockBlobURL object that wraps the blob's URl and a request pipeline + (inherited from containerURL). Note that blob names can be mixed case. + */ + BlockBlobURL blobURL = containerURL.createBlockBlobURL("HelloWorld.txt"); + + String data = "Hello world!"; + + // Create the container on the service (with no metadata and no public access) + containerURL.create(null, null, null) + .flatMap(containerCreateResponse -> + /* + Create the blob with string (plain text) content. + NOTE: The Flowable containing the data must be replayable to support retries. That is, it must + yield the same data every time it is subscribed to. 
+ NOTE: If the provided length does not match the actual length, this method will throw. + */ + blobURL.upload(Flowable.just(ByteBuffer.wrap(data.getBytes())), data.length(), + null, null, null, null)) + .flatMap(blobUploadResponse -> + // Download the blob's content. + blobURL.download(null, null, false, null)) + .flatMap(blobDownloadResponse -> + // Verify that the blob data round-tripped correctly. + FlowableUtil.collectBytesInBuffer(blobDownloadResponse.body(null)) + .doOnSuccess(byteBuffer -> { + if (byteBuffer.compareTo(ByteBuffer.wrap(data.getBytes())) != 0) { + throw new Exception("The downloaded data does not match the uploaded data."); + } + })) + .flatMap(byteBuffer -> + /* + List the blob(s) in our container; since a container may hold millions of blobs, this is done + one segment at a time. + */ + containerURL.listBlobsFlatSegment(null, new ListBlobsOptions().withMaxResults(1), null)) + .flatMap(containerListBlobFlatSegmentResponse -> + // The asynchronous requests require we use recursion to continue our listing. + listBlobsFlatHelper(containerURL, containerListBlobFlatSegmentResponse)) + .flatMap(containerListBlobFlatSegmentResponse -> + // Delete the blob we created earlier. + blobURL.delete(null, null, null)) + .flatMap(blobDeleteResponse -> + // Delete the container we created earlier. + containerURL.delete(null, null)) + /* + This will synchronize all the above operations. This is strongly discouraged for use in production as + it eliminates the benefits of asynchronous IO. We use it here to enable the sample to complete and + demonstrate its effectiveness. + */ + .blockingGet(); + } + + // + public Single listBlobsFlatHelper( + ContainerURL containerURL, ContainerListBlobFlatSegmentResponse response) { + + // Process the blobs returned in this result segment (if the segment is empty, blob() will be null. 
+ if (response.body().segment().blobItems() != null) { + for (BlobItem b : response.body().segment().blobItems()) { + String output = "Blob name: " + b.name(); + if (b.snapshot() != null) { + output += ", Snapshot: " + b.snapshot(); + } + System.out.println(output); + } + } + + // If there is not another segment, return this response as the final response. + if (response.body().nextMarker() == null) { + return Single.just(response); + } else { + /* + IMPORTANT: ListBlobsFlatSegment returns the start of the next segment; you MUST use this to get the next + segment (after processing the current result segment + */ + String nextMarker = response.body().nextMarker(); + + /* + The presence of the marker indicates that there are more blobs to list, so we make another call to + listBlobsFlatSegment and pass the result through this helper function. + */ + return containerURL.listBlobsFlatSegment(nextMarker, new ListBlobsOptions().withMaxResults(1), null) + .flatMap(containersListBlobFlatSegmentResponse -> + listBlobsFlatHelper(containerURL, containersListBlobFlatSegmentResponse)); + } + } + // + + // + public Single listBlobsHierarchyHelper( + ContainerURL containerURL, ContainerListBlobHierarchySegmentResponse response) { + + // Process the blobs returned in this result segment (if the segment is empty, blob() will be null. + if (response.body().segment().blobItems() != null) { + for (BlobItem b : response.body().segment().blobItems()) { + String output = "Blob name: " + b.name(); + if (b.snapshot() != null) { + output += ", Snapshot: " + b.snapshot(); + } + System.out.println(output); + } + } + + // Process the blobsPrefixes returned in this result segment + if (response.body().segment().blobPrefixes() != null) { + for (BlobPrefix bp : response.body().segment().blobPrefixes()) { + // Process the prefixes. + } + } + + // If there is not another segment, return this response as the final response. 
+ if (response.body().nextMarker() == null) { + return Single.just(response); + } else { + /* + IMPORTANT: ListBlobHierarchySegment returns the start of the next segment; you MUST use this to get the + next segment (after processing the current result segment + */ + String nextMarker = response.body().nextMarker(); + + /* + The presence of the marker indicates that there are more blobs to list, so we make another call to + listBlobsHierarchySegment and pass the result through this helper function. + */ + return containerURL.listBlobsHierarchySegment(nextMarker, response.body().delimiter(), + new ListBlobsOptions().withMaxResults(1), null) + .flatMap(containersListBlobHierarchySegmentResponse -> + listBlobsHierarchyHelper(containerURL, containersListBlobHierarchySegmentResponse)); + } + } + // + + // + public Single listContainersHelper( + ServiceURL serviceURL, ServiceListContainersSegmentResponse response) { + + // Process the containers returned in this result segment (if the segment is empty, containerItems will be null. + if (response.body().containerItems() != null) { + for (ContainerItem b : response.body().containerItems()) { + String output = "Container name: " + b.name(); + System.out.println(output); + } + } + + // If there is not another segment, return this response as the final response. + if (response.body().nextMarker() == null) { + return Single.just(response); + } else { + /* + IMPORTANT: ListContainersSegment returns the start of the next segment; you MUST use this to get the + next segment (after processing the current result segment + */ + String nextMarker = response.body().nextMarker(); + + /* + The presence of the marker indicates that there are more blobs to list, so we make another call to + listContainersSegment and pass the result through this helper function. 
+ */ + return serviceURL.listContainersSegment(nextMarker, ListContainersOptions.DEFAULT, null) + .flatMap(containersListBlobHierarchySegmentResponse -> + listContainersHelper(serviceURL, response)); + } + } + // + + // This example shows how you can configure a pipeline for making HTTP requests to the Azure Storage blob Service. + @Test + public void exampleNewPipeline() throws MalformedURLException { + // This shows how to wire in your own logging mechanism. Here we use the built in java logger. + Logger logger = Logger.getGlobal(); + + /* + Create/configure a request pipeline options object. All PipelineOptions' fields are optional; + reasonable defaults are set for anything you do not specify. + */ + PipelineOptions po = new PipelineOptions(); + + /* + Set RetryOptions to control how HTTP requests are retried when retryable failures occur. + Here we: + - Use exponential backoff as opposed to linear. + - Try at most 3 times to perform the operation (set to 1 to disable retries). + - Maximum 3 seconds allowed for any single try. + - Backoff delay starts at 1 second. + - Maximum delay between retries is 3 seconds. + - We will not retry against a secondary host. + */ + po.withRequestRetryOptions(new RequestRetryOptions(RetryPolicyType.EXPONENTIAL, 3, 3, + 1000L, 3000L, null)); + + /* + Set LoggingOptions to control how each HTTP request and its response is logged. A successful response taking + more than 200ms will be logged as a warning. + */ + po.withLoggingOptions(new LoggingOptions(200)); + + // Set LogOptions to control what & where all pipeline log events go. + po.withLogger(new HttpPipelineLogger() { + @Override + public HttpPipelineLogLevel minimumLogLevel() { + // Log all events from informational to more severe. + return HttpPipelineLogLevel.INFO; + } + + @Override + public void log(HttpPipelineLogLevel httpPipelineLogLevel, String s, Object... objects) { + // This function is called to log each event. It is not called for filtered-out severities. 
+ Level level = null; + if (httpPipelineLogLevel == HttpPipelineLogLevel.ERROR) { + level = Level.SEVERE; + } else if (httpPipelineLogLevel == HttpPipelineLogLevel.WARNING) { + level = Level.WARNING; + } else if (httpPipelineLogLevel == HttpPipelineLogLevel.INFO) { + level = Level.INFO; + } else if (httpPipelineLogLevel == HttpPipelineLogLevel.OFF) { + level = Level.OFF; + } + logger.log(level, s); + } + }); + + /* + Create a request pipeline object configured with credentials and with pipeline options. Once created, a + pipeline object is thread-safe and can be safely used with many XxxURL objects simultaneously. A pipeline + always requires some credential object. + */ + HttpPipeline p = ServiceURL.createPipeline(new AnonymousCredentials(), po); + + // Once you've created a pipeline object, associate it with an XxxURL object so that you can perform HTTP + // requests with it. + URL u = new URL("https://myaccount.blob.core.windows.net"); + ServiceURL serviceURL = new ServiceURL(u, p); + + // Use the serviceURL as desired... + + /* + NOTE: When you using an XxxURL object to create another XxxURl object, the new XxxURL object inherits the same + pipeline object as its parent. For example, the containerURL and blobURL objects below all share the same + pipeline. Any operations you perform with these objects will share the same behavior configured above. + */ + ContainerURL containerURL = serviceURL.createContainerURL("mycontainer"); + BlobURL blobURL = containerURL.createBlobURL("ReadMe.txt"); + + /* + If you would like to perform some operations with different behavior, create a new pipeline object and associate + it with a new XxxURL object by passing the new pipeline to the XxxURL object's withPipeline method. + */ + + /* + In this example, we reconfigure the retry policies, create a new pipeline, and then create a new ContainerURL + object that has the same URL as its parent. + + Here we: + - Use exponential backoff as opposed to linear. 
+ - Try at most times to perform the operation (set to 1 to disable retries). + - Maximum 1 minute allowed for any single try. + - Backoff delay starts at 5 seconds. + - Maximum delay between retries is 10 seconds. + - We will not retry against a secondary host. + */ + po.withRequestRetryOptions(new RequestRetryOptions(RetryPolicyType.EXPONENTIAL, 4, 60, + 5000L, 10000L, null)); + ContainerURL newContainerURL = containerURL.withPipeline( + ServiceURL.createPipeline(new AnonymousCredentials(), po)); + + /* + Now any XxxBlobURL object created using newContainerURL inherits the pipeline with the new retry policy. + */ + BlobURL newBlobURL = newContainerURL.createBlobURL("ReadMe2.txt"); + } + + @Test + /* + * This example shows how to handle errors thrown by various XxxURL methods. Any client-side error will be + * propagated unmodified. However, any response from the service with an unexpected status code will be wrapped in a + * StorageException. If the pipeline includes the RequestRetryFactory, which is the default, some of these errors + * will be automatically retried if it makes sense to do so. The StorageException type exposes rich error + * information returned by the service. + */ + public void exampleStorageError() throws MalformedURLException { + ContainerURL containerURL = new ContainerURL(new URL("http://myaccount.blob.core.windows.net/mycontainer"), + StorageURL.createPipeline(new AnonymousCredentials(), new PipelineOptions())); + + containerURL.create(null, null, null) + // An error occurred. + .onErrorResumeNext(throwable -> { + // Check if this error is from the service. + if (throwable instanceof StorageException) { + StorageException exception = (StorageException) throwable; + // StorageErrorCode defines constants corresponding to all error codes returned by the service. + if (exception.errorCode() == StorageErrorCode.CONTAINER_BEING_DELETED) { + // Log more detailed information. 
+ System.out.println("Extended details: " + exception.message()); + + // Examine the raw response. + HttpResponse response = exception.response(); + } else if (exception.errorCode() == StorageErrorCode.CONTAINER_ALREADY_EXISTS) { + // Process the error + } + } + // We just fake a successful response to prevent the example from crashing. + return Single.just( + new ContainerCreateResponse(null, 200, null, null, null)); + }) + /* + This will synchronize all the above operations. This is strongly discouraged for use in production as + it eliminates the benefits of asynchronous IO. We use it here to enable the sample to complete and + demonstrate its effectiveness. + */ + .blockingGet(); + } + + /* + This example shows how to break a URL into its parts so you can examine and/or change some of its values and then + construct a new URL. + */ + @Test + public void exampleBlobURLParts() throws MalformedURLException, UnknownHostException { + /* + Start with a URL that identifies a snapshot of a blob in a container and includes a Shared Access Signature + (SAS). + */ + URL u = new URL("https://myaccount.blob.core.windows.net/mycontainter/ReadMe.txt?" + + "snapshot=2011-03-09T01:42:34.9360000Z" + + "&sv=2015-02-21&sr=b&st=2111-01-09T01:42:34Z&se=2222-03-09T01:42:34Z&sp=rw" + + "&sip=168.1.5.60-168.1.5.70&spr=https,http&si=myIdentifier&ss=bf&srt=s" + + "&sig=92836758923659283652983562=="); + + // You can parse this URL into its constituent parts: + BlobURLParts parts = URLParser.parse(u); + + // Now, we access the parts (this example prints them). 
+ System.out.println(String.join("\n", + parts.host(), + parts.containerName(), + parts.blobName(), + parts.snapshot())); + System.out.println(""); + SASQueryParameters sas = parts.sasQueryParameters(); + System.out.println(String.join("\n", + sas.version(), + sas.resource(), + sas.startTime().toString(), + sas.expiryTime().toString(), + sas.permissions(), + sas.ipRange().toString(), + sas.protocol().toString(), + sas.identifier(), + sas.services(), + sas.signature())); + + // You can then change some of the fields and construct a new URL. + parts.withSasQueryParameters(null) // Remove the SAS query parameters. + .withSnapshot(null) // Remove the snapshot timestamp. + .withContainerName("othercontainer"); // Change the container name. + // In this example, we'll keep the blob name as it is. + + // Construct a new URL from the parts: + URL newURL = parts.toURL(); + System.out.println(newURL); + // NOTE: You can pass the new URL to the constructor for any XxxURL to manipulate the resource. + } + + // This example shows how to create and use an Azure Storage account Shared Access Signature(SAS). + @Test + public void exampleAccountSASSignatureValues() throws InvalidKeyException, MalformedURLException { + // From the Azure portal, get your Storage account's name and account key. + String accountName = getAccountName(); + String accountKey = getAccountKey(); + + // Use your Storage account's name and key to create a credential object; this is required to sign a SAS. + SharedKeyCredentials credential = new SharedKeyCredentials(accountName, accountKey); + + /* + Set the desired SAS signature values and sign them with the shared key credentials to get the SAS query + parameters. + */ + AccountSASSignatureValues values = new AccountSASSignatureValues(); + values.withProtocol(SASProtocol.HTTPS_ONLY) // Users MUST use HTTPS (not HTTP). + .withExpiryTime(OffsetDateTime.now().plusDays(2)); // 2 days before expiration. 
+ + AccountSASPermission permission = new AccountSASPermission() + .withRead(true) + .withList(true); + values.withPermissions(permission.toString()); + + AccountSASService service = new AccountSASService() + .withBlob(true); + values.withServices(service.toString()); + + AccountSASResourceType resourceType = new AccountSASResourceType() + .withContainer(true) + .withObject(true); + values.withResourceTypes(resourceType.toString()); + + SASQueryParameters params = values.generateSASQueryParameters(credential); + + // Calling encode will generate the query string. + String encodedParams = params.encode(); + + String urlToSendToSomeone = String.format(Locale.ROOT, "https://%s.blob.core.windows.net?%s", + accountName, encodedParams); + // At this point, you can send the urlToSendSomeone to someone via email or any other mechanism you choose. + + // *************************************************************************************************** + + // When someone receives the URL, the access the SAS-protected resource with code like this: + URL u = new URL(urlToSendToSomeone); + + /* + Create a ServiceURL object that wraps the serviceURL (and its SAS) and a pipeline. When using SAS URLs, + AnonymousCredentials are required. + */ + ServiceURL serviceURL = new ServiceURL(u, + StorageURL.createPipeline(new AnonymousCredentials(), new PipelineOptions())); + // Now, you can use this serviceURL just like any other to make requests of the resource. + } + + // This example shows how to create and use a Blob Service Shared Access Signature (SAS). + @Test + public void exampleBlobSASSignatureValues() throws InvalidKeyException, MalformedURLException { + // From the Azure portal, get your Storage account's name and account key. + String accountName = getAccountName(); + String accountKey = getAccountKey(); + + // Use your Storage account's name and key to create a credential object; this is required to sign a SAS. 
+ SharedKeyCredentials credential = new SharedKeyCredentials(accountName, accountKey); + + // This is the name of the container and blob that we're creating a SAS to. + String containerName = "mycontainer"; // Container names require lowercase. + String blobName = "HelloWorld.txt"; // Blob names can be mixed case. + + /* + Set the desired SAS signature values and sign them with the shared key credentials to get the SAS query + parameters. + */ + ServiceSASSignatureValues values = new ServiceSASSignatureValues() + .withProtocol(SASProtocol.HTTPS_ONLY) // Users MUST use HTTPS (not HTTP). + .withExpiryTime(OffsetDateTime.now().plusDays(2)) // 2 days before expiration. + .withContainerName(containerName) + .withBlobName(blobName); + + /* + To produce a container SAS (as opposed to a blob SAS), assign to Permissions using ContainerSASPermissions, and + make sure the blobName field is null (the default). + */ + BlobSASPermission permission = new BlobSASPermission() + .withRead(true) + .withAdd(true) + .withWrite(true); + values.withPermissions(permission.toString()); + + SASQueryParameters params = values.generateSASQueryParameters(credential); + + // Calling encode will generate the query string. + String encodedParams = params.encode(); + + String urlToSendToSomeone = String.format(Locale.ROOT, "https://%s.blob.core.windows.net/%s/%s?%s", + accountName, containerName, blobName, encodedParams); + // At this point, you can send the urlToSendSomeone to someone via email or any other mechanism you choose. + + // *************************************************************************************************** + + // When someone receives the URL, the access the SAS-protected resource with code like this: + URL u = new URL(urlToSendToSomeone); + + /* + Create a BlobURL object that wraps the blobURL (and its SAS) and a pipeline. When using SAS URLs, + AnonymousCredentials are required. 
+ */ + BlobURL blobURL = new BlobURL(u, + StorageURL.createPipeline(new AnonymousCredentials(), new PipelineOptions())); + // Now, you can use this blobURL just like any other to make requests of the resource. + } + + // This example shows how to manipulate a container's permissions. + @Test + public void exampleContainerURL_SetPermissions() throws InvalidKeyException, MalformedURLException { + // From the Azure portal, get your Storage account's name and account key. + String accountName = getAccountName(); + String accountKey = getAccountKey(); + + // Use your Storage account's name and key to create a credential object; this is required to sign a SAS. + SharedKeyCredentials credential = new SharedKeyCredentials(accountName, accountKey); + + // Create a containerURL object that wraps the container's URL and a default pipeline. + URL u = new URL(String.format(Locale.ROOT, "https://%s.blob.core.windows.net/myjavacontainerpermissions" + + System.currentTimeMillis(), accountName)); + ContainerURL containerURL = new ContainerURL(u, StorageURL.createPipeline(credential, new PipelineOptions())); + + /* + Create a URL that references a to-be-created blob in your Azure Storage account's container. This returns a + BlockBlobURL object that wraps the blob's URL and a request pipeline (inherited from containerURL). + */ + BlockBlobURL blobURL = containerURL.createBlockBlobURL("HelloWorld.txt"); + + // A blob URL with anonymous credentials to demonstrate public access. 
+ BlobURL anonymousURL = new BlobURL(blobURL.toURL(), + StorageURL.createPipeline(new AnonymousCredentials(), new PipelineOptions())); + + String data = "Hello World!"; + + // Create the container (with no metadata and no public access) + containerURL.create(null, null, null) + .flatMap(containersCreateResponse -> + blobURL.upload(Flowable.just(ByteBuffer.wrap(data.getBytes())), data.length(), + null, null, null, null) + ) + .flatMap(blockBlobUploadResponse -> + // Attempt to read the blob with anonymous credentials. + anonymousURL.download(null, null, false, null) + ) + .ignoreElement() + .onErrorResumeNext(throwable -> { + /* + We expected this error because the service returns an HTTP 404 status code when a blob exists but + the request does not have permission to access it. + */ + if (throwable instanceof RestException && + ((RestException) throwable).response().statusCode() == 404) { + // This is how we change the container's permission to allow public/anonymous access. + return containerURL.setAccessPolicy(PublicAccessType.BLOB, null, null, null) + .ignoreElement(); + } else { + return Completable.error(throwable); + } + }) + /* + Container property changes may take up to 15 seconds to take effect. It would also be possible to poll + the container properties to check for the access policy to be updated. See the startCopy example for + an example of this pattern. + */ + .delay(31, TimeUnit.SECONDS) + // Now this will work. + .andThen(anonymousURL.download(null, null, + false, null)) + .flatMap(blobDownloadResponse -> + // Delete the container and the blob within in. + containerURL.delete(null, null)) + /* + This will synchronize all the above operations. This is strongly discouraged for use in production as + it eliminates the benefits of asynchronous IO. We use it here to enable the sample to complete and + demonstrate its effectiveness. + */ + .blockingGet(); + } + + // This example shows how to perform operations on blobs conditionally. 
+ @Test + public void exampleBlobAccessConditions() throws MalformedURLException, InvalidKeyException { + // From the Azure portal, get your Storage account's name and account key. + String accountName = getAccountName(); + String accountKey = getAccountKey(); + + // Create a BlockBlobURL object that wraps a blob's URL and a default pipeline. + URL u = new URL(String.format(Locale.ROOT, "https://%s.blob.core.windows.net/", accountName)); + ServiceURL s = new ServiceURL(u, + StorageURL.createPipeline(new SharedKeyCredentials(accountName, accountKey), new PipelineOptions())); + ContainerURL containerURL = s.createContainerURL("myjavacontaineraccessconditions" + + System.currentTimeMillis()); + BlockBlobURL blobURL = containerURL.createBlockBlobURL("Data.txt"); + + // Create the container (unconditionally; succeeds) + containerURL.create(null, null, null) + .flatMap(containersCreateResponse -> + // Create the blob (unconditionally; succeeds) + blobURL.upload(Flowable.just(ByteBuffer.wrap("Text-1".getBytes())), "Text-1".length(), + null, null, null, null)) + .flatMap(blockBlobUploadResponse -> { + System.out.println("Success: " + blockBlobUploadResponse.statusCode()); + + // Download blob content if the blob has been modified since we uploaded it (fails). + return blobURL.download(null, + new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince( + blockBlobUploadResponse.headers().lastModified())), + + false, null); + }) + .onErrorResumeNext(throwable -> { + if (throwable instanceof RestException) { + System.out.println("Failure: " + ((RestException) throwable).response().statusCode()); + } else { + return Single.error(throwable); // Network failure. 
+ } + // Download the blob content if the blob hasn't been modified in the last 24 hours (fails): + return blobURL.download(null, + new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfUnmodifiedSince( + OffsetDateTime.now().minusDays(1))), + false, null); + }) + /* + onErrorResume next expects to return a Single of the same type. Here, we are changing operations, which + means we will get a different return type and cannot directly recover from the error. To solve this, + we go through a completable which will give us more flexibility with types. + */ + .ignoreElement() + .onErrorResumeNext(throwable -> { + if (throwable instanceof RestException) { + System.out.println("Failure: " + ((RestException) throwable).response().statusCode()); + } else { + return Completable.error(throwable); + } + // We've logged the error, and now returning an empty Completable allows us to change course. + return Completable.complete(); + }) + // Get the blob properties to retrieve the current ETag. + .andThen(blobURL.getProperties(null, null)) + .flatMap(getPropertiesResponse -> + /* + Upload new content if the blob hasn't changed since the version identified by the ETag + (succeeds). 
+ */ + blobURL.upload(Flowable.just(ByteBuffer.wrap("Text-2".getBytes())), "Text-2".length(), + null, null, + new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfMatch( + getPropertiesResponse.headers().eTag())), null)) + .flatMap(blockBlobUploadResponse -> { + System.out.println("Success: " + blockBlobUploadResponse.statusCode()); + + // Download content if it has changed since the version identified by ETag (fails): + return blobURL.download(null, + new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfNoneMatch( + blockBlobUploadResponse.headers().eTag())), false, null); + }) + .ignoreElement() + .onErrorResumeNext(throwable -> { + if (throwable instanceof RestException) { + System.out.println("Failure: " + ((RestException) throwable).response().statusCode()); + } else { + return Completable.error(throwable); + } + // We've logged the error, and now returning an empty Completable allows us to change course. + return Completable.complete(); + }).andThen( + // Delete the blob if it exists (succeeds). + blobURL.delete(DeleteSnapshotsOptionType.INCLUDE, + new BlobAccessConditions().withModifiedAccessConditions( + // Wildcard will match any etag. + new ModifiedAccessConditions().withIfMatch("*")), null)) + .flatMap(blobDeleteResponse -> { + System.out.println("Success: " + blobDeleteResponse.statusCode()); + return containerURL.delete(null, null); + }) + /* + This will synchronize all the above operations. This is strongly discouraged for use in production as + it eliminates the benefits of asynchronous IO. We use it here to enable the sample to complete and + demonstrate its effectiveness. + */ + .blockingGet(); + } + + // This example shows how to create a container with metadata and then how to read & update the metadata. 
+ @Test + public void exampleMetadata_containers() throws MalformedURLException, InvalidKeyException { + // From the Azure portal, get your Storage account's name and account key. + String accountName = getAccountName(); + String accountKey = getAccountKey(); + + // Create a ContainerURL object that wraps a container's URL and a default pipeline. + URL u = new URL(String.format(Locale.ROOT, "https://%s.blob.core.windows.net/", accountName)); + ServiceURL s = new ServiceURL(u, + StorageURL.createPipeline(new SharedKeyCredentials(accountName, accountKey), new PipelineOptions())); + ContainerURL containerURL = s.createContainerURL("myjavacontainercontainermetadata" + + System.currentTimeMillis()); + + /* + Create a container with some metadata (string key/value pairs). + NOTE: Metadata key names are always converted to lowercase before being sent to the Storage Service. Therefore, + you should always use lowercase letters; especially when querying a map for a metadata key. + */ + Metadata metadata = new Metadata(); + metadata.put("createdby", "Rick"); + metadata.put("createdon", "4/13/18"); + containerURL.create(metadata, null, null) + .flatMap(containersCreateResponse -> + // Query the container's metadata. + containerURL.getProperties(null, null) + ) + .flatMap(containersGetPropertiesResponse -> { + Metadata receivedMetadata = new Metadata(containersGetPropertiesResponse.headers().metadata()); + + // Show the container's metadata. + System.out.println(receivedMetadata); + + // Update the metadata and write it back to the container. + receivedMetadata.put("createdby", "Mary"); // NOTE: The key name is in all lowercase. + return containerURL.setMetadata(receivedMetadata, null, null); + }) + .flatMap(response -> containerURL.delete(null, null)) + /* + This will synchronize all the above operations. This is strongly discouraged for use in production as + it eliminates the benefits of asynchronous IO. 
We use it here to enable the sample to complete and + demonstrate its effectiveness. + */ + .blockingGet(); + + // NOTE: The SetMetadata & SetProperties methods update the container's ETag & LastModified properties. + } + + /* + This example shows how to create a blob with metadata and then how to read & update the blob's read-only properties + and metadata. + */ + @Test + public void exampleMetadata_blob() throws MalformedURLException, InvalidKeyException { + // From the Azure portal, get your Storage account's name and account key. + String accountName = getAccountName(); + String accountKey = getAccountKey(); + + // Create a BlockBlobURL object that wraps a blob's URL and a default pipeline. + URL u = new URL(String.format(Locale.ROOT, "https://%s.blob.core.windows.net/", accountName)); + ServiceURL s = new ServiceURL(u, + StorageURL.createPipeline(new SharedKeyCredentials(accountName, accountKey), new PipelineOptions())); + ContainerURL containerURL = s.createContainerURL("myjavacontainerblobmetadata" + System.currentTimeMillis()); + BlockBlobURL blobURL = containerURL.createBlockBlobURL("Data.txt"); + + // Create the container. + containerURL.create(null, null, null) + .flatMap(containersCreateResponse -> { + /* + Create the blob with metadata (string key/value pairs). + NOTE: Metadata key names are always converted to lowercase before being sent to the Storage + Service. Therefore, you should always use lowercase letters; especially when querying a map for + a metadata key. + */ + Metadata metadata = new Metadata(); + metadata.put("createdby", "Rick"); + metadata.put("createdon", "4/13/18"); + return blobURL.upload(Flowable.just(ByteBuffer.wrap("Text-1".getBytes())), "Text-1".length(), + null, metadata, null, null); + }) + .flatMap(response -> + // Query the blob's properties and metadata. + blobURL.getProperties(null, null)) + .flatMap(response -> { + // Show some of the blob's read-only properties. 
+ System.out.println(response.headers().blobType()); + System.out.println(response.headers().eTag()); + System.out.println(response.headers().lastModified()); + + // Show the blob's metadata. + System.out.println(response.headers().metadata()); + + // Update the blob's metadata and write it back to the blob. + Metadata receivedMetadata = new Metadata(response.headers().metadata()); + receivedMetadata.put("createdby", "Joseph"); + return blobURL.setMetadata(receivedMetadata, null, null); + }) + .flatMap(response -> + // Delete the container. + containerURL.delete(null, null) + ) + /* + This will synchronize all the above operations. This is strongly discouraged for use in production as + it eliminates the benefits of asynchronous IO. We use it here to enable the sample to complete and + demonstrate its effectiveness. + */ + .blockingGet(); + + // NOTE: The SetMetadata method updates the blob's ETag & LastModified properties. + } + + // This example shows how to create a blob with HTTP Headers and then how to read & update the blob's HTTP Headers. + @Test + public void exampleBlobHTTPHeaders() throws MalformedURLException, InvalidKeyException { + // From the Azure portal, get your Storage account's name and account key. + String accountName = getAccountName(); + String accountKey = getAccountKey(); + + // Create a BlockBlobURL object that wraps a blob's URL and a default pipeline. + URL u = new URL(String.format(Locale.ROOT, "https://%s.blob.core.windows.net/", accountName)); + ServiceURL s = new ServiceURL(u, + StorageURL.createPipeline(new SharedKeyCredentials(accountName, accountKey), new PipelineOptions())); + + ContainerURL containerURL = s.createContainerURL("myjavacontainerheaders" + System.currentTimeMillis()); + BlockBlobURL blobURL = containerURL.createBlockBlobURL("Data.txt"); + + // Create the container. + containerURL.create(null, null, null) + .flatMap(containersCreateResponse -> { + /* + Create the blob with HTTP headers. 
+ */ + BlobHTTPHeaders headers = new BlobHTTPHeaders().withBlobContentDisposition("attachment") + .withBlobContentType("text/html; charset=utf-8"); + return blobURL.upload(Flowable.just(ByteBuffer.wrap("Text-1".getBytes())), "Text-1".length(), + headers, null, null, null); + }) + .flatMap(response -> + // Query the blob's properties and metadata. + blobURL.getProperties(null, null)) + .flatMap(response -> { + // Show some of the blob's read-only properties. + System.out.println(response.headers().blobType()); + System.out.println(response.headers().eTag()); + System.out.println(response.headers().lastModified()); + + // Show the blob's HTTP headers. + System.out.println(response.headers().contentType()); + System.out.println(response.headers().contentDisposition()); + + /* + Update the blob's properties and write it back to the blob. + NOTE: If one of the HTTP properties is updated, any that are not included in the update request + will be cleared. In order to preserve the existing HTTP properties, they must be re-set along with + the added or updated properties. + */ + BlobHTTPHeaders headers = new BlobHTTPHeaders().withBlobContentType("text/plain"); + return blobURL.setHTTPHeaders(headers, null, null); + }) + .flatMap(response -> + // Delete the container. + containerURL.delete(null, null) + ) + /* + This will synchronize all the above operations. This is strongly discouraged for use in production as + it eliminates the benefits of asynchronous IO. We use it here to enable the sample to complete and + demonstrate its effectiveness. + */ + .blockingGet(); + + // NOTE: The SetHTTPHeaders method updates the blob's ETag & LastModified properties. + } + + /* + This example shows how to upload a lot of data (in blocks) to a blob. A block blob can have a maximum of 50,000 + blocks; each block can have a maximum of 100MB. Therefore, the maximum size of a block blob is slightly more than + 4.75TB (100MB X 50,000 blocks). 
+ NOTE: The TransferManager class contains methods which will upload large blobs in parallel using + stageBlock/commitBlockList. We recommend you use those methods if possible. + */ + @Test + public void exampleBlockBlobURL() throws MalformedURLException, InvalidKeyException { + // From the Azure portal, get your Storage account's name and account key. + String accountName = getAccountName(); + String accountKey = getAccountKey(); + + // Create a BlockBlobURL object that wraps a blob's URL and a default pipeline. + URL u = new URL(String.format(Locale.ROOT, "https://%s.blob.core.windows.net/", accountName)); + ServiceURL s = new ServiceURL(u, + StorageURL.createPipeline(new SharedKeyCredentials(accountName, accountKey), new PipelineOptions())); + + ContainerURL containerURL = s.createContainerURL("myjavacontainerblock" + System.currentTimeMillis()); + BlockBlobURL blobURL = containerURL.createBlockBlobURL("Data.txt"); + + String[] data = {"Michael", "Gabriel", "Raphael", "John"}; + + // Create the container. We convert to an Observable to be able to work with the block list effectively. + containerURL.create(null, null, null) + .flatMapObservable(response -> + // Create an Observable that will yield each of the Strings one at a time. + Observable.fromIterable(Arrays.asList(data)) + ) + // Items emitted by an Observable that results from a concatMap call will preserve the original order. + .concatMapEager(block -> { + /* + Generate a base64 encoded blockID. Note that all blockIDs must be the same length. It is generally + considered best practice to use UUIDs for the blockID. + */ + String blockId = Base64.getEncoder().encodeToString( + UUID.randomUUID().toString().getBytes()); + + /* + Upload a block to this blob specifying the BlockID and its content (up to 100MB); this block is + uncommitted. + NOTE: The Flowable containing the data must be replayable to support retries. That is, it must + yield the same data every time it is subscribed to. 
+ NOTE: It is imperative that the provided length match the actual length of the data exactly. + */ + return blobURL.stageBlock(blockId, Flowable.just(ByteBuffer.wrap(block.getBytes())), + block.length(), null, null) + /* + We do not care for any data on the response object, but we do want to keep track of the + ID. + */ + .map(x -> blockId).toObservable(); + }) + // Gather all of the IDs emitted by the previous observable into a single list. + .collectInto(new ArrayList<>(data.length), (BiConsumer, String>) ArrayList::add) + .flatMap(idList -> { + /* + By this point, all the blocks are uploaded and we have an ordered list of their IDs. Here, we + atomically commit the whole list. + NOTE: The block list order need not match the order in which the blocks were uploaded. The order + of IDs in the commitBlockList call will determine the structure of the blob. + */ + return blobURL.commitBlockList(idList, null, null, null, null); + }) + .flatMap(response -> + /* + For the blob, show each block (ID and size) that is a committed part of it. It is also possible + to include blocks that have been staged but not committed. + */ + blobURL.getBlockList(BlockListType.ALL, null, null)) + .flatMap(response -> + // Delete the container + containerURL.delete(null, null)) + /* + This will synchronize all the above operations. This is strongly discouraged for use in production as + it eliminates the benefits of asynchronous IO. We use it here to enable the sample to complete and + demonstrate its effectiveness. + */ + .blockingGet(); + } + + /* + This example shows how to append data (in blocks) to an append blob. An append blob can have a maximum of 50,000 + blocks; each block can have a maximum of 100MB. Therefore, the maximum size of an append blob is slightly more than + 4.75TB (100MB X 50,000 blocks). + */ + @Test + public void exampleAppendBlobURL() throws MalformedURLException, InvalidKeyException { + // From the Azure portal, get your Storage account's name and account key. 
+ String accountName = getAccountName(); + String accountKey = getAccountKey(); + + // Create a BlockBlobURL object that wraps a blob's URL and a default pipeline. + URL u = new URL(String.format(Locale.ROOT, "https://%s.blob.core.windows.net/", accountName)); + ServiceURL s = new ServiceURL(u, + StorageURL.createPipeline(new SharedKeyCredentials(accountName, accountKey), new PipelineOptions())); + + ContainerURL containerURL = s.createContainerURL("myjavacontainerappend" + System.currentTimeMillis()); + AppendBlobURL blobURL = containerURL.createAppendBlobURL("Data.txt"); + + // Create the container. + containerURL.create(null, null, null) + .flatMap(response -> + // Create the append blob. This creates a zero-length blob that we can now append to. + blobURL.create(null, null, null, null)) + .toObservable() + .flatMap(response -> + // This range will act as our for loop to create 5 blocks + Observable.range(0, 5)) + .concatMapCompletable(i -> { + String text = String.format(Locale.ROOT, "Appending block #%d\n", i); + /* + NOTE: The Flowable containing the data must be replayable to support retries. That is, it must + yield the same data every time it is subscribed to. + */ + return blobURL.appendBlock(Flowable.just(ByteBuffer.wrap(text.getBytes())), text.length(), null, + null).ignoreElement(); + }) + // Download the blob. + .andThen(blobURL.download(null, null, false, null)) + .flatMap(response -> + // Print out the data. + FlowableUtil.collectBytesInBuffer(response.body(null)) + .doOnSuccess(bytes -> + System.out.println(new String(bytes.array()))) + ) + .flatMap(response -> + // Delete the container. + containerURL.delete(null, null) + ) + /* + This will synchronize all the above operations. This is strongly discouraged for use in production as + it eliminates the benefits of asynchronous IO. We use it here to enable the sample to complete and + demonstrate its effectiveness. + */ + .blockingGet(); + + } + + // This example shows how to work with Page Blobs. 
+ @Test + public void examplePageBlobURL() throws MalformedURLException, InvalidKeyException { + // From the Azure portal, get your Storage account's name and account key. + String accountName = getAccountName(); + String accountKey = getAccountKey(); + + // Create a BlockBlobURL object that wraps a blob's URL and a default pipeline. + URL u = new URL(String.format(Locale.ROOT, "https://%s.blob.core.windows.net/", accountName)); + ServiceURL s = new ServiceURL(u, + StorageURL.createPipeline(new SharedKeyCredentials(accountName, accountKey), new PipelineOptions())); + ContainerURL containerURL = s.createContainerURL("myjavacontainerpage" + System.currentTimeMillis()); + PageBlobURL blobURL = containerURL.createPageBlobURL("Data.txt"); + + // Create the container. + containerURL.create(null, null, null) + .flatMap(response -> + // Create the page blob with 4 512-byte pages. + blobURL.create(4 * PageBlobURL.PAGE_BYTES, null, null, + null, null, null)) + .flatMap(response -> { + /* + Upload data to a page. + NOTE: The page range must start on a multiple of the page size and end on + (multiple of page size) - 1. + */ + byte[] data = new byte[PageBlobURL.PAGE_BYTES]; + for (int i = 0; i < PageBlobURL.PAGE_BYTES; i++) { + data[i] = 'a'; + } + /* + NOTE: The Flowable containing the data must be replayable to support retries. That is, it must + yield the same data every time it is subscribed to. + */ + return blobURL.uploadPages(new PageRange().withStart(0).withEnd(PageBlobURL.PAGE_BYTES - 1), + Flowable.just(ByteBuffer.wrap(data)), null, null); + }) + .flatMap(response -> { + // Upload data to the third page in the blob. 
+ byte[] data = new byte[PageBlobURL.PAGE_BYTES]; + for (int i = 0; i < PageBlobURL.PAGE_BYTES; i++) { + data[i] = 'b'; + } + return blobURL.uploadPages(new PageRange().withStart(2 * PageBlobURL.PAGE_BYTES) + .withEnd(3 * PageBlobURL.PAGE_BYTES - 1), + Flowable.just(ByteBuffer.wrap(data)), null, null); + }) + .flatMap(response -> + // Get the page ranges which have valid data. + blobURL.getPageRanges(null, null, null)) + .flatMap(response -> { + // Print the pages that are valid. + for (PageRange range : response.body().pageRange()) { + System.out.println(String.format(Locale.ROOT, "Start=%d, End=%d\n", range.start(), + range.end())); + } + + // Clear and invalidate the first range. + return blobURL.clearPages(new PageRange().withStart(0).withEnd(PageBlobURL.PAGE_BYTES - 1), + null, null); + }) + .flatMap(response -> + // Get the page ranges which have valid data. + blobURL.getPageRanges(null, null, null)) + .flatMap(response -> { + // Print the pages that are valid. + for (PageRange range : response.body().pageRange()) { + System.out.println(String.format(Locale.ROOT, "Start=%d, End=%d\n", range.start(), + range.end())); + } + + // Get the content of the whole blob. + return blobURL.download(null, null, false, null); + }) + .flatMap(response -> + // Print the received content. + FlowableUtil.collectBytesInBuffer(response.body(null)) + .doOnSuccess(data -> + System.out.println(new String(data.array()))) + .flatMap(data -> + // Delete the container. + containerURL.delete(null, null)) + ) + /* + This will synchronize all the above operations. This is strongly discouraged for use in production as + it eliminates the benefits of asynchronous IO. We use it here to enable the sample to complete and + demonstrate its effectiveness. + */ + .blockingGet(); + } + + /* + This example shows how to create a blob, take a snapshot of it, update the base blob, read from the blob snapshot, + list blobs with their snapshots, and how to delete blob snapshots. 
+ */ + @Test + public void example_blobSnapshot() throws MalformedURLException, InvalidKeyException { + // From the Azure portal, get your Storage account's name and account key. + String accountName = getAccountName(); + String accountKey = getAccountKey(); + + // Create a BlockBlobURL object that wraps a blob's URL and a default pipeline. + URL u = new URL(String.format(Locale.ROOT, "https://%s.blob.core.windows.net/", accountName)); + ServiceURL s = new ServiceURL(u, + StorageURL.createPipeline(new SharedKeyCredentials(accountName, accountKey), new PipelineOptions())); + + ContainerURL containerURL = s.createContainerURL("myjavacontainersnapshot" + System.currentTimeMillis()); + BlockBlobURL blobURL = containerURL.createBlockBlobURL("Original.txt"); + + // Create the container. + containerURL.create(null, null, null) + .flatMap(response -> + // Create the original blob. + blobURL.upload(Flowable.just(ByteBuffer.wrap("Some text".getBytes())), "Some text".length(), + null, null, null, null)) + .flatMap(response -> + // Create a snapshot of the original blob. + blobURL.createSnapshot(null, null, null)) + .flatMap(response -> + blobURL.upload(Flowable.just(ByteBuffer.wrap("New text".getBytes())), "New text".length(), + null, null, null, null) + .flatMap(response1 -> + blobURL.download(null, null, false, null)) + .flatMap(response1 -> + // Print the received content. + FlowableUtil.collectBytesInBuffer(response1.body(null)) + .doOnSuccess(data -> + System.out.println(new String(data.array())))) + .flatMap(response1 -> { + // Show the snapshot blob via original blob URI & snapshot time. + BlockBlobURL snapshotURL = blobURL.withSnapshot(response.headers().snapshot()); + + /* + FYI: You can get the base blob URL from one of its snapshots by passing null to + withSnapshot. 
+ */ + BlockBlobURL baseBlob = snapshotURL.withSnapshot(null); + + return snapshotURL + .download(null, null, false, null) + .flatMap(response2 -> + /* + List the blob(s) in our container, including their snapshots; since + a container may hold millions of blobs, this is done one segment at + a time. + */ + containerURL.listBlobsFlatSegment(null, + new ListBlobsOptions().withMaxResults(1), null)) + .flatMap(response2 -> + /* + The asynchronous requests require we use recursion to continue our + listing. + */ + listBlobsFlatHelper(containerURL, response2)) + .flatMap(response2 -> + blobURL.startCopyFromURL(snapshotURL.toURL(), null, + null, null, null)); + })) + .flatMap(response -> + // Delete the container. + containerURL.delete(null, null) + ) + /* + This will synchronize all the above operations. This is strongly discouraged for use in production as + it eliminates the benefits of asynchronous IO. We use it here to enable the sample to complete and + demonstrate its effectiveness. + */ + .blockingGet(); + } + + /* + This example shows how to add progress reporting to the upload and download of blobs. + */ + @Test + public void example_progressReporting() throws MalformedURLException, InvalidKeyException { + // From the Azure portal, get your Storage account's name and account key. + String accountName = getAccountName(); + String accountKey = getAccountKey(); + + // Create a BlockBlobURL object that wraps a blob's URL and a default pipeline. + URL u = new URL(String.format(Locale.ROOT, "https://%s.blob.core.windows.net/", accountName)); + ServiceURL s = new ServiceURL(u, + StorageURL.createPipeline(new SharedKeyCredentials(accountName, accountKey), new PipelineOptions())); + + ContainerURL containerURL = s.createContainerURL("myjavacontainerprogress" + System.currentTimeMillis()); + BlockBlobURL blobURL = containerURL.createBlockBlobURL("Data.bin"); + Flowable data = Flowable.just(ByteBuffer.wrap("Data".getBytes())); + + // Create the container. 
+ containerURL.create(null, null, null) + .flatMap(response -> + /* + In the call to upload, we add progress reporting to the flowable. Here we choose to just print + out the progress. Note that for operations with the TransferManager, progress reporting need + not be pre-applied. A ProgressReceiver may simply be set on the options, and the TransferManager + will handle coordinating the reporting between parallel requests. + */ + blobURL.upload(ProgressReporter.addProgressReporting(data, System.out::println), + 4L, null, null, null, null)) + .flatMap(response -> + blobURL.download(null, null, false, null)) + .flatMapPublisher(response -> + /* + Here we add progress reporting to the download response in the same manner. + */ + ProgressReporter.addProgressReporting(response.body(null), System.out::println)) + /* + This will synchronize all the above operations. This is strongly discouraged for use in production as + it eliminates the benefits of asynchronous IO. We use it here to enable the sample to complete and + demonstrate its effectiveness. + */ + .blockingSubscribe(); + } + + // This example shows how to copy a source document on the Internet to a blob. + @Test + public void exampleBlobURL_startCopy() throws MalformedURLException, InvalidKeyException { + // From the Azure portal, get your Storage account's name and account key. + String accountName = getAccountName(); + String accountKey = getAccountKey(); + + // Create a BlockBlobURL object that wraps a blob's URL and a default pipeline. + URL u = new URL(String.format(Locale.ROOT, "https://%s.blob.core.windows.net/", accountName)); + ServiceURL s = new ServiceURL(u, + StorageURL.createPipeline(new SharedKeyCredentials(accountName, accountKey), new PipelineOptions())); + + ContainerURL containerURL = s.createContainerURL("myjavacontainercopy" + System.currentTimeMillis()); + BlockBlobURL blobURL = containerURL.createBlockBlobURL("CopiedBlob.bin"); + + // Create the container. 
+ containerURL.create(null, null, null) + .flatMap(response -> + // Start the copy from the source url to the destination, which is the url pointed to by blobURL + blobURL.startCopyFromURL( + new URL("https://cdn2.auth0.com/docs/media/addons/azure_blob.svg"), + null, null, null, null)) + .flatMap(response -> + blobURL.getProperties(null, null)) + .flatMap(response -> + waitForCopyHelper(blobURL, response)) + .flatMap(response -> + // Delete the container we created earlier. + containerURL.delete(null, null)) + /* + This will synchronize all the above operations. This is strongly discouraged for use in production as + it eliminates the benefits of asynchronous IO. We use it here to enable the sample to complete and + demonstrate its effectiveness. + */ + .blockingGet(); + + } + + // + public Single waitForCopyHelper(BlobURL blobURL, BlobGetPropertiesResponse response) + throws InterruptedException { + System.out.println(response.headers().copyStatus()); + if (response.headers().copyStatus() == CopyStatusType.SUCCESS) { + return Single.just(response); + } + + Thread.sleep(2000); + return blobURL.getProperties(null, null) + .flatMap(response1 -> + waitForCopyHelper(blobURL, response1)); + + } + // + + /* + This example shows how to copy a large file in blocks (chunks) to a block blob and then download it from the blob + back to a file. + */ + @Test + public void exampleFileTransfer() throws IOException, InvalidKeyException { + // From the Azure portal, get your Storage account's name and account key. + String accountName = getAccountName(); + String accountKey = getAccountKey(); + + // Create a BlockBlobURL object that wraps a blob's URL and a default pipeline. 
+ URL u = new URL(String.format(Locale.ROOT, "https://%s.blob.core.windows.net/", accountName)); + ServiceURL s = new ServiceURL(u, + StorageURL.createPipeline(new SharedKeyCredentials(accountName, accountKey), new PipelineOptions())); + ContainerURL containerURL = s.createContainerURL("myjavacontainerparallelupload" + System.currentTimeMillis()); + String filename = "BigFile.bin"; + BlockBlobURL blobURL = containerURL.createBlockBlobURL(filename); + File tempFile = File.createTempFile("BigFile", ".bin"); + tempFile.deleteOnExit(); + + // Create the container. + containerURL.create(null, null, null) + .flatMap(response -> Single.using( + () -> AsynchronousFileChannel.open(tempFile.toPath(), StandardOpenOption.WRITE), + channel -> Single.fromFuture(channel + .write(ByteBuffer.wrap("Big data".getBytes()), 0)), + AsynchronousFileChannel::close + )) + .flatMap(response -> Single.using( + () -> AsynchronousFileChannel.open(tempFile.toPath(), StandardOpenOption.READ), + channel -> TransferManager.uploadFileToBlockBlob(channel, blobURL, + BlockBlobURL.MAX_STAGE_BLOCK_BYTES, null), + AsynchronousFileChannel::close) + ) + .flatMap(response -> Single.using( + () -> AsynchronousFileChannel.open(tempFile.toPath(), StandardOpenOption.WRITE), + channel -> TransferManager.downloadBlobToFile(channel, blobURL, null, null), + AsynchronousFileChannel::close) + ) + .flatMap(response -> + // Delete the container. + containerURL.delete(null, null)) + /* + This will synchronize all the above operations. This is strongly discouraged for use in production as + it eliminates the benefits of asynchronous IO. We use it here to enable the sample to complete and + demonstrate its effectiveness. + */ + .blockingGet(); + } + + /* + This example shows how to upload an arbitrary data stream to a block blob. + */ + @Test public void exampleUploadNonReplayableFlowable() throws IOException, InvalidKeyException { + // From the Azure portal, get your Storage account's name and account key. 
+ String accountName = getAccountName(); + String accountKey = getAccountKey(); + + // Create a BlockBlobURL object that wraps a blob's URL and a default pipeline. + URL u = new URL(String.format(Locale.ROOT, "https://%s.blob.core.windows.net/", accountName)); + ServiceURL s = new ServiceURL(u, + StorageURL.createPipeline(new SharedKeyCredentials(accountName, accountKey), new PipelineOptions())); + ContainerURL containerURL = s.createContainerURL("myjavacontainerparallelupload" + System.currentTimeMillis()); + String filename = "BigFile.bin"; + BlockBlobURL blobURL = containerURL.createBlockBlobURL(filename); + File tempFile = File.createTempFile("BigFile", ".bin"); + tempFile.deleteOnExit(); + + // Create the container. + containerURL.create(null, null, null) + .flatMap(response -> { + /* + We create a simple flowable for the purposes of demonstration, but the Flowable in question need not + produce a repeatable sequence of items. A network stream would be a common use for this api. + */ + Flowable data = Flowable.just(ByteBuffer.allocate(1)); + return TransferManager.uploadFromNonReplayableFlowable(data, blobURL, 4 * 1024 * 1024, 2, null); + }) + .flatMap(response -> + // Delete the container + containerURL.delete()) + /* + This will synchronize all the above operations. This is strongly discouraged for use in production as + it eliminates the benefits of asynchronous IO. We use it here to enable the sample to complete and + demonstrate its effectiveness. + */ + .blockingGet(); + } + + /* + This example shows how to download a large stream with intelligent retries. Specifically, if the connection fails + while reading, the stream automatically initiates a new downloadBlob call passing a range that starts from the last + byte successfully read before the failure. + */ + @Test + public void exampleReliableDownloadStream() throws IOException, InvalidKeyException { + // From the Azure portal, get your Storage account's name and account key. 
+ String accountName = getAccountName(); + String accountKey = getAccountKey(); + + // Create a BlockBlobURL object that wraps a blob's URL and a default pipeline. + URL u = new URL(String.format(Locale.ROOT, "https://%s.blob.core.windows.net/", accountName)); + ServiceURL s = new ServiceURL(u, + StorageURL.createPipeline(new SharedKeyCredentials(accountName, accountKey), new PipelineOptions())); + ContainerURL containerURL = s.createContainerURL("myjavacontainerretrystream" + System.currentTimeMillis()); + BlockBlobURL blobURL = containerURL.createBlockBlobURL("Data.txt"); + + ReliableDownloadOptions options = new ReliableDownloadOptions(); + options.withMaxRetryRequests(5); + + File file = File.createTempFile("tempfile", "txt"); + FileOutputStream fos = new FileOutputStream(file); + fos.write(5); + file.deleteOnExit(); + + /* + Passing ReliableDownloadOptions to a call to body() will ensure the download stream is intelligently retried in case + of failures. The returned body is still a Flowable and may be used as a normal download stream. + */ + containerURL.create(null, null, null) + .flatMap(response -> + // Upload some data to a blob + Single.using(() -> AsynchronousFileChannel.open(file.toPath()), + fileChannel -> TransferManager.uploadFileToBlockBlob(fileChannel, blobURL, + BlockBlobURL.MAX_STAGE_BLOCK_BYTES, TransferManagerUploadToBlockBlobOptions.DEFAULT), + AsynchronousFileChannel::close)) + .flatMap(response -> + blobURL.download(null, null, false, null)) + .flatMapPublisher(response -> + response.body(options)) + .lastOrError() // Place holder for processing all the intermediary data. + // After the last piece of data, clean up by deleting the container and all its contents. + .flatMap(buffer -> + // Delete the container + containerURL.delete(null, null)) + /* + This will synchronize all the above operations. This is strongly discouraged for use in production as + it eliminates the benefits of asynchronous IO. 
We use it here to enable the sample to complete and + demonstrate its effectiveness. + */ + .blockingGet(); + + } + + /* + This example demonstrates two common patterns: 1. Creating a container if it does not exist and continuing normally + if it does already exist. 2. Deleting a container if it does exist and continuing normally if it does not. + */ + @Test + public void exampleCreateContainerIfNotExists() throws MalformedURLException, InvalidKeyException { + // From the Azure portal, get your Storage account's name and account key. + String accountName = getAccountName(); + String accountKey = getAccountKey(); + + // Create a BlockBlobURL object that wraps a blob's URL and a default pipeline. + URL u = new URL(String.format(Locale.ROOT, "https://%s.blob.core.windows.net/", accountName)); + ServiceURL s = new ServiceURL(u, + StorageURL.createPipeline(new SharedKeyCredentials(accountName, accountKey), new PipelineOptions())); + ContainerURL containerURL = s.createContainerURL("myjavacontainercreateifnotexist"); + + createContainerIfNotExists(containerURL) + .flatMap(r -> { + System.out.println("Container created: " + r.toString()); + return createContainerIfNotExists(containerURL); + }) + .flatMap(r -> { + System.out.println("Container created: " + r.toString()); + return deleteContainerIfExists(containerURL); + }) + .flatMap(r -> { + System.out.println("Container deleted: " + r.toString()); + return deleteContainerIfExists(containerURL); + }) + .doOnSuccess(r -> System.out.println("Container deleted: " + r.toString())) + /* + This will synchronize all the above operations. This is strongly discouraged for use in production as + it eliminates the benefits of asynchronous IO. We use it here to enable the sample to complete and + demonstrate its effectiveness. 
+ */ + .blockingGet(); + } + + /* + The following example demonstrates a useful scenario in which it is desirable to receive listed elements as + individual items in an observable rather than a sequence of lists. + */ + @Test + public void exampleLazyEnumeration() throws MalformedURLException, InvalidKeyException { + // From the Azure portal, get your Storage account's name and account key. + String accountName = getAccountName(); + String accountKey = getAccountKey(); + + // Create a BlockBlobURL object that wraps a blob's URL and a default pipeline. + URL u = new URL(String.format(Locale.ROOT, "https://%s.blob.core.windows.net/", accountName)); + ServiceURL s = new ServiceURL(u, + StorageURL.createPipeline(new SharedKeyCredentials(accountName, accountKey), new PipelineOptions())); + ContainerURL containerURL = s.createContainerURL("myjavacontainerlistlazy" + System.currentTimeMillis()); + + containerURL.create(null, null, null).toCompletable() + .andThen(Observable.range(0, 5)) + .flatMap(integer -> { + AppendBlobURL bu = containerURL.createAppendBlobURL(integer.toString()); + return bu.create(null, null, null, null).toObservable(); + }) + .ignoreElements() + .andThen(listBlobsLazy(containerURL, null)) + .doOnNext(b -> System.out.println("Blob: " + b.name())) + .ignoreElements() + .andThen(containerURL.delete(null, null)) + /* + This will synchronize all the above operations. This is strongly discouraged for use in production as + it eliminates the benefits of asynchronous IO. We use it here to enable the sample to complete and + demonstrate its effectiveness. + */ + .blockingGet(); + } + + /* + The following is just used as a place for quick code snippets that will be included in online documentation. This + is not meant to serve as a comprehensive example as the above examples are. + */ + public void apiRefs() throws IOException, InvalidKeyException { + // + // From the Azure portal, get your Storage account's name and account key. 
+ String accountName = getAccountName(); + String accountKey = getAccountKey(); + + // Use your Storage account's name and key to create a credential object; this is used to access your account. + SharedKeyCredentials sharedKeyCredentials = new SharedKeyCredentials(accountName, accountKey); + + /* + Create a request pipeline that is used to process HTTP(S) requests and responses. It requires your account + credentials. In more advanced scenarios, you can configure telemetry, retry policies, logging, and other + options. Also you can configure multiple pipelines for different scenarios. + */ + HttpPipeline pipeline = StorageURL.createPipeline(sharedKeyCredentials, new PipelineOptions()); + + /* + From the Azure portal, get your Storage account blob service URL endpoint. + The URL typically looks like this: + */ + URL urlToBlob = new URL(String.format(Locale.ROOT, "https://%s.blob.core.windows.net", accountName)); + + // Create a ServiceURL object that wraps the service URL and a request pipeline. 
+ ServiceURL serviceURL = new ServiceURL(urlToBlob, pipeline); + // + + // + LoggingOptions loggingOptions = new LoggingOptions(2000); + RequestRetryOptions requestRetryOptions = new RequestRetryOptions(RetryPolicyType.EXPONENTIAL, 5, + 4, 1000L, 10000L, "secondary-host"); + PipelineOptions customOptions = new PipelineOptions() + .withLoggingOptions(loggingOptions) + .withRequestRetryOptions(requestRetryOptions); + StorageURL.createPipeline(new AnonymousCredentials(), customOptions); + // + + // + ContainerURL containerURL = serviceURL.createContainerURL("myjavacontainerbasic"); + + BlockBlobURL blobURL = containerURL.createBlockBlobURL("HelloWorld.txt"); + AppendBlobURL appendBlobURL = containerURL.createAppendBlobURL("Data.txt"); + PageBlobURL pageBlobURL = containerURL.createPageBlobURL("pageBlob"); + + String data = "Hello world!"; + + // Create the container on the service (with no metadata and no public access) + Single downloadResponse = containerURL.create(null, null, null) + .flatMap(containersCreateResponse -> + /* + Create the blob with string (plain text) content. + NOTE: It is imperative that the provided length matches the actual length exactly. + */ + blobURL.upload(Flowable.just(ByteBuffer.wrap(data.getBytes())), data.length(), + null, null, null, null)) + .flatMap(blobUploadResponse -> + // Download the blob's content. + blobURL.download(null, null, false, null)); + downloadResponse.flatMap(blobDownloadResponse -> + // Verify that the blob data round-tripped correctly. + FlowableUtil.collectBytesInBuffer(blobDownloadResponse.body(null)) + .doOnSuccess(byteBuffer -> { + if (byteBuffer.compareTo(ByteBuffer.wrap(data.getBytes())) != 0) { + throw new Exception("The downloaded data does not match the uploaded data."); + } + })); + downloadResponse.subscribe(); + // + + // + containerURL.create(null, null, null) + // An error occurred. + .onErrorResumeNext(throwable -> { + // Check if this error is from the service. 
+ if (throwable instanceof StorageException) { + StorageException exception = (StorageException) throwable; + // StorageErrorCode defines constants corresponding to all error codes returned by the service. + if (exception.errorCode() == StorageErrorCode.CONTAINER_BEING_DELETED) { + // Log more detailed information. + System.out.println("Extended details: " + exception.message()); + + // Examine the raw response. + HttpResponse response = exception.response(); + } else if (exception.errorCode() == StorageErrorCode.CONTAINER_ALREADY_EXISTS) { + // Process the error + } + } + // We just fake a successful response to prevent the example from crashing. + return Single.just( + new ContainerCreateResponse(null, 200, null, null, null)); + }).subscribe(); + // + + // + /* + Start with a URL that identifies a snapshot of a blob in a container and includes a Shared Access Signature + (SAS). + */ + URL u = new URL("https://myaccount.blob.core.windows.net/mycontainter/ReadMe.txt?" + + "snapshot=2011-03-09T01:42:34.9360000Z" + + "&sv=2015-02-21&sr=b&st=2111-01-09T01:42:34Z&se=2222-03-09T01:42:34Z&sp=rw" + + "&sip=168.1.5.60-168.1.5.70&spr=https,http&si=myIdentifier&ss=bf&srt=s" + + "&sig=92836758923659283652983562=="); + + // You can parse this URL into its constituent parts: + BlobURLParts parts = URLParser.parse(u); + + // Now, we access the parts (this example prints them). + System.out.println(String.join("\n", + parts.host(), + parts.containerName(), + parts.blobName(), + parts.snapshot())); + System.out.println(""); + SASQueryParameters sas = parts.sasQueryParameters(); + System.out.println(String.join("\n", + sas.version(), + sas.resource(), + sas.startTime().toString(), + sas.expiryTime().toString(), + sas.permissions(), + sas.ipRange().toString(), + sas.protocol().toString(), + sas.identifier(), + sas.services(), + sas.signature())); + + // You can then change some of the fields and construct a new URL. 
+ parts.withSasQueryParameters(null) // Remove the SAS query parameters. + .withSnapshot(null) // Remove the snapshot timestamp. + .withContainerName("othercontainer"); // Change the container name. + // In this example, we'll keep the blob name as it is. + + // Construct a new URL from the parts: + URL newURL = parts.toURL(); + System.out.println(newURL); + // NOTE: You can pass the new URL to the constructor for any XxxURL to manipulate the resource. + // + + // + // Use your Storage account's name and key to create a credential object; this is required to sign a SAS. + SharedKeyCredentials credential = new SharedKeyCredentials(getAccountName(), getAccountKey()); + + /* + Set the desired SAS signature values and sign them with the shared key credentials to get the SAS query + parameters. + */ + AccountSASSignatureValues values = new AccountSASSignatureValues(); + values.withProtocol(SASProtocol.HTTPS_ONLY) // Users MUST use HTTPS (not HTTP). + .withExpiryTime(OffsetDateTime.now().plusDays(2)); // 2 days before expiration. + + AccountSASPermission permission = new AccountSASPermission() + .withRead(true) + .withList(true); + values.withPermissions(permission.toString()); + + AccountSASService service = new AccountSASService() + .withBlob(true); + values.withServices(service.toString()); + + AccountSASResourceType resourceType = new AccountSASResourceType() + .withContainer(true) + .withObject(true); + values.withResourceTypes(resourceType.toString()); + + SASQueryParameters params = values.generateSASQueryParameters(credential); + + // Calling encode will generate the query string. + String encodedParams = params.encode(); + + String urlToSendToSomeone = String.format(Locale.ROOT, "https://%s.blob.core.windows.net?%s", + getAccountName(), encodedParams); + // At this point, you can send the urlToSendSomeone to someone via email or any other mechanism you choose. 
+ + // *************************************************************************************************** + + // When someone receives the URL, they access the SAS-protected resource with code like this: + u = new URL(urlToSendToSomeone); + + /* + Create a ServiceURL object that wraps the serviceURL (and its SAS) and a pipeline. When using SAS URLs, + AnonymousCredentials are required. + */ + ServiceURL sURL = new ServiceURL(u, + StorageURL.createPipeline(new AnonymousCredentials(), new PipelineOptions())); + // Now, you can use this serviceURL just like any other to make requests of the resource. + // + + // + // Use your Storage account's name and key to create a credential object; this is required to sign a SAS. + credential = new SharedKeyCredentials(getAccountName(), getAccountKey()); + + // This is the name of the container and blob that we're creating a SAS to. + String containerName = "mycontainer"; // Container names require lowercase. + String blobName = "HelloWorld.txt"; // Blob names can be mixed case. + + /* + Set the desired SAS signature values and sign them with the shared key credentials to get the SAS query + parameters. + */ + ServiceSASSignatureValues blobValues = new ServiceSASSignatureValues() + .withProtocol(SASProtocol.HTTPS_ONLY) // Users MUST use HTTPS (not HTTP). + .withExpiryTime(OffsetDateTime.now().plusDays(2)) // 2 days before expiration. + .withContainerName(containerName) + .withBlobName(blobName); + + /* + To produce a container SAS (as opposed to a blob SAS), assign to Permissions using ContainerSASPermissions, and + make sure the blobName field is null (the default). + */ + BlobSASPermission blobPermission = new BlobSASPermission() + .withRead(true) + .withAdd(true) + .withWrite(true); + blobValues.withPermissions(blobPermission.toString()); + + SASQueryParameters serviceParams = blobValues.generateSASQueryParameters(credential); + + // Calling encode will generate the query string. 
+ encodedParams = serviceParams.encode(); + + urlToSendToSomeone = String.format(Locale.ROOT, "https://%s.blob.core.windows.net/%s/%s?%s", + getAccountName(), containerName, blobName, encodedParams); + // At this point, you can send the urlToSendSomeone to someone via email or any other mechanism you choose. + + // *************************************************************************************************** + + // When someone receives the URL, the access the SAS-protected resource with code like this: + u = new URL(urlToSendToSomeone); + + /* + Create a BlobURL object that wraps the blobURL (and its SAS) and a pipeline. When using SAS URLs, + AnonymousCredentials are required. + */ + BlobURL bURL = new BlobURL(u, + StorageURL.createPipeline(new AnonymousCredentials(), new PipelineOptions())); + // Now, you can use this blobURL just like any other to make requests of the resource. + // + + // + BlockBlobURL blockBlobURL = containerURL.createBlockBlobURL("Data.txt"); + + String[] blockData = {"Michael", "Gabriel", "Raphael", "John"}; + String initialBlockID = Base64.getEncoder().encodeToString( + UUID.randomUUID().toString().getBytes()); + + // Create the container. We convert to an Observable to be able to work with the block list effectively. + containerURL.create(null, null, null) + .flatMapObservable(response -> + // Create an Observable that will yield each of the Strings one at a time. + Observable.fromIterable(Arrays.asList(blockData)) + ) + // Items emitted by an Observable that results from a concatMap call will preserve the original order. + .concatMapEager(block -> { + /* + Generate a base64 encoded blockID. Note that all blockIDs must be the same length. It is generally + considered best practice to use UUIDs for the blockID. + */ + String blockId = Base64.getEncoder().encodeToString( + UUID.randomUUID().toString().getBytes()); + + /* + Upload a block to this blob specifying the BlockID and its content (up to 100MB); this block is + uncommitted. 
+ NOTE: It is imperative that the provided length match the actual length of the data exactly. + */ + return blockBlobURL.stageBlock(blockId, Flowable.just(ByteBuffer.wrap(block.getBytes())), + block.length(), null, null) + /* + We do not care for any data on the response object, but we do want to keep track of the + ID. + */ + .map(x -> blockId).toObservable(); + }) + // Gather all of the IDs emitted by the previous observable into a single list. + .collectInto(new ArrayList<>(blockData.length), (BiConsumer, String>) ArrayList::add) + .flatMap(idList -> { + /* + By this point, all the blocks are upload and we have an ordered list of their IDs. Here, we + atomically commit the whole list. + NOTE: The block list order need not match the order in which the blocks were uploaded. The order + of IDs in the commitBlockList call will determine the structure of the blob. + */ + idList.add(0, initialBlockID); + return blockBlobURL.commitBlockList(idList, null, null, null, null); + }) + .flatMap(response -> + /* + For the blob, show each block (ID and size) that is a committed part of it. It is also possible + to include blocks that have been staged but not committed. + */ + blockBlobURL.getBlockList(BlockListType.ALL, null, null)) + .subscribe(); + // + + // + String blockID = Base64.getEncoder().encodeToString(UUID.randomUUID().toString().getBytes()); + blockBlobURL.stageBlockFromURL(blockID, blobURL.toURL(), null, null, + null, null) + .flatMap(response -> + blockBlobURL.commitBlockList(Arrays.asList(blockID), null, null, + null, null)) + .subscribe(); + // + + // + + // Create the container. + containerURL.create(null, null, null) + .flatMap(response -> + // Create the append blob. This creates a zero-length blob that we can now append to. 
+ appendBlobURL.create(null, null, null, null)) + .flatMapObservable(response -> + // This range will act as our for loop to create 5 blocks + Observable.range(0, 5)) + .concatMapEager(i -> { + String text = String.format(Locale.ROOT, "Appending block #%d\n", i); + return appendBlobURL.appendBlock(Flowable.just(ByteBuffer.wrap(text.getBytes())), text.length(), + null, null).toObservable(); + }).subscribe(); + // + + + // + // Create the container. + containerURL.create(null, null, null) + .flatMap(response -> + // Create the original blob. + blobURL.upload(Flowable.just(ByteBuffer.wrap("Some text".getBytes())), "Some text".length(), + null, null, null, null)) + .flatMap(response -> + // Create a snapshot of the original blob. + blobURL.createSnapshot(null, null, null)) + .flatMap(response -> { + BlobURL snapshotURL = blobURL.withSnapshot(response.headers().snapshot()); + return snapshotURL.getProperties(null, null); + }).subscribe(); + // + + // + // Create the container. + containerURL.create(null, null, null) + .flatMap(response -> + // Start the copy from the source url to the destination, which is the url pointed to by blobURL + blobURL.startCopyFromURL( + new URL("https://cdn2.auth0.com/docs/media/addons/azure_blob.svg"), + null, null, null, null)) + .flatMap(response -> + blobURL.getProperties(null, null)) + .flatMap(response -> + waitForCopyHelper(blobURL, response)) + .subscribe(); + // + + // + containerURL.create(null, null, null) + .flatMap(response -> + // Start the copy from the source url to the destination, which is the url pointed to by blobURL + blobURL.startCopyFromURL( + new URL("https://cdn2.auth0.com/docs/media/addons/azure_blob.svg"), + null, null, null, null)) + .flatMap(response -> + blobURL.getProperties(null, null)) + .flatMap(response -> + blobURL.abortCopyFromURL(response.headers().copyId(), null, null)) + .subscribe(); + // + + // + // Create the container. 
+ containerURL.create() + .flatMap(response -> + /* + Copy from the source url to the destination, which is the url pointed to by blobURL. Note that + the service will not return a response until the copy is complete, hence "sync" copy. + */ + blobURL.syncCopyFromURL(new URL("https://cdn2.auth0.com/docs/media/addons/azure_blob.svg"))) + .subscribe(); + // + + // + blobURL.delete(null, null, null) + .subscribe(); + // + + // + // This sample assumes that the account has a delete retention policy set. + blobURL.delete(null, null, null) + .flatMap(response -> + blobURL.undelete(null)) + .subscribe(); + // + + // + // BlockBlobs and PageBlobs have different sets of tiers. + blockBlobURL.setTier(AccessTier.HOT, null, null) + .subscribe(); + pageBlobURL.setTier(AccessTier.P6, null, null) + .subscribe(); + // + + // + containerURL.create(null, null, null) + .flatMap(containersCreateResponse -> + /* + Create the blob with string (plain text) content. + NOTE: It is imperative that the provided length matches the actual length exactly. + */ + blobURL.upload(Flowable.just(ByteBuffer.wrap(data.getBytes())), data.length(), + null, null, null, null)) + .flatMap(response -> + blobURL.getProperties(null, null)) + .flatMap(response -> { + Metadata newMetadata = new Metadata(response.headers().metadata()); + // If one of the HTTP properties is set, all must be set again or they will be cleared. 
+ BlobHTTPHeaders newHeaders = new BlobHTTPHeaders() + .withBlobCacheControl(response.headers().cacheControl()) + .withBlobContentDisposition(response.headers().contentDisposition()) + .withBlobContentEncoding(response.headers().contentEncoding()) + .withBlobContentLanguage("new language") + .withBlobContentMD5(response.headers().contentMD5()) + .withBlobContentType("new content"); + return blobURL.setMetadata(newMetadata, null, null) + .flatMap(nextResponse -> blobURL.setHTTPHeaders(newHeaders, null, null)); + }) + .subscribe(); + // + + // + containerURL.create(null, null, null) + .flatMap(response -> + containerURL.getProperties(null, null)) + .flatMap(response -> { + Metadata metadata = new Metadata(); + metadata.put("key", "value"); + return containerURL.setMetadata(metadata, null, null); + }) + .flatMap(response -> + containerURL.delete(null, null)) + .subscribe(); + // + + // + containerURL.create(null, null, null) + .flatMap(response -> { + /* + Create a SignedIdentifier that gives read permissions and expires one day for now. This means that + any SAS associated with this policy has these properties. + */ + BlobSASPermission perms = new BlobSASPermission() + .withRead(true); + SignedIdentifier id = new SignedIdentifier().withId("policy1").withAccessPolicy( + new AccessPolicy().withPermission(perms.toString()).withExpiry(OffsetDateTime.now() + .plusDays(1))); + // Give public access to the blobs in this container and apply the SignedIdentifier. + return containerURL.setAccessPolicy(PublicAccessType.BLOB, Arrays.asList(id), null, null); + }) + .subscribe(); + // + + // + containerURL.listBlobsFlatSegment(null, new ListBlobsOptions().withMaxResults(1), null) + .flatMap(containersListBlobFlatSegmentResponse -> + // The asynchronous requests require we use recursion to continue our listing. 
+ listBlobsFlatHelper(containerURL, containersListBlobFlatSegmentResponse)) + .subscribe(); + // + + // + containerURL.listBlobsHierarchySegment(null, "my_delimiter", new ListBlobsOptions().withMaxResults(1), null) + .flatMap(containersListBlobHierarchySegmentResponse -> + // The asynchronous requests require we use recursion to continue our listing. + listBlobsHierarchyHelper(containerURL, containersListBlobHierarchySegmentResponse)) + .subscribe(); + // + + // + containerURL.create(null, null, null) + .flatMap(response -> + // Create the page blob with 4 512-byte pages. + pageBlobURL.create(4 * PageBlobURL.PAGE_BYTES, null, null, + null, null, null)) + .flatMap(response -> { + /* + Upload data to a page. + NOTE: The page range must start on a multiple of the page size and end on + (multiple of page size) - 1. + */ + byte[] pageData = new byte[PageBlobURL.PAGE_BYTES]; + for (int i = 0; i < PageBlobURL.PAGE_BYTES; i++) { + pageData[i] = 'a'; + } + return pageBlobURL.uploadPages(new PageRange().withStart(0).withEnd(PageBlobURL.PAGE_BYTES - 1), + Flowable.just(ByteBuffer.wrap(pageData)), null, null); + }) + .flatMap(response -> + // Get the page ranges which have valid data. + pageBlobURL.getPageRanges(null, null, null)) + .flatMap(response -> { + // Print the pages that are valid. + for (PageRange range : response.body().pageRange()) { + System.out.println(String.format(Locale.ROOT, "Start=%d, End=%d\n", range.start(), + range.end())); + } + + // Clear and invalidate the first range. 
+ return pageBlobURL.clearPages(new PageRange().withStart(0).withEnd(PageBlobURL.PAGE_BYTES - 1), + null, null); + }) + .flatMap(response -> + pageBlobURL.resize(1024, null, null)) + .flatMap(rsponse -> + pageBlobURL.updateSequenceNumber(SequenceNumberActionType.INCREMENT, null, + null, null)) + .subscribe(); + // + + // + pageBlobURL.create(4 * PageBlobURL.PAGE_BYTES, null, null, + null, null, null) + .flatMap(response -> + pageBlobURL.createSnapshot(null, null, null)) + .flatMap(response -> { + /* + Upload data to a page. + NOTE: The page range must start on a multiple of the page size and end on + (multiple of page size) - 1. + */ + byte[] pageData = new byte[PageBlobURL.PAGE_BYTES]; + for (int i = 0; i < PageBlobURL.PAGE_BYTES; i++) { + pageData[i] = 'a'; + } + return pageBlobURL.uploadPages(new PageRange().withStart(0).withEnd(PageBlobURL.PAGE_BYTES - 1), + Flowable.just(ByteBuffer.wrap(pageData)), null, null) + // We still need access to the snapshotResponse. + .flatMap(uploadResponse -> + pageBlobURL.getPageRangesDiff(null, response.headers().snapshot(), + null, null)); + }); + // + + // + PageBlobURL incrementalCopy = containerURL.createPageBlobURL("incremental"); + pageBlobURL.createSnapshot(null, null, null) + .flatMap(response -> + incrementalCopy.copyIncremental(pageBlobURL.toURL(), response.headers().snapshot(), null, null)) + .flatMap(response -> { + byte[] pageData = new byte[PageBlobURL.PAGE_BYTES]; + for (int i = 0; i < PageBlobURL.PAGE_BYTES; i++) { + pageData[i] = 'a'; + } + return pageBlobURL.uploadPages(new PageRange().withStart(0).withEnd(PageBlobURL.PAGE_BYTES - 1), + Flowable.just(ByteBuffer.wrap(pageData)), null, null); + }) + .flatMap(response -> + pageBlobURL.createSnapshot(null, null, null)) + .flatMap(response -> + incrementalCopy.copyIncremental(pageBlobURL.toURL(), response.headers().snapshot(), null, null)) + .subscribe(); + /* + The result is a new blob with two new snapshots that correspond to the source blob snapshots but with 
different + IDs. These snapshots may be read from like normal snapshots. + */ + // + + // + blobURL.acquireLease(null, 20, null, null) + .flatMap(response -> + blobURL.changeLease(response.headers().leaseId(), "proposed", null, null)) + .flatMap(response -> + blobURL.renewLease(response.headers().leaseId(), null, null)) + .flatMap(response -> + blobURL.breakLease(null, null, null) + .flatMap(breakResponse -> + blobURL.releaseLease(response.headers().leaseId(), null, null))) + .subscribe(); + // + + // + containerURL.acquireLease(null, 20, null, null) + .flatMap(response -> + containerURL.changeLease(response.headers().leaseId(), "proposed", + null, null)) + .flatMap(response -> + containerURL.renewLease(response.headers().leaseId(), null, null)) + .flatMap(response -> + containerURL.breakLease(null, null, null) + .flatMap(breakResponse -> + containerURL.releaseLease(response.headers().leaseId(), null, null))) + .subscribe(); + // + + ByteBuffer largeData = ByteBuffer.wrap("LargeData".getBytes()); + + ByteBuffer largeBuffer = ByteBuffer.allocate(10 * 1024); + + File tempFile = File.createTempFile("BigFile", ".bin"); + tempFile.deleteOnExit(); + // + Single.using( + () -> AsynchronousFileChannel.open(tempFile.toPath(), StandardOpenOption.WRITE), + channel -> Single.fromFuture(channel + .write(ByteBuffer.wrap("Big data".getBytes()), 0)), AsynchronousFileChannel::close) + .flatMap(response -> Single.using( + () -> AsynchronousFileChannel.open(tempFile.toPath(), StandardOpenOption.READ), + channel -> TransferManager.uploadFileToBlockBlob(channel, blobURL, + BlockBlobURL.MAX_STAGE_BLOCK_BYTES, null), + AsynchronousFileChannel::close) + ) + .flatMap(response -> Single.using( + () -> AsynchronousFileChannel.open(tempFile.toPath(), StandardOpenOption.WRITE), + channel -> TransferManager.downloadBlobToFile(channel, blobURL, null, null), + AsynchronousFileChannel::close) + ) + .flatMap(response -> + // Delete the container. 
+ containerURL.delete(null, null)); + // + + // + /* + We create a simple flowable for the purposes of demonstration, but the Flowable in question need not + produce a repeatable sequence of items. A network stream would be a common use for this api. + */ + Flowable nonReplayableFlowable = Flowable.just(ByteBuffer.allocate(1)); + TransferManager.uploadFromNonReplayableFlowable(nonReplayableFlowable, blobURL, 4 * 1024 * 1024, 2, null); + // + + // + serviceURL.getProperties(null) + .flatMap(response -> { + StorageServiceProperties newProps = response.body(); + + // Remove the delete retention policy to disable soft delete. + newProps.withDeleteRetentionPolicy(null); + + return serviceURL.setProperties(newProps, null); + }) + .subscribe(); + // + + // + serviceURL.getStatistics(null) + .subscribe(); + // + + // + serviceURL.listContainersSegment(null, ListContainersOptions.DEFAULT, null) + .flatMap(listContainersSegmentResponse -> + // The asynchronous requests require we use recursion to continue our listing. 
+ listContainersHelper(serviceURL, listContainersSegmentResponse)) + .subscribe(); + // + + // + serviceURL.getAccountInfo(null) + .subscribe(); + containerURL.getAccountInfo(null) + .subscribe(); + blobURL.getAccountInfo(null) + .subscribe(); + // + + // + Flowable flowableData = Flowable.just(ByteBuffer.wrap("Data".getBytes())); + flowableData = ProgressReporter.addProgressReporting(flowableData, System.out::println); + // + } +} + diff --git a/storage/data-plane/src/test/java/com/microsoft/azure/storage/ServiceAPITest.groovy b/storage/data-plane/src/test/java/com/microsoft/azure/storage/ServiceAPITest.groovy new file mode 100644 index 0000000000000..daa08c770adb9 --- /dev/null +++ b/storage/data-plane/src/test/java/com/microsoft/azure/storage/ServiceAPITest.groovy @@ -0,0 +1,406 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.microsoft.azure.storage + +import com.microsoft.azure.storage.blob.* +import com.microsoft.azure.storage.blob.models.* +import com.microsoft.rest.v2.http.HttpPipeline + +class ServiceAPITest extends APISpec { + def setup() { + RetentionPolicy disabled = new RetentionPolicy().withEnabled(false) + primaryServiceURL.setProperties(new StorageServiceProperties() + .withStaticWebsite(new StaticWebsite().withEnabled(false)) + .withDeleteRetentionPolicy(disabled) + .withCors(null) + .withHourMetrics(new Metrics().withVersion("1.0").withEnabled(false) + .withRetentionPolicy(disabled)) + .withMinuteMetrics(new Metrics().withVersion("1.0").withEnabled(false) + .withRetentionPolicy(disabled)) + .withLogging(new Logging().withVersion("1.0") + .withRetentionPolicy(disabled)) + .withDefaultServiceVersion("2018-03-28"), null).blockingGet() + } + + def cleanup() { + RetentionPolicy disabled = new RetentionPolicy().withEnabled(false) + primaryServiceURL.setProperties(new StorageServiceProperties() + .withStaticWebsite(new StaticWebsite().withEnabled(false)) + .withDeleteRetentionPolicy(disabled) + .withCors(null) + .withHourMetrics(new Metrics().withVersion("1.0").withEnabled(false) + .withRetentionPolicy(disabled)) + .withMinuteMetrics(new Metrics().withVersion("1.0").withEnabled(false) + .withRetentionPolicy(disabled)) + .withLogging(new Logging().withVersion("1.0") + .withRetentionPolicy(disabled)) + .withDefaultServiceVersion("2018-03-28"), null).blockingGet() + } + + def "List containers"() { + when: + ServiceListContainersSegmentResponse response = + primaryServiceURL.listContainersSegment(null, new ListContainersOptions().withPrefix(containerPrefix), + null).blockingGet() + + then: + for (ContainerItem c : response.body().containerItems()) { + assert c.name().startsWith(containerPrefix) + assert c.properties().lastModified() != null + assert c.properties().etag() != null + assert c.properties().leaseStatus() != null + assert c.properties().leaseState() != 
null + assert c.properties().leaseDuration() == null + assert c.properties().publicAccess() == null + assert !c.properties().hasLegalHold() + assert !c.properties().hasImmutabilityPolicy() + } + response.headers().requestId() != null + response.headers().version() != null + } + + def "List containers min"() { + expect: + primaryServiceURL.listContainersSegment(null, null).blockingGet().statusCode() == 200 + } + + def "List containers marker"() { + setup: + for (int i = 0; i < 10; i++) { + ContainerURL cu = primaryServiceURL.createContainerURL(generateContainerName()) + cu.create(null, null, null).blockingGet() + } + + ServiceListContainersSegmentResponse response = + primaryServiceURL.listContainersSegment(null, + new ListContainersOptions().withMaxResults(5), null).blockingGet() + String marker = response.body().nextMarker() + String firstContainerName = response.body().containerItems().get(0).name() + response = primaryServiceURL.listContainersSegment(marker, + new ListContainersOptions().withMaxResults(5), null).blockingGet() + + expect: + // Assert that the second segment is indeed after the first alphabetically + firstContainerName < response.body().containerItems().get(0).name() + } + + def "List containers details"() { + setup: + Metadata metadata = new Metadata() + metadata.put("foo", "bar") + cu = primaryServiceURL.createContainerURL("aaa" + generateContainerName()) + cu.create(metadata, null, null).blockingGet() + + expect: + primaryServiceURL.listContainersSegment(null, + new ListContainersOptions().withDetails(new ContainerListingDetails().withMetadata(true)) + .withPrefix("aaa" + containerPrefix), null).blockingGet().body().containerItems() + .get(0).metadata() == metadata + // Container with prefix "aaa" will not be cleaned up by normal test cleanup. 
+ cu.delete(null, null).blockingGet().statusCode() == 202 + } + + def "List containers maxResults"() { + setup: + for (int i = 0; i < 11; i++) { + primaryServiceURL.createContainerURL(generateContainerName()).create(null, null, null) + .blockingGet() + } + expect: + primaryServiceURL.listContainersSegment(null, + new ListContainersOptions().withMaxResults(10), null) + .blockingGet().body().containerItems().size() == 10 + } + + def "List containers error"() { + when: + primaryServiceURL.listContainersSegment("garbage", null, null).blockingGet() + + then: + thrown(StorageException) + } + + def "List containers context"() { + setup: + def pipeline = + HttpPipeline.build(getStubFactory(getContextStubPolicy(200, ServiceListContainersSegmentHeaders))) + + def su = primaryServiceURL.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + su.listContainersSegment(null, null, defaultContext) + + then: + notThrown(RuntimeException) + } + + def validatePropsSet(StorageServiceProperties sent, StorageServiceProperties received) { + return received.logging().read() == sent.logging().read() && + received.logging().delete() == sent.logging().delete() && + received.logging().write() == sent.logging().write() && + received.logging().version() == sent.logging().version() && + received.logging().retentionPolicy().days() == sent.logging().retentionPolicy().days() && + received.logging().retentionPolicy().enabled() == sent.logging().retentionPolicy().enabled() && + + received.cors().size() == sent.cors().size() && + received.cors().get(0).allowedMethods() == sent.cors().get(0).allowedMethods() && + received.cors().get(0).allowedHeaders() == sent.cors().get(0).allowedHeaders() && + received.cors().get(0).allowedOrigins() == sent.cors().get(0).allowedOrigins() && + received.cors().get(0).exposedHeaders() == sent.cors().get(0).exposedHeaders() && + received.cors().get(0).maxAgeInSeconds() == sent.cors().get(0).maxAgeInSeconds() && + + 
received.defaultServiceVersion() == sent.defaultServiceVersion() && + + received.hourMetrics().enabled() == sent.hourMetrics().enabled() && + received.hourMetrics().includeAPIs() == sent.hourMetrics().includeAPIs() && + received.hourMetrics().retentionPolicy().enabled() == sent.hourMetrics().retentionPolicy().enabled() && + received.hourMetrics().retentionPolicy().days() == sent.hourMetrics().retentionPolicy().days() && + received.hourMetrics().version() == sent.hourMetrics().version() && + + received.minuteMetrics().enabled() == sent.minuteMetrics().enabled() && + received.minuteMetrics().includeAPIs() == sent.minuteMetrics().includeAPIs() && + received.minuteMetrics().retentionPolicy().enabled() == sent.minuteMetrics().retentionPolicy().enabled() && + received.minuteMetrics().retentionPolicy().days() == sent.minuteMetrics().retentionPolicy().days() && + received.minuteMetrics().version() == sent.minuteMetrics().version() && + + received.deleteRetentionPolicy().enabled() == sent.deleteRetentionPolicy().enabled() && + received.deleteRetentionPolicy().days() == sent.deleteRetentionPolicy().days() && + + received.staticWebsite().enabled() == sent.staticWebsite().enabled() && + received.staticWebsite().indexDocument() == sent.staticWebsite().indexDocument() && + received.staticWebsite().errorDocument404Path() == sent.staticWebsite().errorDocument404Path() + } + + def "Set get properties"() { + when: + RetentionPolicy retentionPolicy = new RetentionPolicy().withDays(5).withEnabled(true) + Logging logging = new Logging().withRead(true).withVersion("1.0") + .withRetentionPolicy(retentionPolicy) + ArrayList corsRules = new ArrayList<>() + corsRules.add(new CorsRule().withAllowedMethods("GET,PUT,HEAD") + .withAllowedOrigins("*") + .withAllowedHeaders("x-ms-version") + .withExposedHeaders("x-ms-client-request-id") + .withMaxAgeInSeconds(10)) + String defaultServiceVersion = "2016-05-31" + Metrics hourMetrics = new Metrics().withEnabled(true).withVersion("1.0") + 
.withRetentionPolicy(retentionPolicy).withIncludeAPIs(true) + Metrics minuteMetrics = new Metrics().withEnabled(true).withVersion("1.0") + .withRetentionPolicy(retentionPolicy).withIncludeAPIs(true) + StaticWebsite website = new StaticWebsite().withEnabled(true) + .withIndexDocument("myIndex.html") + .withErrorDocument404Path("custom/error/path.html") + + StorageServiceProperties sentProperties = new StorageServiceProperties() + .withLogging(logging).withCors(corsRules).withDefaultServiceVersion(defaultServiceVersion) + .withMinuteMetrics(minuteMetrics).withHourMetrics(hourMetrics) + .withDeleteRetentionPolicy(retentionPolicy) + .withStaticWebsite(website) + + ServiceSetPropertiesHeaders headers = primaryServiceURL.setProperties(sentProperties, null) + .blockingGet().headers() + + // Service properties may take up to 30s to take effect. If they weren't already in place, wait. + sleep(30 * 1000) + + StorageServiceProperties receivedProperties = primaryServiceURL.getProperties(null) + .blockingGet().body() + + then: + headers.requestId() != null + headers.version() != null + validatePropsSet(sentProperties, receivedProperties) + } + + // In java, we don't have support from the validator for checking the bounds on days. The service will catch these. 
+ + def "Set props min"() { + setup: + RetentionPolicy retentionPolicy = new RetentionPolicy().withDays(5).withEnabled(true) + Logging logging = new Logging().withRead(true).withVersion("1.0") + .withRetentionPolicy(retentionPolicy) + ArrayList corsRules = new ArrayList<>() + corsRules.add(new CorsRule().withAllowedMethods("GET,PUT,HEAD") + .withAllowedOrigins("*") + .withAllowedHeaders("x-ms-version") + .withExposedHeaders("x-ms-client-request-id") + .withMaxAgeInSeconds(10)) + String defaultServiceVersion = "2016-05-31" + Metrics hourMetrics = new Metrics().withEnabled(true).withVersion("1.0") + .withRetentionPolicy(retentionPolicy).withIncludeAPIs(true) + Metrics minuteMetrics = new Metrics().withEnabled(true).withVersion("1.0") + .withRetentionPolicy(retentionPolicy).withIncludeAPIs(true) + StaticWebsite website = new StaticWebsite().withEnabled(true) + .withIndexDocument("myIndex.html") + .withErrorDocument404Path("custom/error/path.html") + + StorageServiceProperties sentProperties = new StorageServiceProperties() + .withLogging(logging).withCors(corsRules).withDefaultServiceVersion(defaultServiceVersion) + .withMinuteMetrics(minuteMetrics).withHourMetrics(hourMetrics) + .withDeleteRetentionPolicy(retentionPolicy) + .withStaticWebsite(website) + + expect: + primaryServiceURL.setProperties(sentProperties).blockingGet().statusCode() == 202 + } + + def "Set props error"() { + when: + new ServiceURL(new URL("https://error.blob.core.windows.net"), + StorageURL.createPipeline(primaryCreds, new PipelineOptions())) + .setProperties(new StorageServiceProperties(), null).blockingGet() + + then: + thrown(StorageException) + } + + def "Set props context"() { + setup: + def pipeline = + HttpPipeline.build(getStubFactory(getContextStubPolicy(200, ServiceSetPropertiesHeaders))) + + def su = primaryServiceURL.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. 
+ su.setProperties(new StorageServiceProperties(), defaultContext) + + then: + notThrown(RuntimeException) + } + + def "Get props min"() { + expect: + primaryServiceURL.getProperties().blockingGet().statusCode() == 200 + } + + def "Get props error"() { + when: + new ServiceURL(new URL("https://error.blob.core.windows.net"), + StorageURL.createPipeline(primaryCreds, new PipelineOptions())).getProperties(null).blockingGet() + + then: + thrown(StorageException) + } + + def "Get props context"() { + setup: + def pipeline = + HttpPipeline.build(getStubFactory(getContextStubPolicy(200, ServiceGetPropertiesHeaders))) + + def su = primaryServiceURL.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + su.getProperties(defaultContext) + + then: + notThrown(RuntimeException) + } + + def "Get stats"() { + setup: + BlobURLParts parts = URLParser.parse(primaryServiceURL.toURL()) + parts.withHost(primaryCreds.getAccountName() + "-secondary.blob.core.windows.net") + ServiceURL secondary = new ServiceURL(parts.toURL(), + StorageURL.createPipeline(primaryCreds, new PipelineOptions())) + ServiceGetStatisticsResponse response = secondary.getStatistics(null).blockingGet() + + expect: + response.headers().version() != null + response.headers().requestId() != null + response.headers().date() != null + response.body().geoReplication().status() != null + response.body().geoReplication().lastSyncTime() != null + } + + def "Get stats min"() { + setup: + BlobURLParts parts = URLParser.parse(primaryServiceURL.toURL()) + parts.withHost(primaryCreds.getAccountName() + "-secondary.blob.core.windows.net") + ServiceURL secondary = new ServiceURL(parts.toURL(), + StorageURL.createPipeline(primaryCreds, new PipelineOptions())) + + expect: + secondary.getStatistics(null).blockingGet().statusCode() == 200 + } + + def "Get stats error"() { + when: + primaryServiceURL.getStatistics(null).blockingGet() + + then: + thrown(StorageException) + } + + def "Get stats 
context"() { + setup: + def pipeline = + HttpPipeline.build(getStubFactory(getContextStubPolicy(200, ServiceGetStatisticsHeaders))) + + def su = primaryServiceURL.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. + su.getStatistics(defaultContext) + + then: + notThrown(RuntimeException) + } + + def "Get account info"() { + when: + def response = primaryServiceURL.getAccountInfo(null).blockingGet() + + then: + response.headers().date() != null + response.headers().version() != null + response.headers().requestId() != null + response.headers().accountKind() != null + response.headers().skuName() != null + } + + def "Get account info min"() { + expect: + primaryServiceURL.getAccountInfo().blockingGet().statusCode() == 200 + } + + def "Get account info error"() { + when: + ServiceURL serviceURL = new ServiceURL(primaryServiceURL.toURL(), + StorageURL.createPipeline(new AnonymousCredentials(), new PipelineOptions())) + serviceURL.getAccountInfo(null).blockingGet() + + then: + thrown(StorageException) + } + + def "Get account info context"() { + setup: + def pipeline = + HttpPipeline.build(getStubFactory(getContextStubPolicy(200, ServiceGetAccountInfoHeaders))) + + def su = primaryServiceURL.withPipeline(pipeline) + + when: + // No service call is made. Just satisfy the parameters. 
+ su.getAccountInfo(defaultContext) + + then: + notThrown(RuntimeException) + } +} diff --git a/storage/data-plane/src/test/java/com/microsoft/azure/storage/TransferManagerTest.groovy b/storage/data-plane/src/test/java/com/microsoft/azure/storage/TransferManagerTest.groovy new file mode 100644 index 0000000000000..8dad8636c1d66 --- /dev/null +++ b/storage/data-plane/src/test/java/com/microsoft/azure/storage/TransferManagerTest.groovy @@ -0,0 +1,1009 @@ +package com.microsoft.azure.storage + +import com.microsoft.azure.storage.blob.* +import com.microsoft.azure.storage.blob.models.* +import com.microsoft.rest.v2.http.HttpPipeline +import com.microsoft.rest.v2.http.HttpRequest +import com.microsoft.rest.v2.http.HttpResponse +import com.microsoft.rest.v2.policy.RequestPolicy +import com.microsoft.rest.v2.policy.RequestPolicyFactory +import com.microsoft.rest.v2.util.FlowableUtil +import io.reactivex.Flowable +import io.reactivex.Single +import io.reactivex.functions.Consumer +import org.reactivestreams.Publisher +import org.reactivestreams.Subscriber +import spock.lang.Unroll + +import java.nio.ByteBuffer +import java.nio.channels.AsynchronousFileChannel +import java.nio.file.StandardOpenOption +import java.security.MessageDigest + +class TransferManagerTest extends APISpec { + BlockBlobURL bu + + def setup() { + bu = cu.createBlockBlobURL(generateBlobName()) + + /* + We just print something out in between each test to keep Travis from being idle for too long. The tests seem + to run slower on Travis, and without this keep-alive, it may exceed the 10 minutes of no output and error the + CI build. + */ + System.out.println("Starting test") + } + + @Unroll + def "Upload file"() { + setup: + def channel = AsynchronousFileChannel.open(file.toPath()) + + when: + // Block length will be ignored for single shot. 
+ CommonRestResponse response = TransferManager.uploadFileToBlockBlob(channel, + bu, (int) (BlockBlobURL.MAX_STAGE_BLOCK_BYTES / 10), + new TransferManagerUploadToBlockBlobOptions(null, null, null, null, 20)).blockingGet() + + then: + responseType.isInstance(response.response()) // Ensure we did the correct type of operation. + validateBasicHeaders(response) + compareDataToFile(bu.download(null, null, false, null).blockingGet().body(null), file) + + cleanup: + channel.close() + + where: + file || responseType + getRandomFile(10) || BlockBlobUploadResponse // Single shot + getRandomFile(BlockBlobURL.MAX_UPLOAD_BLOB_BYTES + 1) || BlockBlobCommitBlockListResponse // Multi part + } + + def compareDataToFile(Flowable data, File file) { + FileInputStream fis = new FileInputStream(file) + + for (ByteBuffer received : data.blockingIterable()) { + byte[] readBuffer = new byte[received.remaining()] + fis.read(readBuffer) + for (int i = 0; i < received.remaining(); i++) { + if (readBuffer[i] != received.get(i)) { + return false + } + } + } + + fis.close() + return true + } + + def "Upload file illegal arguments null"() { + when: + TransferManager.uploadFileToBlockBlob(file, url, 5, null).blockingGet() + + then: + thrown(IllegalArgumentException) + + where: + file | url + null | new BlockBlobURL(new URL("http://account.com"), StorageURL.createPipeline(primaryCreds, new PipelineOptions())) + AsynchronousFileChannel.open(getRandomFile(10).toPath()) | null + } + + @Unroll + def "Upload file illegal arguments blocks"() { + setup: + def channel = AsynchronousFileChannel.open(getRandomFile(fileSize).toPath()) + + when: + TransferManager.uploadFileToBlockBlob(channel, bu, + blockLength, null).blockingGet() + + then: + thrown(IllegalArgumentException) + + cleanup: + channel.close() + + where: + blockLength | fileSize + -1 | 10 // -1 is invalid. + BlockBlobURL.MAX_STAGE_BLOCK_BYTES + 1 | BlockBlobURL.MAX_STAGE_BLOCK_BYTES + 10 // Block size too big. 
+ 10 | BlockBlobURL.MAX_UPLOAD_BLOB_BYTES + 10 // Too many blocks. + } + + @Unroll + def "Upload file headers"() { + setup: + // We have to use the defaultData here so we can calculate the MD5 on the uploadBlob case. + File file = File.createTempFile("testUpload", ".txt") + file.deleteOnExit() + if (fileSize == "small") { + FileOutputStream fos = new FileOutputStream(file) + fos.write(defaultData.array()) + fos.close() + } else { + file = getRandomFile(BlockBlobURL.MAX_UPLOAD_BLOB_BYTES + 10) + } + + def channel = AsynchronousFileChannel.open(file.toPath()) + + when: + TransferManager.uploadFileToBlockBlob(channel, bu, BlockBlobURL.MAX_STAGE_BLOCK_BYTES, + new TransferManagerUploadToBlockBlobOptions(null, new BlobHTTPHeaders() + .withBlobCacheControl(cacheControl).withBlobContentDisposition(contentDisposition) + .withBlobContentEncoding(contentEncoding).withBlobContentLanguage(contentLanguage) + .withBlobContentMD5(contentMD5).withBlobContentType(contentType), null, null, null)) + .blockingGet() + + BlobGetPropertiesResponse response = bu.getProperties(null, null).blockingGet() + + then: + validateBlobHeaders(response.headers(), cacheControl, contentDisposition, contentEncoding, contentLanguage, + fileSize == "small" ? MessageDigest.getInstance("MD5").digest(defaultData.array()) : contentMD5, + contentType == null ? "application/octet-stream" : contentType) + // For uploading a block blob single-shot, the service will auto calculate an MD5 hash if not present. + // HTTP default content type is application/octet-stream. + + cleanup: + channel.close() + + where: + // The MD5 is simply set on the blob for commitBlockList, not validated. 
+ fileSize | cacheControl | contentDisposition | contentEncoding | contentLanguage | contentMD5 | contentType + "small" | null | null | null | null | null | null + "small" | "control" | "disposition" | "encoding" | "language" | MessageDigest.getInstance("MD5").digest(defaultData.array()) | "type" + "large" | null | null | null | null | null | null + "large" | "control" | "disposition" | "encoding" | "language" | MessageDigest.getInstance("MD5").digest(defaultData.array()) | "type" + } + + @Unroll + def "Upload file metadata"() { + setup: + Metadata metadata = new Metadata() + if (key1 != null) { + metadata.put(key1, value1) + } + if (key2 != null) { + metadata.put(key2, value2) + } + def channel = AsynchronousFileChannel.open(getRandomFile(dataSize).toPath()) + + when: + TransferManager.uploadFileToBlockBlob(channel, bu, BlockBlobURL.MAX_STAGE_BLOCK_BYTES, + new TransferManagerUploadToBlockBlobOptions(null, null, metadata, null, null)).blockingGet() + BlobGetPropertiesResponse response = bu.getProperties(null, null).blockingGet() + + then: + response.statusCode() == 200 + response.headers().metadata() == metadata + + cleanup: + channel.close() + + where: + dataSize | key1 | value1 | key2 | value2 + 10 | null | null | null | null + 10 | "foo" | "bar" | "fizz" | "buzz" + BlockBlobURL.MAX_UPLOAD_BLOB_BYTES + 10 | null | null | null | null + BlockBlobURL.MAX_UPLOAD_BLOB_BYTES + 10 | "foo" | "bar" | "fizz" | "buzz" + } + + @Unroll + def "Upload file AC"() { + setup: + bu.upload(defaultFlowable, defaultDataSize, null, null, null, null).blockingGet() + match = setupBlobMatchCondition(bu, match) + leaseID = setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + def channel = 
AsynchronousFileChannel.open(getRandomFile(dataSize).toPath()) + + expect: + TransferManager.uploadFileToBlockBlob(channel, bu, BlockBlobURL.MAX_STAGE_BLOCK_BYTES, + new TransferManagerUploadToBlockBlobOptions(null, null, null, bac, null)) + .blockingGet().statusCode() == 201 + + cleanup: + channel.close() + + where: + dataSize | modified | unmodified | match | noneMatch | leaseID + 10 | null | null | null | null | null + 10 | oldDate | null | null | null | null + 10 | null | newDate | null | null | null + 10 | null | null | receivedEtag | null | null + 10 | null | null | null | garbageEtag | null + 10 | null | null | null | null | receivedLeaseID + BlockBlobURL.MAX_UPLOAD_BLOB_BYTES + 10 | null | null | null | null | null + BlockBlobURL.MAX_UPLOAD_BLOB_BYTES + 10 | oldDate | null | null | null | null + BlockBlobURL.MAX_UPLOAD_BLOB_BYTES + 10 | null | newDate | null | null | null + BlockBlobURL.MAX_UPLOAD_BLOB_BYTES + 10 | null | null | receivedEtag | null | null + BlockBlobURL.MAX_UPLOAD_BLOB_BYTES + 10 | null | null | null | garbageEtag | null + BlockBlobURL.MAX_UPLOAD_BLOB_BYTES + 10 | null | null | null | null | receivedLeaseID + } + + @Unroll + def "Upload file AC fail"() { + setup: + bu.upload(defaultFlowable, defaultDataSize, null, null, null, null).blockingGet() + noneMatch = setupBlobMatchCondition(bu, noneMatch) + setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + def channel = AsynchronousFileChannel.open(getRandomFile(dataSize).toPath()) + + when: + TransferManager.uploadFileToBlockBlob(channel, bu, BlockBlobURL.MAX_STAGE_BLOCK_BYTES, + new TransferManagerUploadToBlockBlobOptions(null, null, null, bac, null)).blockingGet() + + then: + def e = 
thrown(StorageException) + e.errorCode() == StorageErrorCode.CONDITION_NOT_MET || + e.errorCode() == StorageErrorCode.LEASE_ID_MISMATCH_WITH_BLOB_OPERATION + + cleanup: + channel.close() + + where: + dataSize | modified | unmodified | match | noneMatch | leaseID + 10 | newDate | null | null | null | null + 10 | null | oldDate | null | null | null + 10 | null | null | garbageEtag | null | null + 10 | null | null | null | receivedEtag | null + 10 | null | null | null | null | garbageLeaseID + BlockBlobURL.MAX_UPLOAD_BLOB_BYTES + 10 | newDate | null | null | null | null + BlockBlobURL.MAX_UPLOAD_BLOB_BYTES + 10 | null | oldDate | null | null | null + BlockBlobURL.MAX_UPLOAD_BLOB_BYTES + 10 | null | null | garbageEtag | null | null + BlockBlobURL.MAX_UPLOAD_BLOB_BYTES + 10 | null | null | null | receivedEtag | null + BlockBlobURL.MAX_UPLOAD_BLOB_BYTES + 10 | null | null | null | null | garbageLeaseID + } + + /* + We require that any Flowable passed as a request body be replayable to support retries. This test ensures that + whatever means of getting data from a file we use produces a replayable Flowable so that we abide by our own + contract. + */ + + def "Upload replayable flowable"() { + setup: + // Write default data to a file + File file = File.createTempFile(UUID.randomUUID().toString(), ".txt") + file.deleteOnExit() + FileOutputStream fos = new FileOutputStream(file) + fos.write(defaultData.array()) + + // Mock a response that will always be retried. + def mockHttpResponse = Mock(HttpResponse) { + statusCode() >> 500 + bodyAsString() >> Single.just("") + } + + // Mock a policy that will always then check that the data is still the same and return a retryable error. 
+ def mockPolicy = Mock(RequestPolicy) { + sendAsync(_) >> { HttpRequest request -> + if (!(FlowableUtil.collectBytesInBuffer(request.body()).blockingGet() == defaultData)) { + throw new IllegalArgumentException() + } + return Single.just(mockHttpResponse) + } + } + + // Mock a factory that always returns our mock policy. + def mockFactory = Mock(RequestPolicyFactory) { + create(*_) >> mockPolicy + } + + // Build the pipeline + def testPipeline = HttpPipeline.build(new RequestRetryFactory(new RequestRetryOptions(null, 3, null, null, null, + null)), mockFactory) + bu = bu.withPipeline(testPipeline) + def channel = AsynchronousFileChannel.open(file.toPath()) + + when: + TransferManager.uploadFileToBlockBlob(channel, bu, 50, null).blockingGet() + + then: + def e = thrown(StorageException) + e.statusCode() == 500 + + cleanup: + channel.close() + } + + def "Upload options fail"() { + when: + new TransferManagerUploadToBlockBlobOptions(null, null, null, null, -1) + + then: + thrown(IllegalArgumentException) + } + + /* + Here we're testing that progress is properly added to a single upload. The size of the file must be less than + the max upload value. + */ + + def "Upload file progress sequential"() { + setup: + def channel = AsynchronousFileChannel.open(getRandomFile(BlockBlobURL.MAX_UPLOAD_BLOB_BYTES - 1).toPath()) + def mockReceiver = Mock(IProgressReceiver) + def prevCount = 0 + + when: + // Block length will be ignored for single shot. + CommonRestResponse response = TransferManager.uploadFileToBlockBlob(channel, + bu, BlockBlobURL.MAX_STAGE_BLOCK_BYTES, + new TransferManagerUploadToBlockBlobOptions(mockReceiver, null, null, null, 20)).blockingGet() + + then: + /* + The best we can do here is to check that the total is reported at the end. It is unclear how many ByteBuffers + will be needed to break up the file, so we can't check intermediary values. 
+ */ + 1 * mockReceiver.reportProgress(BlockBlobURL.MAX_UPLOAD_BLOB_BYTES - 1) + + /* + We may receive any number of intermediary calls depending on the implementation. For any of these notifications, + we assert that they are strictly increasing. + */ + _ * mockReceiver.reportProgress(!channel.size()) >> { long bytesTransferred -> + if (!(bytesTransferred > prevCount)) { + throw new IllegalArgumentException("Reported progress should monotonically increase") + } else { + prevCount = bytesTransferred + } + } + + 0 * mockReceiver.reportProgress({ it > BlockBlobURL.MAX_UPLOAD_BLOB_BYTES - 1 }) + + cleanup: + channel.close() + } + + def "Upload file progress parallel"() { + setup: + def channel = AsynchronousFileChannel.open(getRandomFile(BlockBlobURL.MAX_UPLOAD_BLOB_BYTES + 1).toPath()) + def numBlocks = channel.size() / BlockBlobURL.MAX_STAGE_BLOCK_BYTES + long prevCount = 0 + def mockReceiver = Mock(IProgressReceiver) + + + when: + TransferManager.uploadFileToBlockBlob(channel, + bu, BlockBlobURL.MAX_STAGE_BLOCK_BYTES, + new TransferManagerUploadToBlockBlobOptions(mockReceiver, null, null, null, 20)).blockingGet() + + then: + // We should receive exactly one notification of the completed progress. + 1 * mockReceiver.reportProgress(channel.size()) + + /* + We should receive at least one notification reporting an intermediary value per block, but possibly more + notifications will be received depending on the implementation. We specify numBlocks - 1 because the last block + will be the total size as above. Finally, we assert that the number reported monotonically increases. + */ + (numBlocks - 1.._) * mockReceiver.reportProgress(!channel.size()) >> { long bytesTransferred -> + if (!(bytesTransferred > prevCount)) { + throw new IllegalArgumentException("Reported progress should monotonically increase") + } else { + prevCount = bytesTransferred + } + } + + // We should receive no notifications that report more progress than the size of the file. 
+ 0 * mockReceiver.reportProgress({ it > channel.size() }) + notThrown(IllegalArgumentException) + + cleanup: + channel.close() + } + + @Unroll + def "Download file"() { + setup: + def channel = AsynchronousFileChannel.open(file.toPath(), StandardOpenOption.READ, StandardOpenOption.WRITE) + TransferManager.uploadFileToBlockBlob(channel, bu, BlockBlobURL.MAX_STAGE_BLOCK_BYTES, null) + .blockingGet() + def outChannel = AsynchronousFileChannel.open(getRandomFile(0).toPath(), StandardOpenOption.WRITE, + StandardOpenOption.READ) + + when: + def headers = TransferManager.downloadBlobToFile(outChannel, bu, null, null).blockingGet() + + then: + compareFiles(channel, 0, channel.size(), outChannel) + headers.blobType() == BlobType.BLOCK_BLOB + + cleanup: + channel.close() == null + outChannel.close() == null + + where: + file | _ + getRandomFile(20) | _ // small file + getRandomFile(16 * 1024 * 1024) | _ // medium file in several chunks + getRandomFile(8 * 1026 * 1024 + 10) | _ // medium file not aligned to block + getRandomFile(0) | _ // empty file + // Files larger than 2GB to test no integer overflow are left to stress/perf tests to keep test passes short. 
+ } + + def compareFiles(AsynchronousFileChannel channel1, long offset, long count, AsynchronousFileChannel channel2) { + int chunkSize = 8 * 1024 * 1024 + long pos = 0 + + while (pos < count) { + chunkSize = Math.min(chunkSize, count - pos) + def buf1 = FlowableUtil.collectBytesInBuffer(FlowableUtil.readFile(channel1, offset + pos, chunkSize)) + .blockingGet() + def buf2 = FlowableUtil.collectBytesInBuffer(FlowableUtil.readFile(channel2, pos, chunkSize)).blockingGet() + + buf1.position(0) + buf2.position(0) + + if (buf1.compareTo(buf2) != 0) { + return false + } + + pos += chunkSize + } + if (pos != count && pos != channel2.size()) { + return false + } + return true + } + + @Unroll + def "Download file range"() { + setup: + def channel = AsynchronousFileChannel.open(file.toPath(), StandardOpenOption.READ, StandardOpenOption.WRITE) + TransferManager.uploadFileToBlockBlob(channel, bu, BlockBlobURL.MAX_STAGE_BLOCK_BYTES, null) + .blockingGet() + File outFile = getRandomFile(0) + def outChannel = AsynchronousFileChannel.open(outFile.toPath(), StandardOpenOption.WRITE, + StandardOpenOption.READ) + + when: + TransferManager.downloadBlobToFile(outChannel, bu, range, null).blockingGet() + + then: + compareFiles(channel, range.offset(), range.count(), outChannel) + + cleanup: + channel.close() + outChannel.close() + + where: + file | range | dataSize + getRandomFile(defaultDataSize) | new BlobRange().withCount(defaultDataSize) | defaultDataSize + getRandomFile(defaultDataSize) | new BlobRange().withOffset(1).withCount(defaultDataSize - 1) | defaultDataSize - 1 + getRandomFile(defaultDataSize) | new BlobRange().withCount(defaultDataSize - 1) | defaultDataSize - 1 + getRandomFile(defaultDataSize) | new BlobRange().withCount(10L * 1024 * 1024 * 1024) | defaultDataSize + } + + def "Download file count null"() { + setup: + bu.upload(defaultFlowable, defaultDataSize, null, null, null, null).blockingGet() + File outFile = getRandomFile(0) + def outChannel = 
AsynchronousFileChannel.open(outFile.toPath(), StandardOpenOption.WRITE, + StandardOpenOption.READ) + + when: + TransferManager.downloadBlobToFile(outChannel, bu, new BlobRange(), null) + .blockingGet() + + then: + compareDataToFile(defaultFlowable, outFile) + + cleanup: + outChannel.close() + } + + @Unroll + def "Download file AC"() { + setup: + def channel = AsynchronousFileChannel.open(getRandomFile(defaultDataSize).toPath(), StandardOpenOption.READ, + StandardOpenOption.WRITE) + TransferManager.uploadFileToBlockBlob(channel, bu, BlockBlobURL.MAX_STAGE_BLOCK_BYTES, null) + .blockingGet() + def outChannel = AsynchronousFileChannel.open(getRandomFile(0).toPath(), StandardOpenOption.WRITE, + StandardOpenOption.READ) + + match = setupBlobMatchCondition(bu, match) + leaseID = setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + when: + TransferManager.downloadBlobToFile(outChannel, bu, null, new TransferManagerDownloadFromBlobOptions( + null, null, bac, null, null)).blockingGet() + + then: + compareFiles(channel, 0, channel.size(), outChannel) + + cleanup: + channel.close() + outChannel.close() + + where: + modified | unmodified | match | noneMatch | leaseID + null | null | null | null | null + oldDate | null | null | null | null + null | newDate | null | null | null + null | null | receivedEtag | null | null + null | null | null | garbageEtag | null + null | null | null | null | receivedLeaseID + } + + @Unroll + def "Download file AC fail"() { + setup: + def channel = AsynchronousFileChannel.open(getRandomFile(defaultDataSize).toPath(), StandardOpenOption.READ, + StandardOpenOption.WRITE) + TransferManager.uploadFileToBlockBlob(channel, bu, 
BlockBlobURL.MAX_STAGE_BLOCK_BYTES, null) + .blockingGet() + def outChannel = AsynchronousFileChannel.open(getRandomFile(0).toPath(), StandardOpenOption.WRITE, + StandardOpenOption.READ) + + noneMatch = setupBlobMatchCondition(bu, noneMatch) + setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + when: + TransferManager.downloadBlobToFile(outChannel, bu, null, + new TransferManagerDownloadFromBlobOptions(null, null, bac, null, null)).blockingGet() + + then: + def e = thrown(StorageException) + e.errorCode() == StorageErrorCode.CONDITION_NOT_MET || + e.errorCode() == StorageErrorCode.LEASE_ID_MISMATCH_WITH_BLOB_OPERATION + + where: + modified | unmodified | match | noneMatch | leaseID + newDate | null | null | null | null + null | oldDate | null | null | null + null | null | garbageEtag | null | null + null | null | null | receivedEtag | null + null | null | null | null | garbageLeaseID + } + + def "Download file etag lock"() { + setup: + bu.upload(Flowable.just(getRandomData(1 * 1024 * 1024)), 1 * 1024 * 1024, null, null, + null, null).blockingGet() + def outChannel = AsynchronousFileChannel.open(getRandomFile(0).toPath(), StandardOpenOption.WRITE, + StandardOpenOption.READ) + + when: + /* + Set up a large download in small chunks so it makes a lot of requests. This will give us time to cut in an + operation that will change the etag. 
+ */ + def success = false + TransferManager.downloadBlobToFile(outChannel, bu, null, + new TransferManagerDownloadFromBlobOptions(1024, null, null, null, null)) + .subscribe( + new Consumer() { + @Override + void accept(BlobDownloadHeaders headers) throws Exception { + success = false + } + }, + new Consumer() { + @Override + void accept(Throwable throwable) throws Exception { + if (throwable instanceof StorageException && + ((StorageException) throwable).statusCode() == 412) { + success = true + return + } + success = false + } + }) + + + sleep(500) // Give some time for the download request to start. + bu.upload(defaultFlowable, defaultDataSize, null, null, null, null).blockingGet() + + sleep(1000) // Allow time for the upload operation + + then: + success + + cleanup: + outChannel.close() + } + + @Unroll + def "Download file options"() { + setup: + def channel = AsynchronousFileChannel.open(getRandomFile(defaultDataSize).toPath(), StandardOpenOption.READ, + StandardOpenOption.WRITE) + TransferManager.uploadFileToBlockBlob(channel, bu, BlockBlobURL.MAX_STAGE_BLOCK_BYTES, null) + .blockingGet() + def outChannel = AsynchronousFileChannel.open(getRandomFile(0).toPath(), StandardOpenOption.WRITE, + StandardOpenOption.READ) + def reliableDownloadOptions = new ReliableDownloadOptions() + reliableDownloadOptions.withMaxRetryRequests(retries) + + when: + TransferManager.downloadBlobToFile(outChannel, bu, null, new TransferManagerDownloadFromBlobOptions( + blockSize, null, null, reliableDownloadOptions, parallelism)).blockingGet() + + then: + compareFiles(channel, 0, channel.size(), outChannel) + + cleanup: + channel.close() + outChannel.close() + + where: + blockSize | parallelism | retries + 1 | null | 2 + null | 1 | 2 + null | null | 1 + } + + @Unroll + def "Download file IA null"() { + when: + TransferManager.downloadBlobToFile(file, blobURL, null, null).blockingGet() + + then: + thrown(IllegalArgumentException) + + /* + This test is just validating that exceptions 
are thrown if certain values are null. The values not being test do + not need to be correct, simply not null. Because order in which Spock initializes values, we can't just use the + bu property for the url. + */ + where: + file | blobURL + null | new BlockBlobURL(new URL("http://account.com"), StorageURL.createPipeline(primaryCreds, new PipelineOptions())) + AsynchronousFileChannel.open(getRandomFile(10).toPath()) | null + } + + @Unroll + def "Download options fail"() { + when: + new TransferManagerDownloadFromBlobOptions(blockSize, null, null, null, parallelism + ) + + then: + thrown(IllegalArgumentException) + + where: + parallelism | blockSize + 0 | 40 + 2 | 0 + } + + def "Download options progress receiver"() { + def fileSize = 8 * 1026 * 1024 + 10 + def channel = AsynchronousFileChannel.open(getRandomFile(fileSize).toPath(), + StandardOpenOption.READ, StandardOpenOption.WRITE) + TransferManager.uploadFileToBlockBlob(channel, bu, BlockBlobURL.MAX_STAGE_BLOCK_BYTES, null) + .blockingGet() + def outChannel = AsynchronousFileChannel.open(getRandomFile(0).toPath(), StandardOpenOption.WRITE, + StandardOpenOption.READ) + + def mockReceiver = Mock(IProgressReceiver) + + def numBlocks = fileSize / TransferManager.BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE + def prevCount = 0 + + when: + TransferManager.downloadBlobToFile(outChannel, bu, null, + new TransferManagerDownloadFromBlobOptions(null, mockReceiver, null, + new ReliableDownloadOptions().withMaxRetryRequests(3), 20)).blockingGet() + + then: + // We should receive exactly one notification of the completed progress. + 1 * mockReceiver.reportProgress(fileSize) + + /* + We should receive at least one notification reporting an intermediary value per block, but possibly more + notifications will be received depending on the implementation. We specify numBlocks - 1 because the last block + will be the total size as above. Finally, we assert that the number reported monotonically increases. 
+ */ + (numBlocks - 1.._) * mockReceiver.reportProgress(!channel.size()) >> { long bytesTransferred -> + if (!(bytesTransferred > prevCount)) { + throw new IllegalArgumentException("Reported progress should monotonically increase") + } else { + prevCount = bytesTransferred + } + } + + // We should receive no notifications that report more progress than the size of the file. + 0 * mockReceiver.reportProgress({ it > fileSize }) + + cleanup: + channel.close() + } + + @Unroll + def "Upload NRF"() { + when: + def data = getRandomData(dataSize) + TransferManager.uploadFromNonReplayableFlowable(Flowable.just(data), bu, bufferSize, numBuffs, null) + .blockingGet() + data.position(0) + + then: + FlowableUtil.collectBytesInBuffer(bu.download().blockingGet().body(null)).blockingGet() == data + bu.getBlockList(BlockListType.ALL).blockingGet().body().committedBlocks().size() == blockCount + + where: + dataSize | bufferSize | numBuffs || blockCount + 350 | 50 | 2 || 7 + 350 | 50 | 5 || 7 + 10 * 1024 * 1024 | 1 * 1024 * 1024 | 2 || 10 + 10 * 1024 * 1024 | 1 * 1024 * 1024 | 5 || 10 + 10 * 1024 * 1024 | 1 * 1024 * 1024 | 10 || 10 + 500 * 1024 * 1024 | 100 * 1024 * 1024 | 2 || 5 + 500 * 1024 * 1024 | 100 * 1024 * 1024 | 4 || 5 + 10 * 1024 * 1024 | 3 * 512 * 1024 | 3 || 7 + } + + def compareListToBuffer(List buffers, ByteBuffer result) { + result.position(0) + for (ByteBuffer buffer : buffers) { + buffer.position(0) + result.limit(result.position() + buffer.remaining()) + if (buffer != result) { + return false + } + result.position(result.position() + buffer.remaining()) + } + return result.remaining() == 0 + } + + @Unroll + def "Upload NRF chunked source"() { + /* + This test should validate that the upload should work regardless of what format the passed data is in because + it will be chunked appropriately. 
+ */ + setup: + TransferManager.uploadFromNonReplayableFlowable(Flowable.fromIterable(dataList), bu, bufferSize, numBuffers, + null).blockingGet() + + expect: + compareListToBuffer(dataList, FlowableUtil.collectBytesInBuffer(bu.download().blockingGet().body(null)) + .blockingGet()) + bu.getBlockList(BlockListType.ALL).blockingGet().body().committedBlocks().size() == blockCount + + where: + dataList | bufferSize | numBuffers || blockCount + [getRandomData(7), getRandomData(7)] | 10 | 2 || 2 + [getRandomData(3), getRandomData(3), getRandomData(3), getRandomData(3), getRandomData(3), getRandomData(3), getRandomData(3)] | 10 | 2 || 3 + [getRandomData(10), getRandomData(10)] | 10 | 2 || 2 + [getRandomData(50), getRandomData(51), getRandomData(49)] | 10 | 2 || 15 + // The case of one large buffer needing to be broken up is tested in the previous test. + } + + @Unroll + def "Upload NRF illegal arguments null"() { + when: + TransferManager.uploadFromNonReplayableFlowable(source, url, 4, 4, null) + + then: + thrown(IllegalArgumentException) + + where: + source | url + null | new BlockBlobURL(new URL("http://account.com"), StorageURL.createPipeline(primaryCreds)) + Flowable.just(defaultData) | null + } + + @Unroll + def "Upload NRF illegal args out of bounds"() { + when: + TransferManager.uploadFromNonReplayableFlowable(Flowable.just(defaultData), bu, bufferSize, numBuffs, null) + + then: + thrown(IllegalArgumentException) + + where: + bufferSize | numBuffs + 0 | 5 + BlockBlobURL.MAX_STAGE_BLOCK_BYTES + 1 | 5 + 5 | 1 + } + + @Unroll + def "Upload NRF headers"() { + when: + TransferManager.uploadFromNonReplayableFlowable(Flowable.just(defaultData), bu, 10, 2, + new TransferManagerUploadToBlockBlobOptions(null, new BlobHTTPHeaders() + .withBlobCacheControl(cacheControl).withBlobContentDisposition(contentDisposition) + .withBlobContentEncoding(contentEncoding).withBlobContentLanguage(contentLanguage) + .withBlobContentMD5(contentMD5).withBlobContentType(contentType), null, 
null, null)) + .blockingGet() + + BlobGetPropertiesResponse response = bu.getProperties(null, null).blockingGet() + defaultData.position(0) + + then: + validateBlobHeaders(response.headers(), cacheControl, contentDisposition, contentEncoding, contentLanguage, + contentMD5, contentType == null ? "application/octet-stream" : contentType) + // HTTP default content type is application/octet-stream. + + where: + // The MD5 is simply set on the blob for commitBlockList, not validated. + cacheControl | contentDisposition | contentEncoding | contentLanguage | contentMD5 | contentType + null | null | null | null | null | null + "control" | "disposition" | "encoding" | "language" | MessageDigest.getInstance("MD5").digest(defaultData.array()) | "type" + } + + @Unroll + def "Upload NRF metadata"() { + setup: + Metadata metadata = new Metadata() + if (key1 != null) { + metadata.put(key1, value1) + } + if (key2 != null) { + metadata.put(key2, value2) + } + + when: + TransferManager.uploadFromNonReplayableFlowable(Flowable.just(getRandomData(10)), bu, 10, 10, + new TransferManagerUploadToBlockBlobOptions(null, null, metadata, null, null)).blockingGet() + BlobGetPropertiesResponse response = bu.getProperties(null, null).blockingGet() + + then: + response.statusCode() == 200 + response.headers().metadata() == metadata + + where: + key1 | value1 | key2 | value2 + null | null | null | null + "foo" | "bar" | "fizz" | "buzz" + } + + @Unroll + def "Upload NRF AC"() { + setup: + bu.upload(defaultFlowable, defaultDataSize, null, null, null, null).blockingGet() + match = setupBlobMatchCondition(bu, match) + leaseID = setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + expect: + 
TransferManager.uploadFromNonReplayableFlowable(Flowable.just(getRandomData(10)), bu, 10, 2, + new TransferManagerUploadToBlockBlobOptions(null, null, null, bac, null)) + .blockingGet().statusCode() == 201 + + where: + modified | unmodified | match | noneMatch | leaseID + null | null | null | null | null + oldDate | null | null | null | null + null | newDate | null | null | null + null | null | receivedEtag | null | null + null | null | null | garbageEtag | null + null | null | null | null | receivedLeaseID + } + + @Unroll + def "Upload NRF AC fail"() { + setup: + bu.upload(defaultFlowable, defaultDataSize, null, null, null, null).blockingGet() + noneMatch = setupBlobMatchCondition(bu, noneMatch) + leaseID = setupBlobLeaseCondition(bu, leaseID) + BlobAccessConditions bac = new BlobAccessConditions().withModifiedAccessConditions( + new ModifiedAccessConditions().withIfModifiedSince(modified).withIfUnmodifiedSince(unmodified) + .withIfMatch(match).withIfNoneMatch(noneMatch)) + .withLeaseAccessConditions(new LeaseAccessConditions().withLeaseId(leaseID)) + + when: + TransferManager.uploadFromNonReplayableFlowable(Flowable.just(getRandomData(10)), bu, 10, 2, + new TransferManagerUploadToBlockBlobOptions(null, null, null, bac, null)) + .blockingGet() + + then: + def e = thrown(StorageException) + e.errorCode() == StorageErrorCode.CONDITION_NOT_MET || + e.errorCode() == StorageErrorCode.LEASE_ID_MISMATCH_WITH_BLOB_OPERATION + + where: + modified | unmodified | match | noneMatch | leaseID + newDate | null | null | null | null + null | oldDate | null | null | null + null | null | garbageEtag | null | null + null | null | null | receivedEtag | null + null | null | null | null | garbageLeaseID + } + + def "Upload NRF progress"() { + setup: + def data = getRandomData(BlockBlobURL.MAX_UPLOAD_BLOB_BYTES + 1) + def numBlocks = data.remaining() / BlockBlobURL.MAX_STAGE_BLOCK_BYTES + long prevCount = 0 + def mockReceiver = Mock(IProgressReceiver) + + + when: + 
TransferManager.uploadFromNonReplayableFlowable(Flowable.just(data), bu, BlockBlobURL.MAX_STAGE_BLOCK_BYTES, 10, + new TransferManagerUploadToBlockBlobOptions(mockReceiver, null, null, null, 20)).blockingGet() + data.position(0) + + then: + // We should receive exactly one notification of the completed progress. + 1 * mockReceiver.reportProgress(data.remaining()) + + /* + We should receive at least one notification reporting an intermediary value per block, but possibly more + notifications will be received depending on the implementation. We specify numBlocks - 1 because the last block + will be the total size as above. Finally, we assert that the number reported monotonically increases. + */ + (numBlocks - 1.._) * mockReceiver.reportProgress(!data.remaining()) >> { long bytesTransferred -> + if (!(bytesTransferred > prevCount)) { + throw new IllegalArgumentException("Reported progress should monotonically increase") + } else { + prevCount = bytesTransferred + } + } + + // We should receive no notifications that report more progress than the size of the file. + 0 * mockReceiver.reportProgress({ it > data.remaining() }) + notThrown(IllegalArgumentException) + } + + def "Upload NRF network error"() { + setup: + /* + This test uses a Flowable that does not allow multiple subscriptions and therefore ensures that we are + buffering properly to allow for retries even given this source behavior. + */ + bu.upload(Flowable.just(defaultData), defaultDataSize).blockingGet() + def nrf = bu.download().blockingGet().body(null) + + // Mock a response that will always be retried. + def mockHttpResponse = Mock(HttpResponse) { + statusCode() >> 500 + bodyAsString() >> Single.just("") + } + + // Mock a policy that will always then check that the data is still the same and return a retryable error. 
+ def mockPolicy = Mock(RequestPolicy) { + sendAsync(_) >> { HttpRequest request -> + if (!(FlowableUtil.collectBytesInBuffer(request.body()).blockingGet() == defaultData)) { + throw new IllegalArgumentException() + } + return Single.just(mockHttpResponse) + } + } + + // Mock a factory that always returns our mock policy. + def mockFactory = Mock(RequestPolicyFactory) { + create(*_) >> mockPolicy + } + + // Build the pipeline + def testPipeline = HttpPipeline.build(new RequestRetryFactory(new RequestRetryOptions(null, 3, null, null, null, + null)), mockFactory) + bu = bu.withPipeline(testPipeline) + + when: + // Try to upload the flowable, which will hit a retry. A normal upload would throw, but buffering prevents that. + TransferManager.uploadFromNonReplayableFlowable(nrf, bu, 1024, 4, null).blockingGet() + + then: + // A second subscription to a download stream will + def e = thrown(StorageException) + e.statusCode() == 500 + } +} + diff --git a/storage/data-plane/src/test/java/com/microsoft/azure/storage/blob/DownloadResponseMockFlowable.java b/storage/data-plane/src/test/java/com/microsoft/azure/storage/blob/DownloadResponseMockFlowable.java new file mode 100644 index 0000000000000..5e0d40d076500 --- /dev/null +++ b/storage/data-plane/src/test/java/com/microsoft/azure/storage/blob/DownloadResponseMockFlowable.java @@ -0,0 +1,229 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.microsoft.azure.storage.blob; + +import com.microsoft.azure.storage.APISpec; +import com.microsoft.azure.storage.blob.models.BlobDownloadHeaders; +import com.microsoft.azure.storage.blob.models.BlobDownloadResponse; +import com.microsoft.azure.storage.blob.models.StorageErrorException; +import com.microsoft.rest.v2.http.HttpHeaders; +import com.microsoft.rest.v2.http.HttpResponse; +import io.reactivex.Flowable; +import io.reactivex.Single; +import org.reactivestreams.Subscriber; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.HashMap; + +public class DownloadResponseMockFlowable extends Flowable { + + public static final int DR_TEST_SCENARIO_SUCCESSFUL_ONE_CHUNK = 0; + + public static final int DR_TEST_SCENARIO_SUCCESSFUL_MULTI_CHUNK = 1; + + public static final int DR_TEST_SCENARIO_SUCCESSFUL_STREAM_FAILURES = 2; + + public static final int DR_TEST_SCENARIO_MAX_RETRIES_EXCEEDED = 3; + + public static final int DR_TEST_SCENARIO_NON_RETRYABLE_ERROR = 4; + + public static final int DR_TEST_SCENARIO_ERROR_GETTER_MIDDLE = 6; + + public static final int DR_TEST_SCENARIO_INFO_TEST = 8; + + + private int scenario; + + private int tryNumber; + + private HTTPGetterInfo info; + + private ByteBuffer scenarioData; + + public DownloadResponseMockFlowable(int scenario) { + this.scenario = scenario; + switch (this.scenario) { + case DR_TEST_SCENARIO_SUCCESSFUL_ONE_CHUNK: + this.scenarioData = APISpec.getRandomData(512 * 1024); + break; + case DR_TEST_SCENARIO_SUCCESSFUL_MULTI_CHUNK: + // Fall through + case DR_TEST_SCENARIO_SUCCESSFUL_STREAM_FAILURES: + this.scenarioData = APISpec.getRandomData(1024); + break; + } + } + + public ByteBuffer getScenarioData() { + return this.scenarioData; + } + + public int getTryNumber() { + return this.tryNumber; + } + + @Override + protected void subscribeActual(Subscriber s) { + switch (this.scenario) { + case DR_TEST_SCENARIO_SUCCESSFUL_ONE_CHUNK: + s.onNext(this.scenarioData.duplicate()); + 
s.onComplete(); + break; + + case DR_TEST_SCENARIO_SUCCESSFUL_MULTI_CHUNK: + for (int i = 0; i < 4; i++) { + ByteBuffer toSend = this.scenarioData.duplicate(); + toSend.position(i * 256); + toSend.limit((i + 1) * 256); + s.onNext(toSend); + } + s.onComplete(); + break; + + case DR_TEST_SCENARIO_SUCCESSFUL_STREAM_FAILURES: + if (this.tryNumber <= 3) { + // tryNumber is 1 indexed, so we have to sub 1. + if (this.info.offset() != (this.tryNumber - 1) * 256 || + this.info.count() != this.scenarioData.remaining() - (this.tryNumber - 1) * 256) { + s.onError(new IllegalArgumentException("Info values are incorrect.")); + return; + } + ByteBuffer toSend = this.scenarioData.duplicate(); + toSend.position((this.tryNumber - 1) * 256); + toSend.limit(this.tryNumber * 256); + s.onNext(toSend); + s.onError(new IOException()); + break; + } + if (this.info.offset() != (this.tryNumber - 1) * 256 || + this.info.count() != this.scenarioData.remaining() - (this.tryNumber - 1) * 256) { + s.onError(new IllegalArgumentException("Info values are incorrect.")); + return; + } + ByteBuffer toSend = this.scenarioData.duplicate(); + toSend.position((this.tryNumber - 1) * 256); + toSend.limit(this.tryNumber * 256); + s.onNext(toSend); + s.onComplete(); + break; + + case DR_TEST_SCENARIO_MAX_RETRIES_EXCEEDED: + s.onError(new IOException()); + break; + + case DR_TEST_SCENARIO_NON_RETRYABLE_ERROR: + s.onError(new Exception()); + break; + + case DR_TEST_SCENARIO_ERROR_GETTER_MIDDLE: + switch (this.tryNumber) { + case 1: + /* + We return a retryable error here so we have to invoke the getter, which will throw an error in + this case. + */ + s.onError(new IOException()); + break; + default: + s.onError(new IllegalArgumentException("Retried after getter error.")); + } + break; + + case DR_TEST_SCENARIO_INFO_TEST: + switch (this.tryNumber) { + case 1: + // Test the value of info when getting the initial response. 
+ s.onError(new IOException()); + break; + case 2: + // Test the value of info when getting an intermediate response. + s.onError(new IOException()); + break; + case 3: + // All calls to getter checked. Exit. This test does not check for data. + s.onComplete(); + break; + } + break; + + default: + s.onError(new IllegalArgumentException("Invalid test case")); + } + } + + public Single getter(HTTPGetterInfo info) { + this.tryNumber++; + this.info = info; + BlobDownloadResponse rawResponse = + new BlobDownloadResponse(null, 200, new BlobDownloadHeaders(), new HashMap<>(), this); + DownloadResponse response = new DownloadResponse(rawResponse, info, this::getter); + + switch (this.scenario) { + case DR_TEST_SCENARIO_ERROR_GETTER_MIDDLE: + switch (this.tryNumber) { + case 1: + return Single.just(response); + case 2: + /* + This validates that we don't retry in the getter even if it's a retryable error from the + service. + */ + throw new StorageErrorException("Message", new HttpResponse() { + @Override + public int statusCode() { + return 500; + } + + @Override + public String headerValue(String s) { + return null; + } + + @Override + public HttpHeaders headers() { + return null; + } + + @Override + public Flowable body() { + return null; + } + + @Override + public Single bodyAsByteArray() { + return null; + } + + @Override + public Single bodyAsString() { + return null; + } + }); + default: + throw new IllegalArgumentException("Retried after error in getter"); + } + case DR_TEST_SCENARIO_INFO_TEST: + // We also test that the info is updated in DR_TEST_SCENARIO_SUCCESSFUL_STREAM_FAILURES. 
+ if (info.count() != 10 || info.offset() != 20 || !info.eTag().equals("etag")) { + throw new IllegalArgumentException("Info values incorrect"); + } + return Single.just(response); + default: + return Single.just(response); + } + } +} diff --git a/storage/data-plane/src/test/java/com/microsoft/azure/storage/blob/DownloadResponseTest.groovy b/storage/data-plane/src/test/java/com/microsoft/azure/storage/blob/DownloadResponseTest.groovy new file mode 100644 index 0000000000000..d34305e5cba63 --- /dev/null +++ b/storage/data-plane/src/test/java/com/microsoft/azure/storage/blob/DownloadResponseTest.groovy @@ -0,0 +1,180 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.microsoft.azure.storage.blob + +import com.microsoft.azure.storage.APISpec +import com.microsoft.azure.storage.blob.models.StorageErrorException +import com.microsoft.rest.v2.util.FlowableUtil +import io.reactivex.Flowable +import spock.lang.Unroll + +class DownloadResponseTest extends APISpec { + BlockBlobURL bu + + def setup() { + bu = cu.createBlockBlobURL(generateBlobName()) + bu.upload(Flowable.just(defaultData), defaultText.length(), null, null, null, null).blockingGet() + } + + /* + This shouldn't really be different from anything else we're doing in the other tests. Just a sanity check against + a real use case. 
+ */ + def "Network call"() { + expect: + FlowableUtil.collectBytesInBuffer(bu.download(null, null, false, null).blockingGet().body(null)) + .blockingGet() == defaultData + } + + @Unroll + def "Successful"() { + setup: + DownloadResponseMockFlowable flowable = new DownloadResponseMockFlowable(scenario) + def info = new HTTPGetterInfo() + info.withOffset(0) + .withCount(flowable.getScenarioData().remaining()) + .withETag("etag") + + def options = new ReliableDownloadOptions() + options.withMaxRetryRequests(5) + + def mockRawResponse = flowable.getter(info).blockingGet().rawResponse() + + when: + DownloadResponse response = new DownloadResponse(mockRawResponse, info, { HTTPGetterInfo newInfo -> + flowable.getter(newInfo) + }) + + then: + FlowableUtil.collectBytesInBuffer(response.body(options)).blockingGet() == flowable.getScenarioData() + flowable.getTryNumber() == tryNumber + + + where: + scenario | tryNumber | provideInitialResponse + DownloadResponseMockFlowable.DR_TEST_SCENARIO_SUCCESSFUL_ONE_CHUNK | 1 | false + DownloadResponseMockFlowable.DR_TEST_SCENARIO_SUCCESSFUL_MULTI_CHUNK | 1 | false + DownloadResponseMockFlowable.DR_TEST_SCENARIO_SUCCESSFUL_STREAM_FAILURES | 4 | false + } + + @Unroll + def "Failure"() { + setup: + def flowable = new DownloadResponseMockFlowable(scenario) + + def options = new ReliableDownloadOptions() + .withMaxRetryRequests(5) + + def info = new HTTPGetterInfo().withETag("etag") + def mockRawResponse = flowable.getter(info).blockingGet().rawResponse() + + when: + DownloadResponse response = new DownloadResponse(mockRawResponse, info, { HTTPGetterInfo newInfo -> + flowable.getter(newInfo) + }) + response.body(options).blockingSubscribe() + + then: + def e = thrown(Throwable) // Blocking subscribe will sometimes wrap the IOException in a RuntimeException. 
+ if (e.getCause() != null) { + e = e.getCause() + } + exceptionType.isInstance(e) + flowable.getTryNumber() == tryNumber + + /* + tryNumber is 7 because the initial request is the first try, then it will fail when retryCount>maxRetryCount, + which is when retryCount=6 and therefore tryNumber=7 + */ + where: + scenario | exceptionType | tryNumber + DownloadResponseMockFlowable.DR_TEST_SCENARIO_MAX_RETRIES_EXCEEDED | IOException | 7 + DownloadResponseMockFlowable.DR_TEST_SCENARIO_NON_RETRYABLE_ERROR | Exception | 1 + DownloadResponseMockFlowable.DR_TEST_SCENARIO_ERROR_GETTER_MIDDLE | StorageErrorException | 2 + } + + @Unroll + def "Info null IA"() { + setup: + def flowable = new DownloadResponseMockFlowable( + DownloadResponseMockFlowable.DR_TEST_SCENARIO_SUCCESSFUL_ONE_CHUNK) + + when: + new DownloadResponse(flowable.getter(info).blockingGet().rawResponse(), info, + { HTTPGetterInfo newInfo -> + flowable.getter(newInfo) + }) + + + then: + thrown(IllegalArgumentException) + + where: + info | _ + null | _ + new HTTPGetterInfo().withETag(null) | _ + } + + def "Options IA"() { + when: + new ReliableDownloadOptions().withMaxRetryRequests(-1) + + then: + thrown(IllegalArgumentException) + } + + def "Getter IA"() { + setup: + def flowable = new DownloadResponseMockFlowable( + DownloadResponseMockFlowable.DR_TEST_SCENARIO_SUCCESSFUL_ONE_CHUNK) + + when: + def response = new DownloadResponse(flowable.getter(new HTTPGetterInfo()).blockingGet() + .rawResponse(), new HTTPGetterInfo().withETag("etag"), null) + response.body(null).blockingSubscribe() + + then: + thrown(IllegalArgumentException) + } + + def "Info"() { + setup: + def flowable = new DownloadResponseMockFlowable(DownloadResponseMockFlowable.DR_TEST_SCENARIO_INFO_TEST) + def info = new HTTPGetterInfo() + info.withOffset(20) + info.withCount(10) + info.withETag("etag") + def options = new ReliableDownloadOptions() + options.withMaxRetryRequests(5) + + when: + def response = new 
DownloadResponse(flowable.getter(info).blockingGet().rawResponse(), info, + { HTTPGetterInfo newInfo -> + return flowable.getter(newInfo) + }) + response.body(options).blockingSubscribe() + + then: + flowable.tryNumber == 3 + } + + def "Info count IA"() { + when: + new HTTPGetterInfo().withCount(-1) + + then: + thrown(IllegalArgumentException) + } +} diff --git a/storage/data-plane/src/test/java/com/microsoft/azure/storage/blob/HelperTest.groovy b/storage/data-plane/src/test/java/com/microsoft/azure/storage/blob/HelperTest.groovy new file mode 100644 index 0000000000000..aec349ca79fa0 --- /dev/null +++ b/storage/data-plane/src/test/java/com/microsoft/azure/storage/blob/HelperTest.groovy @@ -0,0 +1,643 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.microsoft.azure.storage.blob + +import com.microsoft.azure.storage.APISpec +import com.microsoft.azure.storage.blob.models.AccessPolicy +import com.microsoft.azure.storage.blob.models.SignedIdentifier +import com.microsoft.azure.storage.blob.models.StorageErrorCode +import spock.lang.Unroll + +import java.time.OffsetDateTime +import java.time.ZoneOffset + +class HelperTest extends APISpec { + + def "responseError"() { + when: + cu.listBlobsFlatSegment("garbage", null, null).blockingGet() + + then: + def e = thrown(StorageException) + e.errorCode() == StorageErrorCode.INVALID_QUERY_PARAMETER_VALUE + e.statusCode() == 400 + e.message().contains("Value for one of the query parameters specified in the request URI is invalid.") + e.getMessage().contains("> level + } + } + + /* + Clean out the logs directory so we can validate that it grows, which is how we test default logging. We only + need to do this once per test past rather than per test, and we don't have to be entirely successful. This should + just keep it from growing too large. + */ + def setupSpec() { + File logsDir = new File(System.getProperty("java.io.tmpdir") + "AzureStorageJavaSDKLogs") + for (File file : logsDir.listFiles()) { + file.delete() + } + } + + /* + We test that default logging is on by checking that the size of the logging folder has grown in Warning and Error + cases when we expect default logging. We cannot check a specific file because we have no way of retrieving the + filename, and there is some randomness involved. We can rely on a fairly naive implementation of this method + as we know the directory will exist and that there will be no subdirectories. 
+ */ + def calculateLogsDirectorySize() { + File logsDir = new File(System.getProperty("java.io.tmpdir") + "AzureStorageJavaSDKLogs") + long length = 0 + + for (File file : logsDir.listFiles()){ + length += file.size() + } + return length + } + + @Unroll + def "Successful fast response"() { + setup: + def factory = new LoggingFactory(new LoggingOptions(2000)) + + def logger = getMockLogger(logLevel) + def requestPolicyOptions = new RequestPolicyOptions(logger) + /* + By mocking a policy, we can simply call sendAsync on the policy under test directly instead of having to + construct a pipeline + */ + def mockDownstream = Mock(RequestPolicy) { + sendAsync(_) >> Single.just(getStubResponse(200)) + } + + def policy = factory.create(mockDownstream, requestPolicyOptions) + def logDirectorySize = calculateLogsDirectorySize() + def slf4jLogger = TestLoggerFactory.getTestLogger("Azure Storage Java SDK") + slf4jLogger.clearAll() + + when: + policy.sendAsync(getMockRequest()).blockingGet() + + then: + /* + logCount1 * means that we expect this method to be called with these parameters logCount1 number of + times. '_' means we don't care what the value of that parameter is, so in both of these cases, we are specifying + that log should be called with HttpPipelineLogLevel.INFO as the first argument, and the other arguments can + be anything. The '>>' operator allows us to specify some behavior on the mocked forceLogger when this method is + called. Because there is lots of string formatting going on, we can't match against the log string in the + argument list, so we perform some logic to see if it looks correct and throw if it looks incorrect to actually + validate the logging behavior. 
+ */ + logCount1 * logger.log(HttpPipelineLogLevel.INFO, _, _) >> + { HttpPipelineLogLevel level, String message, Object[] params -> + if (!message.contains("OUTGOING REQUEST")) { + throw new IllegalArgumentException(message) + } + } + logCount2 * logger.log(HttpPipelineLogLevel.INFO, _, _) >> + { HttpPipelineLogLevel level, String message, Object[] params -> + if (!(message.contains("Success") && message.contains("Request try"))) { + throw new IllegalArgumentException(message) + } + } + 0 * logger.log(HttpPipelineLogLevel.WARNING, _, _) + 0 * logger.log(HttpPipelineLogLevel.ERROR, _, _) + logDirectorySize == calculateLogsDirectorySize() + slf4jLogger.getAllLoggingEvents().size() == 2 // The slf4j test logger is always set to info. + + where: + logLevel | logCount1 | logCount2 + HttpPipelineLogLevel.INFO | 1 | 1 + HttpPipelineLogLevel.WARNING | 0 | 0 + HttpPipelineLogLevel.ERROR | 0 | 0 + } + + @Unroll + def "Successful slow response"() { + setup: + def factory = new LoggingFactory(new LoggingOptions(500, disableDefault)) + + def logger = getMockLogger(logLevel) + def requestPolicyOptions = new RequestPolicyOptions(logger) + def mockDownstream = Mock(RequestPolicy) { + sendAsync(_) >> { + sleep(600) + Single.just(getStubResponse(200)) + } + } + + def policy = factory.create(mockDownstream, requestPolicyOptions) + int logDirectorySize = calculateLogsDirectorySize() + def slf4jLogger = TestLoggerFactory.getTestLogger("Azure Storage Java SDK") + slf4jLogger.clearAll() + + when: + policy.sendAsync(getMockRequest()).blockingGet() + + then: + logCount * logger.log(HttpPipelineLogLevel.WARNING, _, _) >> + { HttpPipelineLogLevel level, String message, Object[] params -> + if (!(message.startsWith("SLOW OPERATION") && message.contains("Request try"))) { + throw new IllegalArgumentException(message) + } + } + 0 * logger.log(HttpPipelineLogLevel.ERROR, _, _) + calculateLogsDirectorySize().compareTo(logDirectorySize) == result + slf4jLogger.getAllLoggingEvents().size() == 2 // 
The slf4j test logger is always set to info. + + where: + logLevel | logCount | disableDefault || result + HttpPipelineLogLevel.INFO | 1 | false || 1 + HttpPipelineLogLevel.WARNING | 1 | true || 0 + HttpPipelineLogLevel.ERROR | 0 | false || 1 + } + + @Unroll + def "Error response"() { + setup: + def factory = new LoggingFactory(new LoggingOptions(2000)) + + def logger = getMockLogger(logLevel) + def requestPolicyOptions = new RequestPolicyOptions(logger) + def mockDownstream = Mock(RequestPolicy) { + sendAsync(_) >> Single.just(getStubResponse(code)) + } + + def policy = factory.create(mockDownstream, requestPolicyOptions) + def logDirectorySize = calculateLogsDirectorySize() + def slf4jLogger = TestLoggerFactory.getTestLogger("Azure Storage Java SDK") + slf4jLogger.clearAll() + + when: + policy.sendAsync(getMockRequest()).blockingGet() + + then: + 1 * logger.log(HttpPipelineLogLevel.ERROR, _, _) >> + { HttpPipelineLogLevel level, String message, Object[] params -> + if (!message.startsWith("REQUEST ERROR")) { + throw new IllegalArgumentException(message) + } + } + calculateLogsDirectorySize() > logDirectorySize + slf4jLogger.getAllLoggingEvents().size() == 2 // The slf4j test logger is always set to info. 
+ + where: + logLevel | code + HttpPipelineLogLevel.INFO | 400 + HttpPipelineLogLevel.INFO | 503 + HttpPipelineLogLevel.WARNING | 400 + HttpPipelineLogLevel.WARNING | 503 + HttpPipelineLogLevel.ERROR | 400 + HttpPipelineLogLevel.ERROR | 503 + } + + @Unroll + def "Error responses expected"() { + setup: + def factory = new LoggingFactory(new LoggingOptions(2000)) + + def logger = getMockLogger(logLevel) + def requestPolicyOptions = new RequestPolicyOptions(logger) + def mockDownstream = Mock(RequestPolicy) { + sendAsync(_) >> Single.just(getStubResponse(code)) + } + + def policy = factory.create(mockDownstream, requestPolicyOptions) + def logDirectorySize = calculateLogsDirectorySize() + def slf4jLogger = TestLoggerFactory.getTestLogger("Azure Storage Java SDK") + slf4jLogger.clearAll() + + when: + policy.sendAsync(getMockRequest()).blockingGet() + + then: + /* + Because all of these "error" responses are potentially expected (perhaps from a createIfNotExist call), we + don't want to say they are errors in the log. Therefore, we specify that we never log with log level ERROR in + the case of these status codes. + */ + 0 * logger.log(HttpPipelineLogLevel.ERROR, _, _) + calculateLogsDirectorySize() == logDirectorySize + slf4jLogger.getAllLoggingEvents().size() == 2 // The slf4j test logger is always set to info. + + /* + Note that these where-tables usually have a column of '_' if we only need to test one variable. However, because + '_' is used in some cases to specify method parameter behavior, the overload becomes confusing both for the + reader and the IDE, so we just specify an extra variable that is constant. 
+ */ + where: + logLevel | code + HttpPipelineLogLevel.INFO | 404 + HttpPipelineLogLevel.INFO | 416 + HttpPipelineLogLevel.INFO | 412 + HttpPipelineLogLevel.INFO | 409 + } + + @Unroll + def "Network error"() { + setup: + def factory = new LoggingFactory(new LoggingOptions(duration)) + + def logger = getMockLogger(logLevel) + def requestPolicyOptions = new RequestPolicyOptions(logger) + def mockDownstream = Mock(RequestPolicy) { + sendAsync(_) >> { + Single.error(new SocketException("Check for me")) + } + } + + def policy = factory.create(mockDownstream, requestPolicyOptions) + def logDirectorySize = calculateLogsDirectorySize() + def slf4jLogger = TestLoggerFactory.getTestLogger("Azure Storage Java SDK") + slf4jLogger.clearAll() + + when: + policy.sendAsync(getMockRequest()).blockingGet() + + then: + thrown(RuntimeException) // Because we return this from the downstream, it will be thrown when we blockingGet. + 1 * logger.log(HttpPipelineLogLevel.ERROR, _, _) >> + { HttpPipelineLogLevel level, String message, Object[] params -> + if (!(message.contains("Error message") && message.contains("Check for me"))) { + throw new IllegalArgumentException(message) + } + } + calculateLogsDirectorySize() > logDirectorySize + slf4jLogger.getAllLoggingEvents().size() == 2 // The slf4j test logger is always set to info. + + where: + logLevel | duration + HttpPipelineLogLevel.INFO | 500 + HttpPipelineLogLevel.WARNING | 500 + HttpPipelineLogLevel.ERROR | 500 + } + + /* + This is a basic test to validate that a basic scenario works in the context of an actual Pipeline. 
+ */ + def "Pipeline integration test"() { + setup: + def logger = getMockLogger(HttpPipelineLogLevel.INFO) + def po = new PipelineOptions() + po.withLogger(logger) + + cu = primaryServiceURL.createContainerURL(generateContainerName()) + cu = new ContainerURL(cu.toURL(), StorageURL.createPipeline(primaryCreds, po)) + + when: + cu.create(null, null, null).blockingGet() + + then: + 2 * logger.log(*_) + } + + /* + This test validates the content of the logs when shared key is used. Note that the Auth header is redacted. + */ + def "Shared key logs"() { + setup: + def factory = new LoggingFactory(new LoggingOptions(2000)) + + def logger = getMockLogger(HttpPipelineLogLevel.INFO) + def requestPolicyOptions = new RequestPolicyOptions(logger) + + def mockDownstream = Mock(RequestPolicy) { + sendAsync(_) >> Single.just(getStubResponse(200)) + } + + def policy = factory.create(mockDownstream, requestPolicyOptions) + + def userAgentValue = "Azure-Storage/0.1 " + def authorizationValue = "authorizationValue" + def dateValue = "Mon, 29 Oct 2018 21:12:12 GMT" + def requestId = UUID.randomUUID().toString() + def httpHeaders = new HttpHeaders() + httpHeaders.set(Constants.HeaderConstants.VERSION, Constants.HeaderConstants.TARGET_STORAGE_VERSION) + httpHeaders.set(Constants.HeaderConstants.USER_AGENT, userAgentValue) + httpHeaders.set(Constants.HeaderConstants.AUTHORIZATION, authorizationValue) + httpHeaders.set(Constants.HeaderConstants.DATE, dateValue) + httpHeaders.set(Constants.HeaderConstants.CLIENT_REQUEST_ID_HEADER, requestId) + def urlString = "http://devtest.blob.core.windows.net/test-container/test-blob" + def url = new URL(urlString) + + when: + policy.sendAsync(new HttpRequest(null, HttpMethod.HEAD, url, httpHeaders, null, null)).blockingGet() + + then: + 1 * logger.log(HttpPipelineLogLevel.INFO, _, _) >> + { HttpPipelineLogLevel level, String message, Object[] params -> + if (!message.contains("OUTGOING REQUEST")) { + throw new IllegalArgumentException(message) + } + 
} + 1 * logger.log(HttpPipelineLogLevel.INFO, _, _) >> + { HttpPipelineLogLevel level, String message, Object[] params -> + if (!(message.contains("Success") + && message.contains("Request try") + && message.contains(HttpMethod.HEAD.toString()) + && message.contains(urlString) + && message.contains(url.toString()) + && message.contains(Constants.HeaderConstants.VERSION) + && message.contains(Constants.HeaderConstants.TARGET_STORAGE_VERSION) + && message.contains(Constants.HeaderConstants.DATE) + && message.contains(dateValue) + && message.contains(Constants.HeaderConstants.CLIENT_REQUEST_ID_HEADER) + && message.contains(requestId) + && message.contains(Constants.HeaderConstants.USER_AGENT) + && message.contains(userAgentValue) + && message.contains(Constants.HeaderConstants.AUTHORIZATION) + && message.contains(Constants.REDACTED) + && !message.contains(authorizationValue))) { + throw new IllegalArgumentException(message) + } + } + } + + /* + This test validates the contents of the logs when sas is used. Note that the signatures are redacted. 
+ */ + def "SAS logs"() { + setup: + def factory = new LoggingFactory(new LoggingOptions(2000)) + + def logger = getMockLogger(HttpPipelineLogLevel.INFO) + def requestPolicyOptions = new RequestPolicyOptions(logger) + + def mockDownstream = Mock(RequestPolicy) { + sendAsync(_) >> Single.just(getStubResponse(200)) + } + + def policy = factory.create(mockDownstream, requestPolicyOptions) + + def userAgentValue = "Azure-Storage/0.1 " + def dateValue = "Mon, 29 Oct 2018 21:12:12 GMT" + def requestId = UUID.randomUUID().toString() + def copySource = "http://dev.blob.core.windows.net/test-container/test-blob?snapshot=2018-10-30T19:19:22.1016437Z&sv=2018-03-28&ss=b&srt=co&st=2018-10-29T20:45:11Z&se=2018-10-29T22:45:11Z&sp=rwdlac&sig=copySourceSignature" + def httpHeaders = new HttpHeaders() + httpHeaders.set(Constants.HeaderConstants.VERSION, Constants.HeaderConstants.TARGET_STORAGE_VERSION) + httpHeaders.set(Constants.HeaderConstants.USER_AGENT, userAgentValue) + httpHeaders.set(Constants.HeaderConstants.DATE, dateValue) + httpHeaders.set(Constants.HeaderConstants.CLIENT_REQUEST_ID_HEADER, requestId) + httpHeaders.set(Constants.HeaderConstants.COPY_SOURCE, copySource) + def urlString = "http://dev.blob.core.windows.net/test-container/test-blob?sv=2018-03-29&ss=f&srt=s&st=2018-10-30T20%3A45%3A11Z&se=2019-10-29T22%3A45%3A11Z&sp=rw&sig=urlSignature&comp=incrementalcopy" + def url = new URL(urlString) + + when: + policy.sendAsync(new HttpRequest(null, HttpMethod.PUT, url, httpHeaders, null, null)).blockingGet() + + then: + 1 * logger.log(HttpPipelineLogLevel.INFO, _, _) >> + { HttpPipelineLogLevel level, String message, Object[] params -> + if (!message.contains("OUTGOING REQUEST")) { + throw new IllegalArgumentException(message) + } + } + 1 * logger.log(HttpPipelineLogLevel.INFO, _, _) >> + { HttpPipelineLogLevel level, String message, Object[] params -> + if (!(message.contains("Success") + && message.contains("Request try") + && message.contains(HttpMethod.PUT.toString()) 
+ && message.contains(Constants.HeaderConstants.VERSION) + && message.contains(Constants.HeaderConstants.TARGET_STORAGE_VERSION) + && message.contains(Constants.HeaderConstants.DATE) + && message.contains(dateValue) + && message.contains(Constants.HeaderConstants.CLIENT_REQUEST_ID_HEADER) + && message.contains(requestId) + && message.contains(Constants.HeaderConstants.USER_AGENT) + && message.contains(userAgentValue) + + // SAS URL parameters + && message.contains("sv=2018-03-29") + && message.contains("ss=f") + && message.contains("srt=s") + && message.contains("st=2018-10-30T20%3A45%3A11Z") + && message.contains("se=2019-10-29T22%3A45%3A11Z") + && message.contains("sp=rw") + && !message.contains("sig=urlSignature") + + // Copy Source URL parameters + && message.contains("sv=2018-03-28") + && message.contains("ss=b") + && message.contains("srt=co") + && message.contains("st=2018-10-29T20%3A45%3A11Z") + && message.contains("se=2018-10-29T22%3A45%3A11Z") + && message.contains("sp=rwdlac") + && message.contains("sig=REDACTED") + && !message.contains("copySourceSignature") + )) { + throw new IllegalArgumentException(message) + } + } + } +} diff --git a/storage/data-plane/src/test/java/com/microsoft/azure/storage/blob/ProgressReporterTest.groovy b/storage/data-plane/src/test/java/com/microsoft/azure/storage/blob/ProgressReporterTest.groovy new file mode 100644 index 0000000000000..fd5a1c54b6481 --- /dev/null +++ b/storage/data-plane/src/test/java/com/microsoft/azure/storage/blob/ProgressReporterTest.groovy @@ -0,0 +1,115 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.microsoft.azure.storage.blob; + +import com.microsoft.azure.storage.APISpec +import io.reactivex.Flowable + +import java.nio.ByteBuffer +import java.util.concurrent.atomic.AtomicLong +import java.util.concurrent.locks.ReentrantLock; + +class ProgressReporterTest extends APISpec { + def "Report progress sequential"() { + setup: + def buf1 = getRandomData(10) + def buf2 = getRandomData(15) + def buf3 = getRandomData(5) + + def mockReceiver = Mock(IProgressReceiver) + + Flowable data = Flowable.just(buf1, buf2, buf3) + data = ProgressReporter.addProgressReporting(data, mockReceiver) + + when: + data.blockingSubscribe() + data.blockingSubscribe() // Subscribing twice enforces invocation of rewind + + then: + // The same benchmarks should be reported on each subscription (retry). We should never go over total data size. + 2 * mockReceiver.reportProgress(10) + 2 * mockReceiver.reportProgress(25) + 2 * mockReceiver.reportProgress(30) + 0 * mockReceiver.reportProgress({it > 30}) + } + + def "Report progress sequential network test"() { + setup: + def mockReceiver = Mock(IProgressReceiver) + + def buffer = getRandomData(1 * 1024 * 1024) + def data = ProgressReporter.addProgressReporting(Flowable.just(buffer), mockReceiver) + + when: + def bu = cu.createBlockBlobURL(generateBlobName()) + bu.upload(data, buffer.remaining()).blockingGet() + + then: + /* + With the HTTP client, etc. involved, the best we can guarantee is that it's called once with the total. There + may or may not be any intermediary calls. 
This test mostly looks to validate that there is no interference + with actual network calls. + */ + 1 * mockReceiver.reportProgress(1 * 1024 * 1024) + } + + def "Report progress parallel"() { + setup: + def buf1 = getRandomData(10) + def buf2 = getRandomData(15) + def buf3 = getRandomData(5) + + def lock = new ReentrantLock() + def totalProgress = new AtomicLong(0) + + def mockReceiver = Mock(IProgressReceiver) + def data = Flowable.just(buf1, buf2, buf3) + def data2 = Flowable.just(buf3, buf2, buf1) + data = ProgressReporter.addParallelProgressReporting(data, mockReceiver, lock, totalProgress) + data2 = ProgressReporter.addParallelProgressReporting(data2, mockReceiver, lock, totalProgress) + + when: + data.subscribe() + data2.subscribe() + data.subscribe() + data2.subscribe() + + sleep(3000) // These Flowables should complete quickly, but we don't want to block or it'll order everything + + then: + /* + There should be at least one call reporting the total length of the data. There may be two if both data and + data2 complete before the second batch of subscriptions + */ + (1..2) * mockReceiver.reportProgress(60) + + /* + There should be 12 calls total, but either one or two of them could be reporting the total length, so we + can only guarantee four calls with an unknown parameter. This test doesn't strictly mimic the network as + there would never be concurrent subscriptions to the same Flowable as may be the case here, but it is good + enough. + */ + (10..11) * mockReceiver.reportProgress(_) + + /* + We should never report more progress than the 60 total (30 from each Flowable--Resubscribing is a retry and + therefore rewinds). + */ + 0 * mockReceiver.reportProgress({it > 60}) + } + + // See TransferManagerTest for network tests of the parallel ProgressReporter. 
+} diff --git a/storage/data-plane/src/test/java/com/microsoft/azure/storage/blob/RequestRetryTestFactory.java b/storage/data-plane/src/test/java/com/microsoft/azure/storage/blob/RequestRetryTestFactory.java new file mode 100644 index 0000000000000..2ebbfdc2ab630 --- /dev/null +++ b/storage/data-plane/src/test/java/com/microsoft/azure/storage/blob/RequestRetryTestFactory.java @@ -0,0 +1,440 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.microsoft.azure.storage.blob; + +import com.microsoft.azure.storage.blob.models.StorageErrorException; +import com.microsoft.rest.v2.http.*; +import com.microsoft.rest.v2.policy.RequestPolicy; +import com.microsoft.rest.v2.policy.RequestPolicyFactory; +import com.microsoft.rest.v2.policy.RequestPolicyOptions; +import com.microsoft.rest.v2.util.FlowableUtil; +import io.reactivex.Flowable; +import io.reactivex.Single; +import io.reactivex.SingleObserver; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.nio.ByteBuffer; +import java.time.OffsetDateTime; +import java.time.temporal.ChronoUnit; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import static java.lang.StrictMath.pow; + +public class RequestRetryTestFactory implements RequestPolicyFactory { + public static final int RETRY_TEST_SCENARIO_RETRY_UNTIL_SUCCESS = 1; + + public static final int RETRY_TEST_SCENARIO_RETRY_UNTIL_MAX_RETRIES = 2; 
+ + public static final int RETRY_TEST_SCENARIO_NON_RETRYABLE = 3; + + public static final int RETRY_TEST_SCENARIO_NON_RETRYABLE_SECONDARY = 4; + + public static final int RETRY_TEST_SCENARIO_NETWORK_ERROR = 5; + + public static final int RETRY_TEST_SCENARIO_EXPONENTIAL_TIMING = 6; + + public static final int RETRY_TEST_SCENARIO_FIXED_TIMING = 7; + + public static final int RETRY_TEST_SCENARIO_TRY_TIMEOUT = 8; + + public static final int RETRY_TEST_SCENARIO_NON_REPLAYABLE_FLOWABLE = 9; + + // Cancelable + + public static final String RETRY_TEST_PRIMARY_HOST = "PrimaryDC"; + + public static final String RETRY_TEST_SECONDARY_HOST = "SecondaryDC"; + public static final ByteBuffer RETRY_TEST_DEFAULT_DATA = ByteBuffer.wrap("Default data".getBytes()); + private static final String RETRY_TEST_HEADER = "TestHeader"; + private static final String RETRY_TEST_QUERY_PARAM = "TestQueryParam"; + private static final Single RETRY_TEST_OK_RESPONSE = + Single.just(new RetryTestResponse(200)); + + /* + We wrap the response in a StorageErrorException to mock the HttpClient. Any responses that the HttpClient receives + that is not an expected response is wrapped in a StorageErrorException. + */ + private static final Single RETRY_TEST_TEMPORARY_ERROR_RESPONSE = + Single.just(new RetryTestResponse(503)); + + private static final Single RETRY_TEST_TIMEOUT_ERROR_RESPONSE = + Single.just(new RetryTestResponse(500)); + + private static final Single RETRY_TEST_NON_RETRYABLE_ERROR = + Single.just(new RetryTestResponse(400)); + + private static final Single RETRY_TEST_NOT_FOUND_RESPONSE = + Single.just(new RetryTestResponse(404)); + + private int retryTestScenario; + + private RequestRetryOptions options; + + /* + It is atypical and not recommended to have mutable state on the factory itself. However, the tests will need to + be able to validate the number of tries, and the tests will not have access to the policies, so we break our own + rule here. 
+ */ + private int tryNumber; + + private OffsetDateTime time; + + public RequestRetryTestFactory(int scenario, RequestRetryOptions options) { + this.retryTestScenario = scenario; + this.options = options; + } + + public int getTryNumber() { + return this.tryNumber; + } + + @Override + public RequestPolicy create(RequestPolicy next, RequestPolicyOptions options) { + return new RetryTestPolicy(this); + } + + // The retry factory only really cares about the status code. + private static final class RetryTestResponse extends HttpResponse { + + int statusCode; + + RetryTestResponse(int statusCode) { + this.statusCode = statusCode; + } + + @Override + public int statusCode() { + return this.statusCode; + } + + @Override + public String headerValue(String headerName) { + return null; + } + + @Override + public HttpHeaders headers() { + return null; + } + + @Override + public Flowable body() { + return null; + } + + @Override + public Single bodyAsByteArray() { + return null; + } + + @Override + public Single bodyAsString() { + return null; + } + } + + private final class RetryTestPolicy implements RequestPolicy { + private RequestRetryTestFactory factory; + + RetryTestPolicy(RequestRetryTestFactory parent) { + this.factory = parent; + } + + @Override + public Single sendAsync(HttpRequest request) { + this.factory.tryNumber++; + if (this.factory.tryNumber > this.factory.options.maxTries()) { + throw new IllegalArgumentException("Try number has exceeded max tries"); + } + + // Validate the expected preconditions for each try: The correct host is used. + String expectedHost = RETRY_TEST_PRIMARY_HOST; + if (this.factory.tryNumber % 2 == 0) { + /* + Special cases: retry until success scenario fail's on the 4th try with a 404 on the secondary, so we + never expect it to check the secondary after that. All other tests should continue to check the + secondary. + Exponential timing only tests secondary backoff once but uses the rest of the retries to hit the max + delay. 
+ */ + if (!((this.factory.retryTestScenario == RequestRetryTestFactory.RETRY_TEST_SCENARIO_RETRY_UNTIL_SUCCESS + && this.factory.tryNumber > 4) || + (this.factory.retryTestScenario == + RequestRetryTestFactory.RETRY_TEST_SCENARIO_EXPONENTIAL_TIMING + && this.factory.tryNumber > 2))) { + expectedHost = RETRY_TEST_SECONDARY_HOST; + } + } + if (!request.url().getHost().equals(expectedHost)) { + throw new IllegalArgumentException("The host does not match the expected host"); + } + + /* + This policy will add test headers and query parameters. Ensure they are removed/reset for each retry. + The retry policy should be starting with a fresh copy of the request for every try. + */ + if (request.headers().value(RETRY_TEST_HEADER) != null) { + throw new IllegalArgumentException("Headers not reset."); + } + if ((request.url().getQuery() != null && request.url().getQuery().contains(RETRY_TEST_QUERY_PARAM))) { + throw new IllegalArgumentException("Query params not reset."); + } + if (FlowableUtil.collectBytesInBuffer(request.body()).blockingGet() + .compareTo(RETRY_TEST_DEFAULT_DATA) != 0) { + throw new IllegalArgumentException(("Body not reset.")); + } + + /* + Modify the request as policies downstream of the retry policy are likely to do. These must be reset on each + try. + */ + request.headers().set(RETRY_TEST_HEADER, "testheader"); + UrlBuilder builder = UrlBuilder.parse(request.url()); + builder.setQueryParameter(RETRY_TEST_QUERY_PARAM, "testquery"); + try { + request.withUrl(builder.toURL()); + } catch (MalformedURLException e) { + throw new IllegalArgumentException("The URL has been mangled"); + } + + switch (this.factory.retryTestScenario) { + case RETRY_TEST_SCENARIO_RETRY_UNTIL_SUCCESS: + switch (this.factory.tryNumber) { + case 1: + /* + The timer is set with a timeout on the Single used to make the request. If the single + doesn't return success fast enough, it will throw a TimeoutException. We can short circuit + the waiting by simply returning an error. 
We will validate the time parameter later. Here, + we just test that a timeout is retried. + */ + return Single.error(new TimeoutException()); + case 2: + return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; + case 3: + return RETRY_TEST_TIMEOUT_ERROR_RESPONSE; + case 4: + /* + By returning 404 when we should be testing against the secondary, we exercise the logic + that should prevent further tries to secondary when the secondary evidently doesn't have the + data. + */ + return RETRY_TEST_NOT_FOUND_RESPONSE; + case 5: + // Just to get to a sixth try where we ensure we should not be trying the secondary again. + return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; + case 6: + return RETRY_TEST_OK_RESPONSE; + default: + throw new IllegalArgumentException("Continued trying after success."); + } + + case RETRY_TEST_SCENARIO_RETRY_UNTIL_MAX_RETRIES: + return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; + + case RETRY_TEST_SCENARIO_NON_RETRYABLE: + switch (this.factory.tryNumber) { + case 1: + return RETRY_TEST_NON_RETRYABLE_ERROR; + default: + throw new IllegalArgumentException("Continued trying after non retryable error."); + } + + case RETRY_TEST_SCENARIO_NON_RETRYABLE_SECONDARY: + switch (this.factory.tryNumber) { + case 1: + return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; + case 2: + return RETRY_TEST_NON_RETRYABLE_ERROR; + default: + throw new IllegalArgumentException("Continued trying after non retryable error."); + } + + case RETRY_TEST_SCENARIO_NETWORK_ERROR: + switch (this.factory.tryNumber) { + case 1: + // fall through + case 2: + return Single.error(new IOException()); + case 3: + return RETRY_TEST_OK_RESPONSE; + default: + throw new IllegalArgumentException("Continued retrying after success."); + } + + case RETRY_TEST_SCENARIO_TRY_TIMEOUT: + switch (this.factory.tryNumber) { + case 1: + return RETRY_TEST_OK_RESPONSE.delay(options.tryTimeout() + 1, TimeUnit.SECONDS); + case 2: + return RETRY_TEST_OK_RESPONSE.delay(options.tryTimeout() + 1, TimeUnit.SECONDS); + case 3: + return 
RETRY_TEST_OK_RESPONSE.delay(options.tryTimeout() - 1, TimeUnit.SECONDS); + default: + throw new IllegalArgumentException("Continued retrying after success"); + } + + case RETRY_TEST_SCENARIO_EXPONENTIAL_TIMING: + switch (this.factory.tryNumber) { + case 1: + this.factory.time = OffsetDateTime.now(); + return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; + case 2: + /* + Calculation for secondary is always the same, so we don't need to keep testing it. Not + trying the secondary any more will also speed up the test. + */ + return testDelayBounds(1, false, + RETRY_TEST_NOT_FOUND_RESPONSE); + case 3: + return testDelayBounds(2, true, + RETRY_TEST_TEMPORARY_ERROR_RESPONSE); + case 4: + return testDelayBounds(3, true, + RETRY_TEST_TEMPORARY_ERROR_RESPONSE); + case 5: + /* + With the current configuration in RetryTest, the maxRetryDelay should be reached upon the + fourth try to the primary. + */ + return testMaxDelayBounds(RETRY_TEST_TEMPORARY_ERROR_RESPONSE); + case 6: + return testMaxDelayBounds(RETRY_TEST_OK_RESPONSE); + default: + throw new IllegalArgumentException("Max retries exceeded/continued retrying after success"); + } + + case RETRY_TEST_SCENARIO_FIXED_TIMING: + switch (this.factory.tryNumber) { + case 1: + this.factory.time = OffsetDateTime.now(); + return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; + case 2: + return testDelayBounds(1, false, + RETRY_TEST_TEMPORARY_ERROR_RESPONSE); + case 3: + return testDelayBounds(2, true, + RETRY_TEST_TEMPORARY_ERROR_RESPONSE); + case 4: + /* + Fixed backoff means it's always the same and we never hit the max, no need to keep testing. 
+ */ + return RETRY_TEST_OK_RESPONSE; + default: + throw new IllegalArgumentException("Retries continued after success."); + } + + case RETRY_TEST_SCENARIO_NON_REPLAYABLE_FLOWABLE: + switch (this.factory.tryNumber) { + case 1: + return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; + case 2: + return Single.error(new UnexpectedLengthException("Unexpected length", 5, 6)); + default: + throw new IllegalArgumentException("Retries continued on non retryable error."); + } + } + return Single.error(new IllegalArgumentException("Invalid scenario")); + } + + /* + Calculate the delay in seconds. Round up to ensure we include the maximum value and some offset for the code + executing between the original calculation in the retry policy and this check. + */ + private long calcPrimaryDelay(int tryNumber) { + switch (this.factory.retryTestScenario) { + case RETRY_TEST_SCENARIO_EXPONENTIAL_TIMING: + return (long) Math.ceil( + ((pow(2L, tryNumber - 1) - 1L) * this.factory.options.retryDelayInMs()) / 1000); + case RETRY_TEST_SCENARIO_FIXED_TIMING: + return (long) Math.ceil(this.factory.options.retryDelayInMs() / 1000); + default: + throw new IllegalArgumentException("Invalid test scenario"); + } + } + + private OffsetDateTime calcUpperBound(OffsetDateTime start, int primaryTryNumber, boolean tryingPrimary) { + if (tryingPrimary) { + return start.plus(calcPrimaryDelay(primaryTryNumber) * 1000 + 500, ChronoUnit.MILLIS); + } else { + return start.plus(1400, ChronoUnit.MILLIS); + } + } + + private OffsetDateTime calcLowerBound(OffsetDateTime start, int primaryTryNumber, boolean tryingPrimary) { + if (tryingPrimary) { + return start.plus(calcPrimaryDelay(primaryTryNumber) * 1000 - 500, ChronoUnit.MILLIS); + } else { + return start.plus(700, ChronoUnit.MILLIS); + } + } + + private Single testDelayBounds(int primaryTryNumber, boolean tryingPrimary, + Single response) { + /* + We have to return a new Single so that the calculation for time is performed at the correct time, i.e. 
when + the Single is actually subscribed to. This mocks an HttpClient because the requests are made only when + the Single is subscribed to, not when all the infrastructure around it is put in place, and we care about + the delay before the request itself. + */ + return new Single() { + @Override + protected void subscribeActual(SingleObserver observer) { + try { + if (OffsetDateTime.now().isAfter(calcUpperBound(factory.time, primaryTryNumber, tryingPrimary)) + || OffsetDateTime.now() + .isBefore(calcLowerBound(factory.time, primaryTryNumber, tryingPrimary))) { + throw new IllegalArgumentException("Delay was not within jitter bounds"); + } + factory.time = OffsetDateTime.now(); + /* + We can blocking get because it's not actually an IO call. Everything returned here returns + Single.just(response). + */ + HttpResponse unwrappedResponse = response.blockingGet(); + observer.onSuccess(unwrappedResponse); + } catch (StorageErrorException | IllegalArgumentException e) { + observer.onError(e); + } + } + }; + } + + private Single testMaxDelayBounds(Single response) { + return new Single() { + @Override + protected void subscribeActual(SingleObserver observer) { + try { + if (OffsetDateTime.now().isAfter(factory.time.plusSeconds( + (long) Math.ceil((factory.options.maxRetryDelayInMs() / 1000) + 1)))) { + throw new IllegalArgumentException("Max retry delay exceeded"); + } else if (OffsetDateTime.now().isBefore(factory.time.plusSeconds( + (long) Math.ceil((factory.options.maxRetryDelayInMs() / 1000) - 1)))) { + throw new IllegalArgumentException("Retry did not delay long enough"); + } + + factory.time = OffsetDateTime.now(); + HttpResponse unwrappedResponse = response.blockingGet(); + observer.onSuccess(unwrappedResponse); + } catch (StorageErrorException | IllegalArgumentException e) { + observer.onError(e); + } + } + }; + } + } +} diff --git a/storage/data-plane/src/test/java/com/microsoft/azure/storage/blob/RetryTest.groovy 
b/storage/data-plane/src/test/java/com/microsoft/azure/storage/blob/RetryTest.groovy new file mode 100644 index 0000000000000..07a629340c692 --- /dev/null +++ b/storage/data-plane/src/test/java/com/microsoft/azure/storage/blob/RetryTest.groovy @@ -0,0 +1,198 @@ +/* + * Copyright Microsoft Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.microsoft.azure.storage.blob + +import com.microsoft.azure.storage.APISpec +import com.microsoft.rest.v2.http.* +import io.reactivex.Flowable +import spock.lang.Unroll + +// Tests for package-private functionality. 
+class RetryTest extends APISpec { + static URL retryTestURL = new URL("http://" + RequestRetryTestFactory.RETRY_TEST_PRIMARY_HOST) + static RequestRetryOptions retryTestOptions = new RequestRetryOptions(RetryPolicyType.EXPONENTIAL, 6, + 2, 1000, 4000, RequestRetryTestFactory.RETRY_TEST_SECONDARY_HOST) + + def "Retries until success"() { + setup: + RequestRetryTestFactory retryTestFactory = new RequestRetryTestFactory( + RequestRetryTestFactory.RETRY_TEST_SCENARIO_RETRY_UNTIL_SUCCESS, retryTestOptions) + HttpPipeline pipeline = HttpPipeline.build(new RequestRetryFactory(retryTestOptions), retryTestFactory) + + + when: + HttpResponse response = pipeline.sendRequestAsync(new HttpRequest(null, HttpMethod.GET, + retryTestURL, new HttpHeaders(), + Flowable.just(RequestRetryTestFactory.RETRY_TEST_DEFAULT_DATA), null)).blockingGet() + + then: + response.statusCode() == 200 + retryTestFactory.getTryNumber() == 6 + } + + def "Retries until max retries"() { + setup: + RequestRetryTestFactory retryTestFactory = new RequestRetryTestFactory( + RequestRetryTestFactory.RETRY_TEST_SCENARIO_RETRY_UNTIL_MAX_RETRIES, retryTestOptions) + HttpPipeline pipeline = HttpPipeline.build(new RequestRetryFactory(retryTestOptions), retryTestFactory) + + when: + HttpResponse response = pipeline.sendRequestAsync(new HttpRequest(null, HttpMethod.GET, + retryTestURL, new HttpHeaders(), Flowable.just(RequestRetryTestFactory.RETRY_TEST_DEFAULT_DATA), + null)).blockingGet() + + then: + response.statusCode() == 503 + retryTestFactory.tryNumber == retryTestOptions.maxTries() + } + + def "Retries non retryable"() { + setup: + RequestRetryTestFactory retryTestFactory = new RequestRetryTestFactory( + RequestRetryTestFactory.RETRY_TEST_SCENARIO_NON_RETRYABLE, retryTestOptions) + HttpPipeline pipeline = HttpPipeline.build(new RequestRetryFactory(retryTestOptions), retryTestFactory) + + when: + HttpResponse response = pipeline.sendRequestAsync(new HttpRequest(null, HttpMethod.GET, retryTestURL, + new 
HttpHeaders(), Flowable.just(RequestRetryTestFactory.RETRY_TEST_DEFAULT_DATA), null)) + .blockingGet() + + then: + response.statusCode() == 400 + retryTestFactory.tryNumber == 1 + } + + def "Retries non retryable secondary"() { + setup: + RequestRetryTestFactory retryTestFactory = new RequestRetryTestFactory( + RequestRetryTestFactory.RETRY_TEST_SCENARIO_NON_RETRYABLE_SECONDARY, retryTestOptions) + HttpPipeline pipeline = HttpPipeline.build(new RequestRetryFactory(retryTestOptions), retryTestFactory) + + when: + HttpResponse response = pipeline.sendRequestAsync(new HttpRequest(null, HttpMethod.GET, + retryTestURL, new HttpHeaders(), Flowable.just(RequestRetryTestFactory.RETRY_TEST_DEFAULT_DATA), + null)).blockingGet() + + then: + response.statusCode() == 400 + retryTestFactory.tryNumber == 2 + } + + def "Retries network error"() { + setup: + RequestRetryTestFactory retryTestFactory = new RequestRetryTestFactory( + RequestRetryTestFactory.RETRY_TEST_SCENARIO_NETWORK_ERROR, retryTestOptions) + HttpPipeline pipeline = HttpPipeline.build(new RequestRetryFactory(retryTestOptions), retryTestFactory) + + when: + HttpResponse response = + pipeline.sendRequestAsync(new HttpRequest(null, HttpMethod.GET, retryTestURL, + new HttpHeaders(), Flowable.just(RequestRetryTestFactory.RETRY_TEST_DEFAULT_DATA), + null)).blockingGet() + + then: + response.statusCode() == 200 + retryTestFactory.tryNumber == 3 + } + + def "Retries try timeout"() { + setup: + RequestRetryTestFactory retryTestFactory = new RequestRetryTestFactory( + RequestRetryTestFactory.RETRY_TEST_SCENARIO_TRY_TIMEOUT, retryTestOptions) + HttpPipeline pipeline = HttpPipeline.build(new RequestRetryFactory(retryTestOptions), retryTestFactory) + + when: + HttpResponse response = + pipeline.sendRequestAsync(new HttpRequest(null, HttpMethod.GET, retryTestURL, + new HttpHeaders(), Flowable.just(RequestRetryTestFactory.RETRY_TEST_DEFAULT_DATA), + null)).blockingGet() + + then: + response.statusCode() == 200 + 
retryTestFactory.tryNumber == 3 + } + + def "Retries exponential delay"() { + setup: + RequestRetryTestFactory retryTestFactory = new RequestRetryTestFactory( + RequestRetryTestFactory.RETRY_TEST_SCENARIO_EXPONENTIAL_TIMING, retryTestOptions) + HttpPipeline pipeline = HttpPipeline.build(new RequestRetryFactory(retryTestOptions), retryTestFactory) + + when: + HttpResponse response = + pipeline.sendRequestAsync(new HttpRequest(null, HttpMethod.GET, retryTestURL, + new HttpHeaders(), Flowable.just(RequestRetryTestFactory.RETRY_TEST_DEFAULT_DATA), + null)).blockingGet() + + then: + response.statusCode() == 200 + retryTestFactory.tryNumber == 6 + } + + def "Retries fixed delay"() { + setup: + RequestRetryTestFactory retryTestFactory = new RequestRetryTestFactory( + RequestRetryTestFactory.RETRY_TEST_SCENARIO_FIXED_TIMING, retryTestOptions) + HttpPipeline pipeline = HttpPipeline.build(new RequestRetryFactory(retryTestOptions), retryTestFactory) + + when: + HttpResponse response = + pipeline.sendRequestAsync(new HttpRequest(null, HttpMethod.GET, retryTestURL, + new HttpHeaders(), Flowable.just(RequestRetryTestFactory.RETRY_TEST_DEFAULT_DATA), + null)).blockingGet() + + then: + response.statusCode() == 200 + retryTestFactory.tryNumber == 4 + } + + def "Retries non replyable flowable"() { + setup: + RequestRetryTestFactory retryTestFactory = new RequestRetryTestFactory( + RequestRetryTestFactory.RETRY_TEST_SCENARIO_NON_REPLAYABLE_FLOWABLE, retryTestOptions) + HttpPipeline pipeline = HttpPipeline.build(new RequestRetryFactory(retryTestOptions), retryTestFactory) + + when: + pipeline.sendRequestAsync(new HttpRequest(null, HttpMethod.GET, retryTestURL, + new HttpHeaders(), Flowable.just(RequestRetryTestFactory.RETRY_TEST_DEFAULT_DATA), + null)).blockingGet() + + then: + def e = thrown(IllegalStateException) + e.getMessage().startsWith("The request failed because") + e.getCause() instanceof UnexpectedLengthException + } + + @Unroll + def "Retries options invalid"() { + when: + 
new RequestRetryOptions(null, maxTries, tryTimeout,
+                retryDelayInMs, maxRetryDelayInMs, null)

+        then:
+        thrown(IllegalArgumentException)

+        where:
+        maxTries | tryTimeout | retryDelayInMs | maxRetryDelayInMs
+        0        | null       | null           | null
+        null     | 0          | null           | null
+        null     | null       | 0              | 1
+        null     | null       | 1              | 0
+        null     | null       | null           | 1
+        null     | null       | 1              | null
+        null     | null       | 5              | 4
+    }
+} diff --git a/storage/data-plane/swagger/README.md b/storage/data-plane/swagger/README.md new file mode 100644 index 0000000000000..136e6f8b5189e --- /dev/null +++ b/storage/data-plane/swagger/README.md @@ -0,0 +1,19 @@ +# Azure Storage Java Proto +> see https://aka.ms/autorest +```yaml +title: StorageClient +description: Storage Client +java: true +enable-xml: true +namespace: com.microsoft.azure.storage +license-header: MICROSOFT_MIT_NO_VERSION +output-folder: ../ +input-file: +- path\to\json +directive: + # removes the x-ms-error-code from default response headers + where: $..default.headers["x-ms-error-code"] + transform: return undefined; + reason: Default models with header parameters will generate properties on non-default models + authorized-by: "@fearthecowboy" +``` diff --git a/storage/data-plane/swagger/generate.bat b/storage/data-plane/swagger/generate.bat new file mode 100644 index 0000000000000..0c538943072cb --- /dev/null +++ b/storage/data-plane/swagger/generate.bat @@ -0,0 +1,3 @@ +set version=2.0.24 +set url=https://github.com/Azure/autorest.java/releases/download/v%version%/microsoft.azure-autorest.java-%version%.tgz +autorest %~dp0README.md --use=%url% --reset --preview --implementation-subpackage=blob.implementation --models-subpackage=blob.models --generate-client-interfaces=false --required-parameter-client-methods=false --client-type-prefix=Generated --add-context-parameter=true \ No newline at end of file