Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Added code to allow scheduling file expiry #14319

Merged
merged 4 commits into from
Aug 25, 2020
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -732,6 +732,7 @@
<Class name="com.azure.core.implementation.http.UrlBuilder"/>
<Class name="com.azure.storage.blob.implementation.models.AppendBlobAppendBlockFromUrlHeaders"/>
<Class name="com.azure.storage.file.share.implementation.models.FileGetPropertiesHeaders"/>
<Class name="com.azure.storage.file.datalake.implementation.models.PathAppendDataHeaders"/>
</Or>
<Bug pattern="NM_CONFUSING"/>
</Match>
Expand Down Expand Up @@ -2103,7 +2104,7 @@
<Match>
<Package name="~com\.azure\.resourcemanager(\.[^.]+)*\.samples(\.[^.]+)*"/>
<Bug pattern="REC_CATCH_EXCEPTION,
RCN_REDUNDANT_NULLCHECK_WOULD_HAVE_BEEN_A_NPE,
RCN_REDUNDANT_NULLCHECK_WOULD_HAVE_BEEN_A_NPE,
RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE"/>
</Match>

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,7 @@ public final class BlobProperties {
private final String objectReplicationDestinationPolicyId;
private final RehydratePriority rehydratePriority;
private final Boolean isSealed;
private final OffsetDateTime expiresOn;

/**
* Constructs a {@link BlobProperties}.
Expand Down Expand Up @@ -173,6 +174,74 @@ public BlobProperties(final OffsetDateTime creationTime, final OffsetDateTime la
final Integer committedBlockCount, final String versionId, final Boolean isCurrentVersion,
final Long tagCount, Map<String, String> objectReplicationStatus, final String rehydratePriority,
final Boolean isSealed) {
this(creationTime, lastModified, eTag, blobSize, contentType, contentMd5, contentEncoding,
contentDisposition, contentLanguage, cacheControl, blobSequenceNumber, blobType, leaseStatus,
leaseState, leaseDuration, copyId, copyStatus, copySource, copyProgress, copyCompletionTime,
copyStatusDescription, isServerEncrypted, isIncrementalCopy, copyDestinationSnapshot, accessTier,
isAccessTierInferred, archiveStatus, encryptionKeySha256, encryptionScope, accessTierChangeTime,
metadata, committedBlockCount, versionId, isCurrentVersion, tagCount, objectReplicationStatus,
rehydratePriority, isSealed, null);
}

/**
* Constructs a {@link BlobProperties}.
*
* @param creationTime Creation time of the blob.
* @param lastModified Datetime when the blob was last modified.
* @param eTag ETag of the blob.
* @param blobSize Size of the blob.
* @param contentType Content type specified for the blob.
* @param contentMd5 Content MD5 specified for the blob.
* @param contentEncoding Content encoding specified for the blob.
* @param contentDisposition Content disposition specified for the blob.
* @param contentLanguage Content language specified for the blob.
* @param cacheControl Cache control specified for the blob.
* @param blobSequenceNumber The current sequence number for a page blob, if the blob is an append or block blob
* pass {@code null}.
* @param blobType Type of the blob.
* @param leaseStatus Status of the lease on the blob.
* @param leaseState State of the lease on the blob.
* @param leaseDuration Type of lease on the blob.
* @param copyId Identifier of the last copy operation performed on the blob.
* @param copyStatus Status of the last copy operation performed on the blob.
* @param copySource Source of the last copy operation performed on the blob.
* @param copyProgress Progress of the last copy operation performed on the blob.
* @param copyCompletionTime Datetime when the last copy operation on the blob completed.
* @param copyStatusDescription Description of the last copy operation on the blob.
* @param isServerEncrypted Flag indicating if the blob's content is encrypted on the server.
* @param isIncrementalCopy Flag indicating if the blob was incrementally copied.
* @param copyDestinationSnapshot Snapshot identifier of the last incremental copy snapshot for the blob.
* @param accessTier Access tier of the blob.
* @param isAccessTierInferred Flag indicating if the access tier of the blob was inferred from properties of the
* blob.
* @param archiveStatus Archive status of the blob.
* @param encryptionKeySha256 SHA256 of the customer provided encryption key used to encrypt the blob on the server.
* @param encryptionScope The name of the encryption scope under which the blob is encrypted.
* @param accessTierChangeTime Datetime when the access tier of the blob last changed.
* @param metadata Metadata associated with the blob.
* @param committedBlockCount Number of blocks committed to an append blob, if the blob is a block or page blob
* pass {@code null}.
* @param versionId The version identifier of the blob.
* @param isCurrentVersion Flag indicating if version identifier points to current version of the blob.
* @param tagCount Number of tags associated with the blob.
* @param objectReplicationStatus The object replication status map to parse.
* @param rehydratePriority The rehydrate priority
* @param isSealed Whether or not the blob is sealed.
* @param expiresOn The time when the blob is going to expire.
*/
public BlobProperties(final OffsetDateTime creationTime, final OffsetDateTime lastModified, final String eTag,
final long blobSize, final String contentType, final byte[] contentMd5, final String contentEncoding,
final String contentDisposition, final String contentLanguage, final String cacheControl,
final Long blobSequenceNumber, final BlobType blobType, final LeaseStatusType leaseStatus,
final LeaseStateType leaseState, final LeaseDurationType leaseDuration, final String copyId,
final CopyStatusType copyStatus, final String copySource, final String copyProgress,
final OffsetDateTime copyCompletionTime, final String copyStatusDescription, final Boolean isServerEncrypted,
final Boolean isIncrementalCopy, final String copyDestinationSnapshot, final AccessTier accessTier,
final Boolean isAccessTierInferred, final ArchiveStatus archiveStatus, final String encryptionKeySha256,
final String encryptionScope, final OffsetDateTime accessTierChangeTime, final Map<String, String> metadata,
final Integer committedBlockCount, final String versionId, final Boolean isCurrentVersion,
final Long tagCount, Map<String, String> objectReplicationStatus, final String rehydratePriority,
final Boolean isSealed, final OffsetDateTime expiresOn) {
this.creationTime = creationTime;
this.lastModified = lastModified;
this.eTag = eTag;
Expand Down Expand Up @@ -231,6 +300,7 @@ public BlobProperties(final OffsetDateTime creationTime, final OffsetDateTime la
}
this.rehydratePriority = RehydratePriority.fromString(rehydratePriority);
this.isSealed = isSealed;
this.expiresOn = expiresOn;
}


Expand Down Expand Up @@ -331,6 +401,7 @@ public BlobProperties(final OffsetDateTime creationTime, final OffsetDateTime la
this.objectReplicationDestinationPolicyId = objectReplicationDestinationPolicyId;
this.rehydratePriority = null;
this.isSealed = null;
this.expiresOn = null;
}

/**
Expand Down Expand Up @@ -619,4 +690,11 @@ public RehydratePriority getRehydratePriority() {
/**
 * @return whether or not this blob is sealed (sealed append blobs can no longer be appended to).
 */
public Boolean isSealed() {
    return isSealed;
}

/**
 * Gets the time this blob is scheduled to expire.
 *
 * @return the time when the blob is going to expire, or {@code null} if no expiry is scheduled
 * or the property was not populated for this instance.
 */
public OffsetDateTime getExpiresOn() {
    return expiresOn;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -1211,7 +1211,8 @@ Mono<Response<BlobProperties>> getPropertiesWithResponse(BlobRequestConditions r
hd.isAccessTierInferred(), ArchiveStatus.fromString(hd.getArchiveStatus()),
hd.getEncryptionKeySha256(), hd.getEncryptionScope(), hd.getAccessTierChangeTime(),
hd.getMetadata(), hd.getBlobCommittedBlockCount(), hd.getVersionId(), hd.isCurrentVersion(),
hd.getTagCount(), hd.getObjectReplicationRules(), hd.getRehydratePriority(), hd.isSealed());
hd.getTagCount(), hd.getObjectReplicationRules(), hd.getRehydratePriority(), hd.isSealed(),
hd.getExpiresOn());
return new SimpleResponse<>(rb, properties);
});
}
Expand Down
2 changes: 1 addition & 1 deletion sdk/storage/azure-storage-blob/swagger/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ autorest [email protected]/[email protected] --use=jianghaolu/autorest.mo

### Code generation settings
``` yaml
input-file: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/storage-dataplane-preview/specification/storage/data-plane/Microsoft.BlobStorage/preview/2019-12-12/blob.json
input-file: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/storage-dataplane-preview/specification/storage/data-plane/Microsoft.BlobStorage/preview/2020-02-10/blob.json
java: true
output-folder: ../
namespace: com.azure.storage.blob
Expand Down
3 changes: 2 additions & 1 deletion sdk/storage/azure-storage-file-datalake/CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
# Release History

## 12.3.0-beta.1 (Unreleased)

- Added support for the 2020-02-10 service version.
- Added support to schedule file expiration.

## 12.2.0 (2020-08-13)
- Fixed bug where Query Input Stream would throw when a ByteBuffer of length 0 was encountered.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
import com.azure.core.http.rest.Response;
import com.azure.core.http.rest.SimpleResponse;
import com.azure.core.util.Context;
import com.azure.core.util.DateTimeRfc1123;
import com.azure.core.util.FluxUtil;
import com.azure.core.util.logging.ClientLogger;
import com.azure.storage.blob.BlobAsyncClient;
Expand All @@ -20,18 +21,21 @@
import com.azure.storage.common.implementation.UploadUtils;
import com.azure.storage.file.datalake.implementation.models.LeaseAccessConditions;
import com.azure.storage.file.datalake.implementation.models.ModifiedAccessConditions;
import com.azure.storage.file.datalake.implementation.models.PathExpiryOptions;
import com.azure.storage.file.datalake.implementation.models.PathResourceType;
import com.azure.storage.file.datalake.implementation.util.DataLakeImplUtils;
import com.azure.storage.file.datalake.implementation.util.ModelHelper;
import com.azure.storage.file.datalake.models.DataLakeRequestConditions;
import com.azure.storage.file.datalake.models.DownloadRetryOptions;
import com.azure.storage.file.datalake.models.FileExpirationOffset;
import com.azure.storage.file.datalake.models.FileQueryAsyncResponse;
import com.azure.storage.file.datalake.options.FileQueryOptions;
import com.azure.storage.file.datalake.models.FileRange;
import com.azure.storage.file.datalake.models.FileReadAsyncResponse;
import com.azure.storage.file.datalake.models.PathHttpHeaders;
import com.azure.storage.file.datalake.models.PathInfo;
import com.azure.storage.file.datalake.models.PathProperties;
import com.azure.storage.file.datalake.options.FileScheduleDeletionOptions;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.util.function.Tuples;
Expand Down Expand Up @@ -582,7 +586,7 @@ Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset,

PathHttpHeaders headers = new PathHttpHeaders().setTransactionalContentHash(contentMd5);

return this.dataLakeStorage.paths().appendDataWithRestResponseAsync(data, fileOffset, null, length, null,
return this.dataLakeStorage.paths().appendDataWithRestResponseAsync(data, fileOffset, null, length, null, null,
headers, leaseAccessConditions, context).map(response -> new SimpleResponse<>(response, null));
}

Expand Down Expand Up @@ -922,4 +926,66 @@ public Mono<FileQueryAsyncResponse> queryWithResponse(FileQueryOptions queryOpti
.onErrorMap(DataLakeImplUtils::transformBlobStorageException);
}

// TODO (kasobol-msft) add REST DOCS
/**
 * Schedules this file for deletion at the time described by {@code options}.
 * For more information, see the
 * <a href="TBD">Azure Docs</a>.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.scheduleDeletion#FileScheduleDeletionOptions}
 *
 * @param options Schedule deletion parameters.
 * @return A reactive response signalling completion.
 */
public Mono<Void> scheduleDeletion(FileScheduleDeletionOptions options) {
    // Delegate to the with-response variant and discard the Response wrapper.
    return scheduleDeletionWithResponse(options).flatMap(response -> FluxUtil.toMono(response));
}

// TODO (kasobol-msft) add REST DOCS
/**
 * Schedules this file for deletion at the time described by {@code options}.
 * For more information, see the
 * <a href="TBD">Azure Docs</a>.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.scheduleDeletionWithResponse#FileScheduleDeletionOptions}
 *
 * @param options Schedule deletion parameters.
 * @return A reactive response signalling completion.
 */
public Mono<Response<Void>> scheduleDeletionWithResponse(FileScheduleDeletionOptions options) {
    try {
        return withContext(ctx -> scheduleDeletionWithResponse(options, ctx));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive stream rather than throwing.
        return monoError(logger, ex);
    }
}

Mono<Response<Void>> scheduleDeletionWithResponse(FileScheduleDeletionOptions options, Context context) {
    Context ctx = (context == null) ? Context.NONE : context;
    // Map the user-facing options onto the service's expiry mode and expiry-time header value.
    // An absolute ExpiresOn takes precedence over TimeToExpire; with neither (or null options)
    // the file is marked to never expire.
    PathExpiryOptions expiryMode = PathExpiryOptions.NEVER_EXPIRE;
    String expiresOn = null;
    if (options != null) {
        if (options.getExpiresOn() != null) {
            expiryMode = PathExpiryOptions.ABSOLUTE;
            // Absolute expiry is sent as an RFC1123 datetime string.
            expiresOn = new DateTimeRfc1123(options.getExpiresOn()).toString();
        } else if (options.getTimeToExpire() != null) {
            expiryMode = (options.getExpiryRelativeTo() == FileExpirationOffset.CREATION_TIME)
                ? PathExpiryOptions.RELATIVE_TO_CREATION
                : PathExpiryOptions.RELATIVE_TO_NOW;
            // Relative expiry is sent as milliseconds.
            expiresOn = Long.toString(options.getTimeToExpire().toMillis());
        }
    }
    // This API lives on the blob endpoint, hence blobDataLakeStorage rather than dataLakeStorage.
    return this.blobDataLakeStorage.paths().setExpiryWithRestResponseAsync(
        expiryMode, null,
        null, expiresOn,
        ctx.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
        .map(response -> new SimpleResponse<>(response, null));
}

}
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@
import com.azure.storage.file.datalake.models.PathHttpHeaders;
import com.azure.storage.file.datalake.models.PathInfo;
import com.azure.storage.file.datalake.models.PathProperties;
import com.azure.storage.file.datalake.options.FileScheduleDeletionOptions;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;
Expand Down Expand Up @@ -643,4 +644,41 @@ public FileQueryResponse queryWithResponse(FileQueryOptions queryOptions, Durati
return Transforms.toFileQueryResponse(response);
}, logger);
}

// TODO (kasobol-msft) add REST DOCS
/**
 * Schedules this file for deletion at the time described by {@code options}.
 * For more information, see the
 * <a href="TBD">Azure Docs</a>.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletion#FileScheduleDeletionOptions}
 *
 * @param options Schedule deletion parameters.
 */
public void scheduleDeletion(FileScheduleDeletionOptions options) {
    // Delegate to the with-response variant with no timeout and an empty context.
    scheduleDeletionWithResponse(options, null, Context.NONE);
}

// TODO (kasobol-msft) add REST DOCS
/**
 * Schedules this file for deletion at the time described by {@code options}.
 * For more information, see the
 * <a href="TBD">Azure Docs</a>.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletionWithResponse#FileScheduleDeletionOptions-Duration-Context}
 *
 * @param options Schedule deletion parameters.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> scheduleDeletionWithResponse(FileScheduleDeletionOptions options,
    Duration timeout, Context context) {
    // Block on the async client's implementation, honoring the optional timeout.
    Mono<Response<Void>> asyncResponse =
        this.dataLakeFileAsyncClient.scheduleDeletionWithResponse(options, context);
    return StorageImplUtils.blockWithOptionalTimeout(asyncResponse, timeout);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,11 @@ public class DataLakePathAsyncClient {
private final ClientLogger logger = new ClientLogger(DataLakePathAsyncClient.class);

final DataLakeStorageClientImpl dataLakeStorage;
/**
* This {@link DataLakeStorageClientImpl} is pointing to blob endpoint instead of dfs
* in order to expose APIs that are on blob endpoint but are only functional for HNS enabled accounts.
*/
final DataLakeStorageClientImpl blobDataLakeStorage;
private final String accountName;
private final String fileSystemName;
final String pathName;
Expand Down Expand Up @@ -89,6 +94,13 @@ public class DataLakePathAsyncClient {
.build();
this.serviceVersion = serviceVersion;

String blobUrl = DataLakeImplUtils.endpointToDesiredEndpoint(url, "blob", "dfs");
this.blobDataLakeStorage = new DataLakeStorageClientBuilder()
.pipeline(pipeline)
.url(blobUrl)
.version(serviceVersion.getVersion())
.build();

this.accountName = accountName;
this.fileSystemName = fileSystemName;
this.pathName = Utility.urlEncode(Utility.urlDecode(pathName));
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -244,7 +244,7 @@ static PathProperties toPathProperties(BlobProperties properties) {
properties.isServerEncrypted(), properties.isIncrementalCopy(),
Transforms.toDataLakeAccessTier(properties.getAccessTier()),
Transforms.toDataLakeArchiveStatus(properties.getArchiveStatus()), properties.getEncryptionKeySha256(),
properties.getAccessTierChangeTime(), properties.getMetadata());
properties.getAccessTierChangeTime(), properties.getMetadata(), properties.getExpiresOn());
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,7 @@ public DataLakeStorageClientImpl build() {
if (this.version != null) {
client.setVersion(this.version);
} else {
client.setVersion("2019-02-02");
client.setVersion("2020-02-02");
}
if (this.fileSystem != null) {
client.setFileSystem(this.fileSystem);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -194,7 +194,7 @@ public PathsImpl paths() {
* Initializes an instance of DataLakeStorageClient client.
*/
public DataLakeStorageClientImpl() {
new HttpPipelineBuilder().policies(new UserAgentPolicy(), new RetryPolicy(), new CookiePolicy()).build();
this(new HttpPipelineBuilder().policies(new UserAgentPolicy(), new RetryPolicy(), new CookiePolicy()).build());
}

/**
Expand Down
Loading