diff --git a/.gitignore b/.gitignore index 1b84a66d..62c090bc 100644 --- a/.gitignore +++ b/.gitignore @@ -45,6 +45,7 @@ tests/.secrets* # IAM users files often created in testing users.json +users.json.backup # env files for testing **/.env* diff --git a/backend/azure/azure.go b/backend/azure/azure.go index b5716ebb..a641a986 100644 --- a/backend/azure/azure.go +++ b/backend/azure/azure.go @@ -17,6 +17,7 @@ package azure import ( "bytes" "context" + "crypto/sha256" "encoding/base64" "encoding/binary" "encoding/json" @@ -25,7 +26,9 @@ import ( "io" "math" "os" + "path/filepath" "slices" + "sort" "strconv" "strings" "time" @@ -40,6 +43,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/google/uuid" "github.com/versity/versitygw/auth" "github.com/versity/versitygw/backend" "github.com/versity/versitygw/s3err" @@ -52,18 +56,35 @@ import ( type key string const ( - keyAclCapital key = "Acl" - keyAclLower key = "acl" - keyOwnership key = "Ownership" - keyTags key = "Tags" - keyPolicy key = "Policy" - keyBucketLock key = "Bucketlock" - keyObjRetention key = "Objectretention" - keyObjLegalHold key = "Objectlegalhold" + keyAclCapital key = "Acl" + keyAclLower key = "acl" + keyOwnership key = "Ownership" + keyTags key = "Tags" + keyPolicy key = "Policy" + keyBucketLock key = "Bucketlock" + keyObjRetention key = "Objectretention" + keyObjLegalHold key = "Objectlegalhold" + onameAttr key = "Objname" + onameAttrLower key = "objname" + metaTmpMultipartPrefix key = ".sgwtmp" + "/multipart" defaultContentType = "binary/octet-stream" ) +func (key) Table() map[string]struct{} { + return map[string]struct{}{ + "acl": {}, + "ownership": {}, + "tags": {}, + "policy": {}, + "bucketlock": {}, + "objectretention": {}, + "objectlegalhold": {}, + "objname": {}, + ".sgwtmp/multipart": {}, + } +} + type Azure 
struct { backend.BackendUnsupported @@ -254,6 +275,9 @@ func (az *Azure) GetBucketOwnershipControls(ctx context.Context, bucket string) if err != nil { return ownship, err } + if len(ownership) == 0 { + return ownship, s3err.GetAPIError(s3err.ErrOwnershipControlsNotFound) + } return types.ObjectOwnership(ownership), nil } @@ -334,11 +358,11 @@ func (az *Azure) GetBucketTagging(ctx context.Context, bucket string) (map[strin return nil, err } - var tags map[string]string if len(tagsJson) == 0 { - return tags, nil + return nil, s3err.GetAPIError(s3err.ErrBucketTaggingNotFound) } + var tags map[string]string err = json.Unmarshal(tagsJson, &tags) if err != nil { return nil, err @@ -391,6 +415,7 @@ func (az *Azure) GetObject(ctx context.Context, input *s3.GetObjectInput) (*s3.G TagCount: &tagcount, ContentRange: blobDownloadResponse.ContentRange, Body: blobDownloadResponse.Body, + StorageClass: types.StorageClassStandard, }, nil } @@ -419,6 +444,7 @@ func (az *Azure) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3 ContentLength: block.Size, ETag: block.Name, PartsCount: &partsCount, + StorageClass: types.StorageClassStandard, }, nil } } @@ -447,6 +473,7 @@ func (az *Azure) HeadObject(ctx context.Context, input *s3.HeadObjectInput) (*s3 LastModified: resp.LastModified, Metadata: parseAzMetadata(resp.Metadata), Expires: resp.ExpiresOn, + StorageClass: types.StorageClassStandard, } status, ok := resp.Metadata[string(keyObjLegalHold)] @@ -475,64 +502,31 @@ func (az *Azure) GetObjectAttributes(ctx context.Context, input *s3.GetObjectAtt Bucket: input.Bucket, Key: input.Key, }) - if err == nil { - return s3response.GetObjectAttributesResult{ - ETag: data.ETag, - LastModified: data.LastModified, - ObjectSize: data.ContentLength, - StorageClass: data.StorageClass, - VersionId: data.VersionId, - }, nil - } - if !errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchKey)) { - return s3response.GetObjectAttributesResult{}, err - } - - resp, err := az.ListParts(ctx, 
&s3.ListPartsInput{ - Bucket: input.Bucket, - Key: input.Key, - PartNumberMarker: input.PartNumberMarker, - MaxParts: input.MaxParts, - }) - if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchUpload)) { - return s3response.GetObjectAttributesResult{}, s3err.GetAPIError(s3err.ErrNoSuchKey) - } if err != nil { return s3response.GetObjectAttributesResult{}, err } - parts := []types.ObjectPart{} - - for _, p := range resp.Parts { - partNumber := int32(p.PartNumber) - size := p.Size - - parts = append(parts, types.ObjectPart{ - Size: &size, - PartNumber: &partNumber, - }) - } - - //TODO: handle PartsCount prop return s3response.GetObjectAttributesResult{ - ObjectParts: &s3response.ObjectParts{ - IsTruncated: resp.IsTruncated, - MaxParts: resp.MaxParts, - PartNumberMarker: resp.PartNumberMarker, - NextPartNumberMarker: resp.NextPartNumberMarker, - Parts: parts, - }, + ETag: data.ETag, + LastModified: data.LastModified, + ObjectSize: data.ContentLength, + StorageClass: data.StorageClass, }, nil } func (az *Azure) ListObjects(ctx context.Context, input *s3.ListObjectsInput) (s3response.ListObjectsResult, error) { - pager := az.client.NewListBlobsFlatPager(*input.Bucket, &azblob.ListBlobsFlatOptions{ + client, err := az.getContainerClient(*input.Bucket) + if err != nil { + return s3response.ListObjectsResult{}, nil + } + pager := client.NewListBlobsHierarchyPager(*input.Delimiter, &container.ListBlobsHierarchyOptions{ Marker: input.Marker, MaxResults: input.MaxKeys, Prefix: input.Prefix, }) var objects []s3response.Object + var cPrefixes []types.CommonPrefix var nextMarker *string var isTruncated bool var maxKeys int32 = math.MaxInt32 @@ -547,13 +541,10 @@ Pager: if err != nil { return s3response.ListObjectsResult{}, azureErrToS3Err(err) } - for _, v := range resp.Segment.BlobItems { - if nextMarker == nil && *resp.NextMarker != "" { - nextMarker = resp.NextMarker + if len(objects)+len(cPrefixes) >= int(maxKeys) { + nextMarker = objects[len(objects)-1].Key isTruncated = 
true - } - if len(objects) >= int(maxKeys) { break Pager } objects = append(objects, s3response.Object{ @@ -561,7 +552,20 @@ Pager: Key: v.Name, LastModified: v.Properties.LastModified, Size: v.Properties.ContentLength, - StorageClass: types.ObjectStorageClass(*v.Properties.AccessTier), + StorageClass: types.ObjectStorageClassStandard, + }) + } + for _, v := range resp.Segment.BlobPrefixes { + if *v.Name <= *input.Marker { + continue + } + if len(objects)+len(cPrefixes) >= int(maxKeys) { + nextMarker = cPrefixes[len(cPrefixes)-1].Prefix + isTruncated = true + break Pager + } + cPrefixes = append(cPrefixes, types.CommonPrefix{ + Prefix: v.Name, }) } } @@ -569,14 +573,15 @@ Pager: // TODO: generate common prefixes when appropriate return s3response.ListObjectsResult{ - Contents: objects, - Marker: input.Marker, - MaxKeys: input.MaxKeys, - Name: input.Bucket, - NextMarker: nextMarker, - Prefix: input.Prefix, - IsTruncated: &isTruncated, - Delimiter: input.Delimiter, + Contents: objects, + Marker: input.Marker, + MaxKeys: input.MaxKeys, + Name: input.Bucket, + NextMarker: nextMarker, + Prefix: input.Prefix, + IsTruncated: &isTruncated, + Delimiter: input.Delimiter, + CommonPrefixes: cPrefixes, }, nil } @@ -587,13 +592,18 @@ func (az *Azure) ListObjectsV2(ctx context.Context, input *s3.ListObjectsV2Input } else { marker = *input.StartAfter } - pager := az.client.NewListBlobsFlatPager(*input.Bucket, &azblob.ListBlobsFlatOptions{ + client, err := az.getContainerClient(*input.Bucket) + if err != nil { + return s3response.ListObjectsV2Result{}, nil + } + pager := client.NewListBlobsHierarchyPager(*input.Delimiter, &container.ListBlobsHierarchyOptions{ Marker: &marker, MaxResults: input.MaxKeys, Prefix: input.Prefix, }) var objects []s3response.Object + var cPrefixes []types.CommonPrefix var nextMarker *string var isTruncated bool var maxKeys int32 = math.MaxInt32 @@ -609,26 +619,34 @@ Pager: return s3response.ListObjectsV2Result{}, azureErrToS3Err(err) } for _, v := range 
resp.Segment.BlobItems { - if nextMarker == nil && *resp.NextMarker != "" { - nextMarker = resp.NextMarker + if len(objects)+len(cPrefixes) >= int(maxKeys) { + nextMarker = objects[len(objects)-1].Key isTruncated = true - } - if len(objects) >= int(maxKeys) { break Pager } - nextMarker = resp.NextMarker objects = append(objects, s3response.Object{ ETag: (*string)(v.Properties.ETag), Key: v.Name, LastModified: v.Properties.LastModified, Size: v.Properties.ContentLength, - StorageClass: types.ObjectStorageClass(*v.Properties.AccessTier), + StorageClass: types.ObjectStorageClassStandard, + }) + } + for _, v := range resp.Segment.BlobPrefixes { + if *v.Name <= marker { + continue + } + if len(objects)+len(cPrefixes) >= int(maxKeys) { + nextMarker = cPrefixes[len(cPrefixes)-1].Prefix + isTruncated = true + break Pager + } + cPrefixes = append(cPrefixes, types.CommonPrefix{ + Prefix: v.Name, }) } } - // TODO: generate common prefixes when appropriate - return s3response.ListObjectsV2Result{ Contents: objects, ContinuationToken: input.ContinuationToken, @@ -638,6 +656,7 @@ Pager: Prefix: input.Prefix, IsTruncated: &isTruncated, Delimiter: input.Delimiter, + CommonPrefixes: cPrefixes, }, nil } @@ -687,21 +706,24 @@ func (az *Azure) DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInput } func (az *Azure) CopyObject(ctx context.Context, input *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) { - mdmap, err := az.getContainerMetaDataMap(ctx, *input.Bucket) + bclient, err := az.getBlobClient(*input.Bucket, *input.Key) if err != nil { return nil, err } - if strings.Join([]string{*input.Bucket, *input.Key}, "/") == *input.CopySource && isMetaSame(mdmap, input.Metadata) { - return nil, s3err.GetAPIError(s3err.ErrInvalidCopyDest) - } + if strings.Join([]string{*input.Bucket, *input.Key}, "/") == *input.CopySource { + props, err := bclient.GetProperties(ctx, nil) + if err != nil { + return nil, azureErrToS3Err(err) + } - tags, err := parseTags(input.Tagging) - if err != 
nil { - return nil, err + mdmap := props.Metadata + if isMetaSame(mdmap, input.Metadata) { + return nil, s3err.GetAPIError(s3err.ErrInvalidCopyDest) + } } - bclient, err := az.getBlobClient(*input.Bucket, *input.Key) + tags, err := parseTags(input.Tagging) if err != nil { return nil, err } @@ -765,26 +787,96 @@ func (az *Azure) DeleteObjectTagging(ctx context.Context, bucket, object string) } func (az *Azure) CreateMultipartUpload(ctx context.Context, input *s3.CreateMultipartUploadInput) (s3response.InitiateMultipartUploadResult, error) { - // Multipart upload starts with UploadPart action so there is no - // correlating function for creating mutlipart uploads. - // TODO: since azure only allows for a single multipart upload - // for an object name at a time, we need to send an error back to - // the client if there is already an outstanding upload in progress - // for this object. - // Alternatively, is there something we can do with upload ids to - // keep concurrent uploads unique still? I haven't found an efficient - // way to rename final objects. 
+ if input.ObjectLockLegalHoldStatus != "" || input.ObjectLockMode != "" { + bucketLock, err := az.getContainerMetaData(ctx, *input.Bucket, string(keyBucketLock)) + if err != nil { + return s3response.InitiateMultipartUploadResult{}, azureErrToS3Err(err) + } + + if len(bucketLock) == 0 { + return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrInvalidBucketObjectLockConfiguration) + } + + var bucketLockConfig auth.BucketLockConfig + if err := json.Unmarshal(bucketLock, &bucketLockConfig); err != nil { + return s3response.InitiateMultipartUploadResult{}, fmt.Errorf("parse bucket lock config: %w", err) + } + + if !bucketLockConfig.Enabled { + return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrInvalidBucketObjectLockConfiguration) + } + } + + meta := parseMetadata(input.Metadata) + meta[string(onameAttr)] = input.Key + + // parse object tags + tagsStr := getString(input.Tagging) + tags := map[string]string{} + if tagsStr != "" { + tagParts := strings.Split(tagsStr, "&") + for _, prt := range tagParts { + p := strings.Split(prt, "=") + if len(p) != 2 { + return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrInvalidTag) + } + if len(p[0]) > 128 || len(p[1]) > 256 { + return s3response.InitiateMultipartUploadResult{}, s3err.GetAPIError(s3err.ErrInvalidTag) + } + tags[p[0]] = p[1] + } + } + + // set blob legal hold status in metadata + if input.ObjectLockLegalHoldStatus == types.ObjectLockLegalHoldStatusOn { + meta[string(keyObjLegalHold)] = backend.GetStringPtr("1") + } + + // set blob retention date + if input.ObjectLockMode != "" { + retention := types.ObjectLockRetention{ + Mode: types.ObjectLockRetentionMode(input.ObjectLockMode), + RetainUntilDate: input.ObjectLockRetainUntilDate, + } + retParsed, err := json.Marshal(retention) + if err != nil { + return s3response.InitiateMultipartUploadResult{}, azureErrToS3Err(err) + } + meta[string(keyObjRetention)] = 
backend.GetStringPtr(string(retParsed)) + } + + uploadId := uuid.New().String() + + tmpPath := createMetaTmpPath(*input.Key, uploadId) + + opts := &blockblob.UploadBufferOptions{ + Metadata: meta, + Tags: tags, + } + if getString(input.ContentType) != "" { + opts.HTTPHeaders = &blob.HTTPHeaders{ + BlobContentType: input.ContentType, + } + } + + // Create an empty blob in .sgwtmp/multipart// + // The blob indicates multipart upload initialization and holds the mp metadata + // e.g. tagging, content-type, metadata, object lock status ... + _, err := az.client.UploadBuffer(ctx, *input.Bucket, tmpPath, []byte{}, opts) + if err != nil { + return s3response.InitiateMultipartUploadResult{}, azureErrToS3Err(err) + } + return s3response.InitiateMultipartUploadResult{ Bucket: *input.Bucket, Key: *input.Key, - UploadId: *input.Key, + UploadId: uploadId, }, nil } // Each part is translated into an uncommitted block in a newly created blob in staging area func (az *Azure) UploadPart(ctx context.Context, input *s3.UploadPartInput) (etag string, err error) { - client, err := az.getBlockBlobClient(*input.Bucket, *input.Key) - if err != nil { + if err := az.checkIfMpExists(ctx, *input.Bucket, *input.Key, *input.UploadId); err != nil { return "", err } @@ -797,6 +889,11 @@ func (az *Azure) UploadPart(ctx context.Context, input *s3.UploadPartInput) (eta return "", err } + client, err := az.getBlockBlobClient(*input.Bucket, *input.Key) + if err != nil { + return "", err + } + + // block id serves as etag here etag = blockIDInt32ToBase64(*input.PartNumber) _, err = client.StageBlock(ctx, etag, rdr, nil) @@ -813,10 +910,14 @@ func (az *Azure) UploadPartCopy(ctx context.Context, input *s3.UploadPartCopyInp return s3response.CopyObjectResult{}, nil } + if err := az.checkIfMpExists(ctx, *input.Bucket, *input.Key, *input.UploadId); err != nil { + return s3response.CopyObjectResult{}, err + } + + eTag := blockIDInt32ToBase64(*input.PartNumber) //TODO: handle block copy by range //TODO: the
action returns not implemented on azurite, maybe in production this will work? - // UploadId here is the source block id - _, err = client.StageBlockFromURL(ctx, *input.UploadId, *input.CopySource, nil) + _, err = client.StageBlockFromURL(ctx, eTag, *input.CopySource, nil) if err != nil { return s3response.CopyObjectResult{}, parseMpError(err) } @@ -826,15 +927,14 @@ func (az *Azure) UploadPartCopy(ctx context.Context, input *s3.UploadPartCopyInp // Lists all uncommitted parts from the blob func (az *Azure) ListParts(ctx context.Context, input *s3.ListPartsInput) (s3response.ListPartsResult, error) { + if err := az.checkIfMpExists(ctx, *input.Bucket, *input.Key, *input.UploadId); err != nil { + return s3response.ListPartsResult{}, err + } client, err := az.getBlockBlobClient(*input.Bucket, *input.Key) if err != nil { return s3response.ListPartsResult{}, nil } - resp, err := client.GetBlockList(ctx, blockblob.BlockListTypeUncommitted, nil) - if err != nil { - return s3response.ListPartsResult{}, parseMpError(err) - } var partNumberMarker int var nextPartNumberMarker int var maxParts int32 = math.MaxInt32 @@ -850,13 +950,28 @@ func (az *Azure) ListParts(ctx context.Context, input *s3.ListPartsInput) (s3res maxParts = *input.MaxParts } + resp, err := client.GetBlockList(ctx, blockblob.BlockListTypeUncommitted, nil) + if err != nil { + // If the mp exists but the client returns 'NoSuchKey' error, return empty result + if errors.Is(azureErrToS3Err(err), s3err.GetAPIError(s3err.ErrNoSuchKey)) { + return s3response.ListPartsResult{ + Bucket: *input.Bucket, + Key: *input.Key, + PartNumberMarker: partNumberMarker, + IsTruncated: isTruncated, + MaxParts: int(maxParts), + StorageClass: types.StorageClassStandard, + }, nil + } + } + parts := []s3response.Part{} for _, el := range resp.UncommittedBlocks { partNumber, err := decodeBlockId(*el.Name) if err != nil { return s3response.ListPartsResult{}, err } - if partNumberMarker != 0 && partNumberMarker >= partNumber { + if 
partNumberMarker >= partNumber { continue } parts = append(parts, s3response.Part{ @@ -879,29 +994,29 @@ func (az *Azure) ListParts(ctx context.Context, input *s3.ListPartsInput) (s3res PartNumberMarker: partNumberMarker, IsTruncated: isTruncated, MaxParts: int(maxParts), + StorageClass: types.StorageClassStandard, }, nil } -// Lists all block blobs, which has uncommitted blocks +// Lists all the multipart uploads initiated with .sgwtmp/multipart prefix func (az *Azure) ListMultipartUploads(ctx context.Context, input *s3.ListMultipartUploadsInput) (s3response.ListMultipartUploadsResult, error) { client, err := az.getContainerClient(*input.Bucket) if err != nil { return s3response.ListMultipartUploadsResult{}, err } - pager := client.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{ - Include: container.ListBlobsInclude{UncommittedBlobs: true}, - Marker: input.KeyMarker, - Prefix: input.Prefix, - }) - var maxUploads int32 - if input.MaxUploads != nil { - maxUploads = *input.MaxUploads - } - isTruncated := false - nextKeyMarker := "" uploads := []s3response.Upload{} - breakFlag := false + + var uploadIDMarker string + if input.UploadIdMarker != nil { + uploadIDMarker = *input.UploadIdMarker + } + uploadIdMarkerFound := false + prefix := string(metaTmpMultipartPrefix) + + pager := client.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{ + Prefix: &prefix, + }) for pager.More() { resp, err := pager.NextPage(ctx) @@ -909,49 +1024,131 @@ func (az *Azure) ListMultipartUploads(ctx context.Context, input *s3.ListMultipa return s3response.ListMultipartUploadsResult{}, azureErrToS3Err(err) } for _, el := range resp.Segment.BlobItems { - if el.Properties.AccessTier == nil { - if len(uploads) >= int(*input.MaxUploads) && maxUploads != 0 { - breakFlag = true - nextKeyMarker = *el.Name - isTruncated = true - break - } - uploads = append(uploads, s3response.Upload{ - Key: *el.Name, - Initiated: *el.Properties.CreationTime, - }) + key, ok := 
el.Metadata[string(onameAttrLower)] + if !ok { + continue } + if *key <= *input.KeyMarker { + continue + } + if input.Prefix != nil && !strings.HasPrefix(*key, *input.Prefix) { + continue + } + + path := filepath.Clean(*el.Name) + parts := strings.Split(path, "/") + uploadId := parts[2] + + uploads = append(uploads, s3response.Upload{ + Key: *key, + Initiated: *el.Properties.CreationTime, + UploadID: uploadId, + StorageClass: types.StorageClassStandard, + }) } - if breakFlag { - break + } + maxUploads := 1000 + if input.MaxUploads != nil { + maxUploads = int(*input.MaxUploads) + } + if *input.KeyMarker != "" && uploadIDMarker != "" && !uploadIdMarkerFound { + return s3response.ListMultipartUploadsResult{ + Bucket: *input.Bucket, + Delimiter: *input.Delimiter, + KeyMarker: *input.KeyMarker, + MaxUploads: maxUploads, + Prefix: *input.Prefix, + UploadIDMarker: *input.UploadIdMarker, + Uploads: []s3response.Upload{}, + }, nil + } + + sort.SliceStable(uploads, func(i, j int) bool { + return uploads[i].Key < uploads[j].Key + }) + + if *input.KeyMarker != "" && *input.UploadIdMarker != "" { + // the uploads are already filtered by keymarker + // filter the uploads by uploadIdMarker + for i, upl := range uploads { + if upl.UploadID == uploadIDMarker { + uploads = uploads[i+1:] + break + } } } - return s3response.ListMultipartUploadsResult{ - Uploads: uploads, - Bucket: *input.Bucket, - KeyMarker: *input.KeyMarker, - NextKeyMarker: nextKeyMarker, - MaxUploads: int(maxUploads), - Prefix: *input.Prefix, - IsTruncated: isTruncated, - Delimiter: *input.Delimiter, - }, nil + + if len(uploads) <= maxUploads { + return s3response.ListMultipartUploadsResult{ + Bucket: *input.Bucket, + Delimiter: *input.Delimiter, + KeyMarker: *input.KeyMarker, + MaxUploads: maxUploads, + Prefix: *input.Prefix, + UploadIDMarker: *input.UploadIdMarker, + Uploads: uploads, + }, nil + } else { + resUploads := uploads[:maxUploads] + return s3response.ListMultipartUploadsResult{ + Bucket: *input.Bucket, 
+ Delimiter: *input.Delimiter, + KeyMarker: *input.KeyMarker, + NextKeyMarker: resUploads[len(resUploads)-1].Key, + MaxUploads: maxUploads, + Prefix: *input.Prefix, + UploadIDMarker: *input.UploadIdMarker, + NextUploadIDMarker: resUploads[len(resUploads)-1].UploadID, + IsTruncated: true, + Uploads: resUploads, + }, nil + } } // Deletes the block blob with committed/uncommitted blocks +// Cleans up the initiated multipart upload in .sgwtmp namespace func (az *Azure) AbortMultipartUpload(ctx context.Context, input *s3.AbortMultipartUploadInput) error { - // TODO: need to verify this blob has uncommitted blocks? - _, err := az.client.DeleteBlob(ctx, *input.Bucket, *input.Key, nil) + tmpPath := createMetaTmpPath(*input.Key, *input.UploadId) + _, err := az.client.DeleteBlob(ctx, *input.Bucket, tmpPath, nil) if err != nil { return parseMpError(err) } + + // Cleanup the uploaded parts + _, err = az.client.DeleteBlob(ctx, *input.Bucket, *input.Key, nil) + if err != nil { + err = azureErrToS3Err(err) + if errors.Is(err, s3err.GetAPIError(s3err.ErrNoSuchKey)) { + return nil + } + + return err + } + return nil } // Commits all the uncommitted blocks inside the block blob -// And moves the block blob from staging area into the blobs list +// And moves the block blob from staging area into the blobs list. 
+// Copies the multipart metadata from .sgwtmp namespace into the newly created blob +// Deletes the multipart upload 'blob' from .sgwtmp namespace // It indicates the end of the multipart upload func (az *Azure) CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) { + tmpPath := createMetaTmpPath(*input.Key, *input.UploadId) + blobClient, err := az.getBlobClient(*input.Bucket, tmpPath) + if err != nil { + return nil, err + } + + props, err := blobClient.GetProperties(ctx, nil) + if err != nil { + return nil, parseMpError(err) + } + tags, err := blobClient.GetTags(ctx, nil) + if err != nil { + return nil, parseMpError(err) + } + client, err := az.getBlockBlobClient(*input.Bucket, *input.Key) if err != nil { return nil, err @@ -988,7 +1185,21 @@ func (az *Azure) CompleteMultipartUpload(ctx context.Context, input *s3.Complete blockIds = append(blockIds, *block.Name) } - resp, err := client.CommitBlockList(ctx, blockIds, nil) + opts := &blockblob.CommitBlockListOptions{ + Metadata: props.Metadata, + Tags: parseAzTags(tags.BlobTagSet), + } + opts.HTTPHeaders = &blob.HTTPHeaders{ + BlobContentType: props.ContentType, + } + + resp, err := client.CommitBlockList(ctx, blockIds, opts) + if err != nil { + return nil, parseMpError(err) + } + + // cleanup the multipart upload + _, err = blobClient.Delete(ctx, nil) if err != nil { return nil, parseMpError(err) } @@ -1313,9 +1524,15 @@ func parseAzMetadata(m map[string]*string) map[string]string { return nil } + keywords := keyTags.Table() + meta := make(map[string]string) for k, v := range m { + _, ok := keywords[strings.ToLower(k)] + if ok { + continue + } meta[k] = *v } return meta @@ -1427,20 +1644,6 @@ func (az *Azure) getContainerMetaData(ctx context.Context, bucket, key string) ( return value, nil } -func (az *Azure) getContainerMetaDataMap(ctx context.Context, bucket string) (map[string]*string, error) { - client, err :=
az.getContainerClient(bucket) - if err != nil { - return nil, err - } - - props, err := client.GetProperties(ctx, nil) - if err != nil { - return nil, azureErrToS3Err(err) - } - - return props.Metadata, nil -} - func (az *Azure) setContainerMetaData(ctx context.Context, bucket, key string, value []byte) error { client, err := az.getContainerClient(bucket) if err != nil { @@ -1517,7 +1720,7 @@ func getAclFromMetadata(meta map[string]*string, key key) (*auth.ACL, error) { } func isMetaSame(azMeta map[string]*string, awsMeta map[string]string) bool { - if len(azMeta) != len(awsMeta)+1 { + if len(azMeta) != len(awsMeta) { return false } @@ -1533,3 +1736,24 @@ func isMetaSame(azMeta map[string]*string, awsMeta map[string]string) bool { return true } + +func createMetaTmpPath(obj, uploadId string) string { + objNameSum := sha256.Sum256([]byte(obj)) + return filepath.Join(string(metaTmpMultipartPrefix), uploadId, fmt.Sprintf("%x", objNameSum)) +} + +// Checks if the multipart upload exists with the given bucket, key and uploadId +func (az *Azure) checkIfMpExists(ctx context.Context, bucket, obj, uploadId string) error { + tmpPath := createMetaTmpPath(obj, uploadId) + blobClient, err := az.getBlobClient(bucket, tmpPath) + if err != nil { + return err + } + + _, err = blobClient.GetProperties(ctx, nil) + if err != nil { + return s3err.GetAPIError(s3err.ErrNoSuchUpload) + } + + return nil +} diff --git a/tests/integration/group-tests.go b/tests/integration/group-tests.go index fee493f6..8aa0c91d 100644 --- a/tests/integration/group-tests.go +++ b/tests/integration/group-tests.go @@ -132,7 +132,6 @@ func TestPutObject(s *S3Conf) { PutObject_special_chars(s) PutObject_invalid_long_tags(s) PutObject_missing_object_lock_retention_config(s) - PutObject_name_too_long(s) PutObject_with_object_lock(s) PutObject_success(s) PutObject_invalid_credentials(s) @@ -192,7 +191,6 @@ func TestListObjectsV2(s *S3Conf) { func TestDeleteObject(s *S3Conf) { DeleteObject_non_existing_object(s) -
DeleteObject_name_too_long(s) DeleteObject_non_existing_dir_object(s) DeleteObject_success(s) DeleteObject_success_status_code(s) @@ -480,6 +478,9 @@ func TestPosix(s *S3Conf) { PutObject_overwrite_file_obj(s) PutObject_dir_obj_with_data(s) CreateMultipartUpload_dir_obj(s) + PutObject_name_too_long(s) + HeadObject_name_too_long(s) + DeleteObject_name_too_long(s) } func TestIAM(s *S3Conf) { diff --git a/tests/integration/tests.go b/tests/integration/tests.go index 26fb9e5e..5c312fa1 100644 --- a/tests/integration/tests.go +++ b/tests/integration/tests.go @@ -39,7 +39,6 @@ import ( var ( shortTimeout = 10 * time.Second iso8601Format = "20060102T150405Z" - emptyObjETag = "d41d8cd98f00b204e9800998ecf8427e" ) func Authentication_empty_auth_header(s *S3Conf) error { @@ -2188,7 +2187,7 @@ func DeleteBucket_non_existing_bucket(s *S3Conf) error { func DeleteBucket_non_empty_bucket(s *S3Conf) error { testName := "DeleteBucket_non_empty_bucket" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { - err := putObjects(s3client, []string{"foo"}, bucket) + _, err := putObjects(s3client, []string{"foo"}, bucket) if err != nil { return err } @@ -2682,7 +2681,7 @@ func DeleteBucketTagging_success(s *S3Conf) error { func PutObject_non_existing_bucket(s *S3Conf) error { testName := "PutObject_non_existing_bucket" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { - err := putObjects(s3client, []string{"my-obj"}, "non-existing-bucket") + _, err := putObjects(s3client, []string{"my-obj"}, "non-existing-bucket") if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrNoSuchBucket)); err != nil { return err } @@ -2693,7 +2692,7 @@ func PutObject_non_existing_bucket(s *S3Conf) error { func PutObject_special_chars(s *S3Conf) error { testName := "PutObject_special_chars" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { - err := putObjects(s3client, []string{"my%key", "my^key", "my*key", "my.key", 
"my-key", "my_key", "my!key", "my'key", "my(key", "my)key", "my\\key", "my{}key", "my[]key", "my`key", "my+key", "my%25key", "my@key"}, bucket) + _, err := putObjects(s3client, []string{"my%key", "my^key", "my*key", "my.key", "my-key", "my_key", "my!key", "my'key", "my(key", "my)key", "my\\key", "my{}key", "my[]key", "my`key", "my+key", "my%25key", "my@key"}, bucket) if err != nil { return err } @@ -2770,25 +2769,6 @@ func PutObject_missing_object_lock_retention_config(s *S3Conf) error { }) } -func PutObject_name_too_long(s *S3Conf) error { - testName := "PutObject_name_too_long" - return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { - key := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" - - ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) - _, err := s3client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: &bucket, - Key: &key, - }) - cancel() - if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrKeyTooLong)); err != nil { - return err - } - - return nil - }) -} - func PutObject_with_object_lock(s *S3Conf) error { testName := "PutObject_with_object_lock" runF(testName) @@ -2861,7 +2841,7 @@ func PutObject_with_object_lock(s *S3Conf) error { func PutObject_success(s *S3Conf) error { testName := "PutObject_success" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { - err := putObjects(s3client, []string{"my-obj"}, bucket) + _, err := putObjects(s3client, []string{"my-obj"}, bucket) if err != nil { return err } @@ -2875,7 +2855,7 @@ func PutObject_invalid_credentials(s *S3Conf) error { newconf := *s newconf.awsSecret = newconf.awsSecret + "badpassword" client := s3.NewFromConfig(newconf.Config()) - err := putObjects(client, 
[]string{"my-obj"}, bucket) + _, err := putObjects(client, []string{"my-obj"}, bucket) return checkApiErr(err, s3err.GetAPIError(s3err.ErrSignatureDoesNotMatch)) }) } @@ -2896,22 +2876,6 @@ func HeadObject_non_existing_object(s *S3Conf) error { }) } -func HeadObject_name_too_long(s *S3Conf) error { - testName := "HeadObject_name_too_long" - return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { - ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) - _, err := s3client.HeadObject(ctx, &s3.HeadObjectInput{ - Bucket: &bucket, - Key: getPtr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), - }) - cancel() - if err := checkSdkApiErr(err, "BadRequest"); err != nil { - return err - } - return nil - }) -} - func HeadObject_invalid_part_number(s *S3Conf) error { testName := "HeadObject_invalid_part_number" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { @@ -3745,7 +3709,7 @@ func ListObjects_with_prefix(s *S3Conf) error { return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { prefix := "obj" objWithPrefix := []string{prefix + "/bar", prefix + "/baz/bla", prefix + "/foo"} - err := putObjects(s3client, append(objWithPrefix, []string{"xzy/csf", "hell"}...), bucket) + contents, err := putObjects(s3client, append(objWithPrefix, []string{"azy/csf", "hell"}...), bucket) if err != nil { return err } @@ -3760,13 +3724,11 @@ func ListObjects_with_prefix(s *S3Conf) error { return err } - contents := createEmptyObjectsList(objWithPrefix) - if *out.Prefix != prefix { return fmt.Errorf("expected prefix %v, instead got %v", prefix, *out.Prefix) } - if !compareObjects(contents, out.Contents) { - return fmt.Errorf("expected the 
output to be %v, instead got %v", contents, out.Contents) + if !compareObjects(contents[2:], out.Contents) { + return fmt.Errorf("expected the output to be %v, instead got %v", contents[2:], out.Contents) } return nil @@ -3777,7 +3739,7 @@ func ListObjects_truncated(s *S3Conf) error { testName := "ListObjects_truncated" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { maxKeys := int32(2) - err := putObjects(s3client, []string{"foo", "bar", "baz"}, bucket) + contents, err := putObjects(s3client, []string{"foo", "bar", "baz"}, bucket) if err != nil { return err } @@ -3804,9 +3766,8 @@ func ListObjects_truncated(s *S3Conf) error { return fmt.Errorf("expected next-marker to be baz, instead got %v", *out1.NextMarker) } - contents := createEmptyObjectsList([]string{"bar", "baz"}) - if !compareObjects(contents, out1.Contents) { - return fmt.Errorf("expected the output to be %v, instead got %v", contents, out1.Contents) + if !compareObjects(contents[:2], out1.Contents) { + return fmt.Errorf("expected the output to be %v, instead got %v", contents[:2], out1.Contents) } ctx, cancel = context.WithTimeout(context.Background(), shortTimeout) @@ -3827,10 +3788,8 @@ func ListObjects_truncated(s *S3Conf) error { return fmt.Errorf("expected marker to be %v, instead got %v", *out1.NextMarker, *out2.Marker) } - contents = createEmptyObjectsList([]string{"foo"}) - - if !compareObjects(contents, out2.Contents) { - return fmt.Errorf("expected the output to be %v, instead got %v", contents, out2.Contents) + if !compareObjects(contents[2:], out2.Contents) { + return fmt.Errorf("expected the output to be %v, instead got %v", contents[2:], out2.Contents) } return nil }) @@ -3858,7 +3817,7 @@ func ListObjects_max_keys_0(s *S3Conf) error { testName := "ListObjects_max_keys_0" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { objects := []string{"foo", "bar", "baz"} - err := putObjects(s3client, objects, bucket) + _, err := 
putObjects(s3client, objects, bucket) if err != nil { return err } @@ -3884,7 +3843,7 @@ func ListObjects_max_keys_0(s *S3Conf) error { func ListObjects_delimiter(s *S3Conf) error { testName := "ListObjects_delimiter" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { - err := putObjects(s3client, []string{"foo/bar/baz", "foo/bar/xyzzy", "quux/thud", "asdf"}, bucket) + _, err := putObjects(s3client, []string{"foo/bar/baz", "foo/bar/xyzzy", "quux/thud", "asdf"}, bucket) if err != nil { return err } @@ -3920,7 +3879,7 @@ func ListObjects_delimiter(s *S3Conf) error { func ListObjects_max_keys_none(s *S3Conf) error { testName := "ListObjects_max_keys_none" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { - err := putObjects(s3client, []string{"foo", "bar", "baz"}, bucket) + _, err := putObjects(s3client, []string{"foo", "bar", "baz"}, bucket) if err != nil { return err } @@ -3945,7 +3904,7 @@ func ListObjects_max_keys_none(s *S3Conf) error { func ListObjects_marker_not_from_obj_list(s *S3Conf) error { testName := "ListObjects_marker_not_from_obj_list" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { - err := putObjects(s3client, []string{"foo", "bar", "baz", "qux", "hello", "xyz"}, bucket) + contents, err := putObjects(s3client, []string{"foo", "bar", "baz", "qux", "hello", "xyz"}, bucket) if err != nil { return err } @@ -3960,9 +3919,7 @@ func ListObjects_marker_not_from_obj_list(s *S3Conf) error { return err } - contents := createEmptyObjectsList([]string{"foo", "hello", "qux", "xyz"}) - - if !compareObjects(contents, out.Contents) { + if !compareObjects(contents[2:], out.Contents) { return fmt.Errorf("expected output to be %v, instead got %v", contents, out.Contents) } @@ -3973,7 +3930,7 @@ func ListObjects_marker_not_from_obj_list(s *S3Conf) error { func ListObjectsV2_start_after(s *S3Conf) error { testName := "ListObjectsV2_start_after" return actionHandler(s, 
testName, func(s3client *s3.Client, bucket string) error { - err := putObjects(s3client, []string{"foo", "bar", "baz"}, bucket) + contents, err := putObjects(s3client, []string{"foo", "bar", "baz"}, bucket) if err != nil { return err } @@ -3988,9 +3945,7 @@ func ListObjectsV2_start_after(s *S3Conf) error { return err } - contents := createEmptyObjectsList([]string{"baz", "foo"}) - - if !compareObjects(contents, out.Contents) { + if !compareObjects(contents[1:], out.Contents) { return fmt.Errorf("expected the output to be %v, instead got %v", contents, out.Contents) } @@ -4001,7 +3956,7 @@ func ListObjectsV2_start_after(s *S3Conf) error { func ListObjectsV2_both_start_after_and_continuation_token(s *S3Conf) error { testName := "ListObjectsV2_both_start_after_and_continuation_token" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { - err := putObjects(s3client, []string{"foo", "bar", "baz", "quxx"}, bucket) + contents, err := putObjects(s3client, []string{"foo", "bar", "baz", "quxx"}, bucket) if err != nil { return err } @@ -4029,10 +3984,8 @@ func ListObjectsV2_both_start_after_and_continuation_token(s *S3Conf) error { return fmt.Errorf("expected next-marker to be baz, instead got %v", *out.NextContinuationToken) } - contents := createEmptyObjectsList([]string{"bar"}) - - if !compareObjects(contents, out.Contents) { - return fmt.Errorf("expected the output to be %v, instead got %v", contents, out.Contents) + if !compareObjects(contents[:1], out.Contents) { + return fmt.Errorf("expected the output to be %v, instead got %v", contents[:1], out.Contents) } ctx, cancel = context.WithTimeout(context.Background(), shortTimeout) @@ -4046,10 +3999,8 @@ func ListObjectsV2_both_start_after_and_continuation_token(s *S3Conf) error { return err } - contents = createEmptyObjectsList([]string{"foo", "quxx"}) - - if !compareObjects(contents, resp.Contents) { - return fmt.Errorf("expected the output to be %v, instead got %v", contents, resp.Contents) 
+ if !compareObjects(contents[2:], resp.Contents) { + return fmt.Errorf("expected the output to be %v, instead got %v", contents[2:], resp.Contents) } return nil @@ -4059,7 +4010,7 @@ func ListObjectsV2_both_start_after_and_continuation_token(s *S3Conf) error { func ListObjectsV2_start_after_not_in_list(s *S3Conf) error { testName := "ListObjectsV2_start_after_not_in_list" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { - err := putObjects(s3client, []string{"foo", "bar", "baz", "quxx"}, bucket) + contents, err := putObjects(s3client, []string{"foo", "bar", "baz", "quxx"}, bucket) if err != nil { return err } @@ -4074,10 +4025,8 @@ func ListObjectsV2_start_after_not_in_list(s *S3Conf) error { return err } - contents := createEmptyObjectsList([]string{"foo", "quxx"}) - - if !compareObjects(contents, out.Contents) { - return fmt.Errorf("expected the output to be %v, instead got %v", contents, out.Contents) + if !compareObjects(contents[2:], out.Contents) { + return fmt.Errorf("expected the output to be %v, instead got %v", contents[2:], out.Contents) } return nil @@ -4087,7 +4036,7 @@ func ListObjectsV2_start_after_not_in_list(s *S3Conf) error { func ListObjectsV2_start_after_empty_result(s *S3Conf) error { testName := "ListObjectsV2_start_after_empty_result" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { - err := putObjects(s3client, []string{"foo", "bar", "baz", "quxx"}, bucket) + _, err := putObjects(s3client, []string{"foo", "bar", "baz", "quxx"}, bucket) if err != nil { return err } @@ -4113,13 +4062,14 @@ func ListObjectsV2_start_after_empty_result(s *S3Conf) error { func ListObjectsV2_both_delimiter_and_prefix(s *S3Conf) error { testName := "ListObjectsV2_both_delimiter_and_prefix" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { - if err := putObjects(s3client, []string{ + _, err := putObjects(s3client, []string{ "sample.jpg", 
"photos/2006/January/sample.jpg", "photos/2006/February/sample2.jpg", "photos/2006/February/sample3.jpg", "photos/2006/February/sample4.jpg", - }, bucket); err != nil { + }, bucket) + if err != nil { return err } delim, prefix := "/", "photos/2006/" @@ -4155,7 +4105,8 @@ func ListObjectsV2_both_delimiter_and_prefix(s *S3Conf) error { func ListObjectsV2_single_dir_object_with_delim_and_prefix(s *S3Conf) error { testName := "ListObjectsV2_single_dir_object_with_delim_and_prefix" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { - if err := putObjects(s3client, []string{"a/"}, bucket); err != nil { + contents, err := putObjects(s3client, []string{"a/"}, bucket) + if err != nil { return err } @@ -4192,8 +4143,6 @@ func ListObjectsV2_single_dir_object_with_delim_and_prefix(s *S3Conf) error { return err } - contents := createEmptyObjectsList([]string{"a/"}) - if !compareObjects(contents, res.Contents) { return fmt.Errorf("expected the object list to be %v, instead got %v", []string{"a/"}, res.Contents) } @@ -4208,7 +4157,8 @@ func ListObjectsV2_single_dir_object_with_delim_and_prefix(s *S3Conf) error { func ListObjectsV2_truncated_common_prefixes(s *S3Conf) error { testName := "ListObjectsV2_truncated_common_prefixes" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { - if err := putObjects(s3client, []string{"d1/f1", "d2/f2", "d3/f3", "d4/f4"}, bucket); err != nil { + _, err := putObjects(s3client, []string{"d1/f1", "d2/f2", "d3/f3", "d4/f4"}, bucket) + if err != nil { return err } @@ -4263,12 +4213,12 @@ func ListObjectsV2_truncated_common_prefixes(s *S3Conf) error { func ListObjectsV2_all_objs_max_keys(s *S3Conf) error { testName := "ListObjectsV2_all_objs_max_keys" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { - objs := []string{"bar", "baz", "foo"} - if err := putObjects(s3client, objs, bucket); err != nil { + contents, err := putObjects(s3client, []string{"bar", 
"baz", "foo"}, bucket) + if err != nil { return err } - maxKeys := int32(len(objs)) + maxKeys := int32(3) ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) out, err := s3client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{ @@ -4290,8 +4240,6 @@ func ListObjectsV2_all_objs_max_keys(s *S3Conf) error { return fmt.Errorf("expected the max-keys to be %v, instead got %v", maxKeys, *out.MaxKeys) } - contents := createEmptyObjectsList(objs) - if !compareObjects(contents, out.Contents) { return fmt.Errorf("expected the objects list to be %v, instead got %v", contents, out.Contents) } @@ -4313,24 +4261,11 @@ func DeleteObject_non_existing_object(s *S3Conf) error { }) } -func DeleteObject_name_too_long(s *S3Conf) error { - testName := "DeleteObject_name_too_long" - return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { - ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) - _, err := s3client.DeleteObject(ctx, &s3.DeleteObjectInput{ - Bucket: &bucket, - Key: getPtr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), - }) - cancel() - return err - }) -} - func DeleteObject_non_existing_dir_object(s *S3Conf) error { testName := "DeleteObject_non_existing_dir_object" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { obj := "my-obj" - err := putObjects(s3client, []string{obj}, bucket) + _, err := putObjects(s3client, []string{obj}, bucket) if err != nil { return err } @@ -4350,7 +4285,7 @@ func DeleteObject_success(s *S3Conf) error { testName := "DeleteObject_success" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { obj := "my-obj" - err := putObjects(s3client, []string{obj}, bucket) + _, err := 
putObjects(s3client, []string{obj}, bucket) if err != nil { return err } @@ -4382,7 +4317,7 @@ func DeleteObject_success_status_code(s *S3Conf) error { testName := "DeleteObject_success_status_code" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { obj := "my-obj" - err := putObjects(s3client, []string{obj}, bucket) + _, err := putObjects(s3client, []string{obj}, bucket) if err != nil { return err } @@ -4412,7 +4347,7 @@ func DeleteObject_success_status_code(s *S3Conf) error { func DeleteObjects_empty_input(s *S3Conf) error { testName := "DeleteObjects_empty_input" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { - err := putObjects(s3client, []string{"foo", "bar", "baz"}, bucket) + contents, err := putObjects(s3client, []string{"foo", "bar", "baz"}, bucket) if err != nil { return err } @@ -4445,8 +4380,6 @@ func DeleteObjects_empty_input(s *S3Conf) error { return err } - contents := createEmptyObjectsList([]string{"bar", "baz", "foo"}) - if !compareObjects(contents, res.Contents) { return fmt.Errorf("expected the output to be %v, instead got %v", contents, res.Contents) } @@ -4487,7 +4420,7 @@ func DeleteObjects_success(s *S3Conf) error { testName := "DeleteObjects_success" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { objects, objToDel := []string{"obj1", "obj2", "obj3"}, []string{"foo", "bar", "baz"} - err := putObjects(s3client, append(objToDel, objects...), bucket) + contents, err := putObjects(s3client, append(objToDel, objects...), bucket) if err != nil { return err } @@ -4529,10 +4462,8 @@ func DeleteObjects_success(s *S3Conf) error { return err } - contents := createEmptyObjectsList(objects) - - if !compareObjects(contents, res.Contents) { - return fmt.Errorf("expected the output to be %v, instead got %v", contents, res.Contents) + if !compareObjects(contents[3:], res.Contents) { + return fmt.Errorf("expected the output to be %v, instead got %v", 
contents[3:], res.Contents) } return nil @@ -4543,7 +4474,7 @@ func CopyObject_non_existing_dst_bucket(s *S3Conf) error { testName := "CopyObject_non_existing_dst_bucket" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { obj := "my-obj" - err := putObjects(s3client, []string{obj}, bucket) + _, err := putObjects(s3client, []string{obj}, bucket) if err != nil { return err } @@ -4565,7 +4496,7 @@ func CopyObject_not_owned_source_bucket(s *S3Conf) error { testName := "CopyObject_not_owned_source_bucket" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { srcObj := "my-obj" - err := putObjects(s3client, []string{srcObj}, bucket) + _, err := putObjects(s3client, []string{srcObj}, bucket) if err != nil { return err } @@ -4621,7 +4552,7 @@ func CopyObject_copy_to_itself(s *S3Conf) error { testName := "CopyObject_copy_to_itself" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { obj := "my-obj" - err := putObjects(s3client, []string{obj}, bucket) + _, err := putObjects(s3client, []string{obj}, bucket) if err != nil { return err } @@ -4643,7 +4574,7 @@ func CopyObject_copy_to_itself_invalid_directive(s *S3Conf) error { testName := "CopyObject_copy_to_itself_invalid_directive" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { obj := "my-obj" - err := putObjects(s3client, []string{obj}, bucket) + _, err := putObjects(s3client, []string{obj}, bucket) if err != nil { return err } @@ -4671,7 +4602,7 @@ func CopyObject_to_itself_with_new_metadata(s *S3Conf) error { return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { obj := "my-obj" - err := putObjects(s3client, []string{obj}, bucket) + _, err := putObjects(s3client, []string{obj}, bucket) if err != nil { return err } @@ -4919,7 +4850,7 @@ func PutObjectTagging_long_tags(s *S3Conf) error { return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { obj 
:= "my-obj" tagging := types.Tagging{TagSet: []types.Tag{{Key: getPtr(genRandString(129)), Value: getPtr("val")}}} - err := putObjects(s3client, []string{obj}, bucket) + _, err := putObjects(s3client, []string{obj}, bucket) if err != nil { return err } @@ -4955,7 +4886,7 @@ func PutObjectTagging_success(s *S3Conf) error { return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { obj := "my-obj" tagging := types.Tagging{TagSet: []types.Tag{{Key: getPtr("key1"), Value: getPtr("val2")}, {Key: getPtr("key2"), Value: getPtr("val2")}}} - err := putObjects(s3client, []string{obj}, bucket) + _, err := putObjects(s3client, []string{obj}, bucket) if err != nil { return err } @@ -4994,11 +4925,12 @@ func GetObjectTagging_unset_tags(s *S3Conf) error { testName := "GetObjectTagging_unset_tags" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { obj := "my-obj" - if err := putObjects(s3client, []string{obj}, bucket); err != nil { + _, err := putObjects(s3client, []string{obj}, bucket) + if err != nil { return err } ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) - _, err := s3client.GetObjectTagging(ctx, &s3.GetObjectTaggingInput{ + _, err = s3client.GetObjectTagging(ctx, &s3.GetObjectTaggingInput{ Bucket: &bucket, Key: &obj, }) @@ -5015,7 +4947,7 @@ func GetObjectTagging_success(s *S3Conf) error { return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { obj := "my-obj" tagging := types.Tagging{TagSet: []types.Tag{{Key: getPtr("key1"), Value: getPtr("val2")}, {Key: getPtr("key2"), Value: getPtr("val2")}}} - err := putObjects(s3client, []string{obj}, bucket) + _, err := putObjects(s3client, []string{obj}, bucket) if err != nil { return err } @@ -5068,7 +5000,7 @@ func DeleteObjectTagging_success_status(s *S3Conf) error { testName := "DeleteObjectTagging_success_status" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { obj := "my-obj" - err := 
putObjects(s3client, []string{obj}, bucket) + _, err := putObjects(s3client, []string{obj}, bucket) if err != nil { return err } @@ -5120,7 +5052,7 @@ func DeleteObjectTagging_success(s *S3Conf) error { return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { obj := "my-obj" tagging := types.Tagging{TagSet: []types.Tag{{Key: getPtr("key1"), Value: getPtr("val2")}, {Key: getPtr("key2"), Value: getPtr("val2")}}} - err := putObjects(s3client, []string{obj}, bucket) + _, err := putObjects(s3client, []string{obj}, bucket) if err != nil { return err } @@ -5712,7 +5644,7 @@ func UploadPartCopy_incorrect_uploadId(s *S3Conf) error { if err != nil { return err } - err = putObjects(s3client, []string{srcObj}, srcBucket) + _, err = putObjects(s3client, []string{srcObj}, srcBucket) if err != nil { return err } @@ -5753,7 +5685,7 @@ func UploadPartCopy_incorrect_object_key(s *S3Conf) error { if err != nil { return err } - err = putObjects(s3client, []string{srcObj}, srcBucket) + _, err = putObjects(s3client, []string{srcObj}, srcBucket) if err != nil { return err } @@ -6366,7 +6298,7 @@ func ListMultipartUploads_max_uploads(s *S3Conf) error { return fmt.Errorf("expected next-key-marker to be %v, instead got %v", *uploads[1].Key, *out.NextKeyMarker) } if *out.NextUploadIdMarker != *uploads[1].UploadId { - return fmt.Errorf("expected next-upload-id-marker to be %v, instead got %v", *uploads[1].Key, *out.NextKeyMarker) + return fmt.Errorf("expected next-upload-id-marker to be %v, instead got %v", *uploads[1].UploadId, *out.NextUploadIdMarker) } ctx, cancel = context.WithTimeout(context.Background(), shortTimeout) @@ -6398,7 +6330,7 @@ func ListMultipartUploads_incorrect_next_key_marker(s *S3Conf) error { ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) out, err := s3client.ListMultipartUploads(ctx, &s3.ListMultipartUploadsInput{ Bucket: &bucket, - KeyMarker: getPtr("incorrect_object_key"), + KeyMarker: getPtr("wrong_object_key"), }) 
cancel() if err != nil { @@ -7027,7 +6959,7 @@ func PutBucketAcl_success_access_denied(s *S3Conf) error { newConf.awsSecret = "grt1secret" userClient := s3.NewFromConfig(newConf.Config()) - err = putObjects(userClient, []string{"my-obj"}, bucket) + _, err = putObjects(userClient, []string{"my-obj"}, bucket) if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrAccessDenied)); err != nil { return err } @@ -7059,7 +6991,7 @@ func PutBucketAcl_success_canned_acl(s *S3Conf) error { newConf.awsSecret = "grt1secret" userClient := s3.NewFromConfig(newConf.Config()) - err = putObjects(userClient, []string{"my-obj"}, bucket) + _, err = putObjects(userClient, []string{"my-obj"}, bucket) if err != nil { return err } @@ -7091,7 +7023,7 @@ func PutBucketAcl_success_acp(s *S3Conf) error { newConf.awsSecret = "grt1secret" userClient := s3.NewFromConfig(newConf.Config()) - err = putObjects(userClient, []string{"my-obj"}, bucket) + _, err = putObjects(userClient, []string{"my-obj"}, bucket) if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrAccessDenied)); err != nil { return err } @@ -7145,7 +7077,7 @@ func PutBucketAcl_success_grants(s *S3Conf) error { newConf.awsSecret = "grt1secret" userClient := s3.NewFromConfig(newConf.Config()) - err = putObjects(userClient, []string{"my-obj"}, bucket) + _, err = putObjects(userClient, []string{"my-obj"}, bucket) if err != nil { return err } @@ -8349,12 +8281,13 @@ func PutObjectRetention_unset_bucket_object_lock_config(s *S3Conf) error { date := time.Now().Add(time.Hour * 3) key := "my-obj" - if err := putObjects(s3client, []string{key}, bucket); err != nil { + _, err := putObjects(s3client, []string{key}, bucket) + if err != nil { return err } ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) - _, err := s3client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{ + _, err = s3client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{ Bucket: &bucket, Key: &key, Retention: &types.ObjectLockRetention{ @@ -8387,7 
+8320,8 @@ func PutObjectRetention_disabled_bucket_object_lock_config(s *S3Conf) error { date := time.Now().Add(time.Hour * 3) key := "my-obj" - if err := putObjects(s3client, []string{key}, bucket); err != nil { + _, err = putObjects(s3client, []string{key}, bucket) + if err != nil { return err } @@ -8463,12 +8397,13 @@ func PutObjectRetention_overwrite_compliance_mode(s *S3Conf) error { return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { date := time.Now().Add(time.Hour * 3) obj := "my-obj" - if err := putObjects(s3client, []string{obj}, bucket); err != nil { + _, err := putObjects(s3client, []string{obj}, bucket) + if err != nil { return err } ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) - _, err := s3client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{ + _, err = s3client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{ Bucket: &bucket, Key: &obj, Retention: &types.ObjectLockRetention{ @@ -8508,12 +8443,13 @@ func PutObjectRetention_overwrite_governance_without_bypass_specified(s *S3Conf) return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { date := time.Now().Add(time.Hour * 3) obj := "my-obj" - if err := putObjects(s3client, []string{obj}, bucket); err != nil { + _, err := putObjects(s3client, []string{obj}, bucket) + if err != nil { return err } ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) - _, err := s3client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{ + _, err = s3client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{ Bucket: &bucket, Key: &obj, Retention: &types.ObjectLockRetention{ @@ -8553,12 +8489,13 @@ func PutObjectRetention_overwrite_governance_with_permission(s *S3Conf) error { return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { date := time.Now().Add(time.Hour * 3) obj := "my-obj" - if err := putObjects(s3client, []string{obj}, bucket); err != nil { + _, err := putObjects(s3client, 
[]string{obj}, bucket) + if err != nil { return err } ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) - _, err := s3client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{ + _, err = s3client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{ Bucket: &bucket, Key: &obj, Retention: &types.ObjectLockRetention{ @@ -8617,12 +8554,13 @@ func PutObjectRetention_success(s *S3Conf) error { date := time.Now().Add(time.Hour * 3) key := "my-obj" - if err := putObjects(s3client, []string{key}, bucket); err != nil { + _, err := putObjects(s3client, []string{key}, bucket) + if err != nil { return err } ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) - _, err := s3client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{ + _, err = s3client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{ Bucket: &bucket, Key: &key, Retention: &types.ObjectLockRetention{ @@ -8681,12 +8619,13 @@ func GetObjectRetention_unset_config(s *S3Conf) error { testName := "GetObjectRetention_unset_config" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { key := "my-obj" - if err := putObjects(s3client, []string{key}, bucket); err != nil { + _, err := putObjects(s3client, []string{key}, bucket) + if err != nil { return err } ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) - _, err := s3client.GetObjectRetention(ctx, &s3.GetObjectRetentionInput{ + _, err = s3client.GetObjectRetention(ctx, &s3.GetObjectRetentionInput{ Bucket: &bucket, Key: &key, }) @@ -8706,7 +8645,8 @@ func GetObjectRetention_success(s *S3Conf) error { return err } key := "my-obj" - if err := putObjects(s3client, []string{key}, bucket); err != nil { + _, err := putObjects(s3client, []string{key}, bucket) + if err != nil { return err } @@ -8717,7 +8657,7 @@ func GetObjectRetention_success(s *S3Conf) error { } ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) - _, err := s3client.PutObjectRetention(ctx, 
&s3.PutObjectRetentionInput{ + _, err = s3client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{ Bucket: &bucket, Key: &key, Retention: &retention, @@ -8842,12 +8782,13 @@ func PutObjectLegalHold_unset_bucket_object_lock_config(s *S3Conf) error { return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { key := "my-obj" - if err := putObjects(s3client, []string{key}, bucket); err != nil { + _, err := putObjects(s3client, []string{key}, bucket) + if err != nil { return err } ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) - _, err := s3client.PutObjectLegalHold(ctx, &s3.PutObjectLegalHoldInput{ + _, err = s3client.PutObjectLegalHold(ctx, &s3.PutObjectLegalHoldInput{ Bucket: &bucket, Key: &key, LegalHold: &types.ObjectLockLegalHold{ @@ -8878,7 +8819,8 @@ func PutObjectLegalHold_disabled_bucket_object_lock_config(s *S3Conf) error { key := "my-obj" - if err := putObjects(s3client, []string{key}, bucket); err != nil { + _, err = putObjects(s3client, []string{key}, bucket) + if err != nil { return err } @@ -8908,12 +8850,13 @@ func PutObjectLegalHold_success(s *S3Conf) error { key := "my-obj" - if err := putObjects(s3client, []string{key}, bucket); err != nil { + _, err := putObjects(s3client, []string{key}, bucket) + if err != nil { return err } ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) - _, err := s3client.PutObjectLegalHold(ctx, &s3.PutObjectLegalHoldInput{ + _, err = s3client.PutObjectLegalHold(ctx, &s3.PutObjectLegalHoldInput{ Bucket: &bucket, Key: &key, LegalHold: &types.ObjectLockLegalHold{ @@ -8971,12 +8914,13 @@ func GetObjectLegalHold_unset_config(s *S3Conf) error { testName := "GetObjectLegalHold_unset_config" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { key := "my-obj" - if err := putObjects(s3client, []string{key}, bucket); err != nil { + _, err := putObjects(s3client, []string{key}, bucket) + if err != nil { return err } ctx, cancel := 
context.WithTimeout(context.Background(), shortTimeout) - _, err := s3client.GetObjectLegalHold(ctx, &s3.GetObjectLegalHoldInput{ + _, err = s3client.GetObjectLegalHold(ctx, &s3.GetObjectLegalHoldInput{ Bucket: &bucket, Key: &key, }) @@ -8996,12 +8940,13 @@ func GetObjectLegalHold_success(s *S3Conf) error { return err } key := "my-obj" - if err := putObjects(s3client, []string{key}, bucket); err != nil { + _, err := putObjects(s3client, []string{key}, bucket) + if err != nil { return err } ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) - _, err := s3client.PutObjectLegalHold(ctx, &s3.PutObjectLegalHoldInput{ + _, err = s3client.PutObjectLegalHold(ctx, &s3.PutObjectLegalHoldInput{ Bucket: &bucket, Key: &key, LegalHold: &types.ObjectLockLegalHold{ @@ -9058,7 +9003,8 @@ func WORMProtection_bucket_object_lock_configuration_compliance_mode(s *S3Conf) return err } - if err := putObjects(s3client, []string{object}, bucket); err != nil { + _, err = putObjects(s3client, []string{object}, bucket) + if err != nil { return err } @@ -9096,7 +9042,8 @@ func WORMProtection_bucket_object_lock_configuration_governance_mode(s *S3Conf) return err } - if err := putObjects(s3client, []string{object}, bucket); err != nil { + _, err = putObjects(s3client, []string{object}, bucket) + if err != nil { return err } @@ -9134,7 +9081,8 @@ func WORMProtection_bucket_object_lock_governance_bypass_delete(s *S3Conf) error return err } - if err := putObjects(s3client, []string{object}, bucket); err != nil { + _, err = putObjects(s3client, []string{object}, bucket) + if err != nil { return err } @@ -9193,7 +9141,8 @@ func WORMProtection_bucket_object_lock_governance_bypass_delete_multiple(s *S3Co return err } - if err := putObjects(s3client, []string{obj1, obj2, obj3}, bucket); err != nil { + _, err = putObjects(s3client, []string{obj1, obj2, obj3}, bucket) + if err != nil { return err } @@ -9246,13 +9195,14 @@ func WORMProtection_object_lock_retention_compliance_locked(s 
*S3Conf) error { return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { object := "my-obj" - if err := putObjects(s3client, []string{object}, bucket); err != nil { + _, err := putObjects(s3client, []string{object}, bucket) + if err != nil { return err } date := time.Now().Add(time.Hour * 3) ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) - _, err := s3client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{ + _, err = s3client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{ Bucket: &bucket, Key: &object, Retention: &types.ObjectLockRetention{ @@ -9281,13 +9231,14 @@ func WORMProtection_object_lock_retention_governance_locked(s *S3Conf) error { return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { object := "my-obj" - if err := putObjects(s3client, []string{object}, bucket); err != nil { + _, err := putObjects(s3client, []string{object}, bucket) + if err != nil { return err } date := time.Now().Add(time.Hour * 3) ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) - _, err := s3client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{ + _, err = s3client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{ Bucket: &bucket, Key: &object, Retention: &types.ObjectLockRetention{ @@ -9316,13 +9267,14 @@ func WORMProtection_object_lock_retention_governance_bypass_overwrite(s *S3Conf) return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { object := "my-obj" - if err := putObjects(s3client, []string{object}, bucket); err != nil { + _, err := putObjects(s3client, []string{object}, bucket) + if err != nil { return err } date := time.Now().Add(time.Hour * 3) ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) - _, err := s3client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{ + _, err = s3client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{ Bucket: &bucket, Key: &object, Retention: &types.ObjectLockRetention{ 
@@ -9370,13 +9322,14 @@ func WORMProtection_object_lock_retention_governance_bypass_delete(s *S3Conf) er return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { object := "my-obj" - if err := putObjects(s3client, []string{object}, bucket); err != nil { + _, err := putObjects(s3client, []string{object}, bucket) + if err != nil { return err } date := time.Now().Add(time.Hour * 3) ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) - _, err := s3client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{ + _, err = s3client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{ Bucket: &bucket, Key: &object, Retention: &types.ObjectLockRetention{ @@ -9426,7 +9379,8 @@ func WORMProtection_object_lock_retention_governance_bypass_delete_mul(s *S3Conf return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { objs := []string{"my-obj-1", "my-obj2", "my-obj-3"} - if err := putObjects(s3client, objs, bucket); err != nil { + _, err := putObjects(s3client, objs, bucket) + if err != nil { return err } @@ -9452,7 +9406,7 @@ func WORMProtection_object_lock_retention_governance_bypass_delete_mul(s *S3Conf bypass := true ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) - _, err := s3client.PutBucketPolicy(ctx, &s3.PutBucketPolicyInput{ + _, err = s3client.PutBucketPolicy(ctx, &s3.PutBucketPolicyInput{ Bucket: &bucket, Policy: &policy, }) @@ -9501,12 +9455,13 @@ func WORMProtection_object_lock_legal_hold_locked(s *S3Conf) error { object := "my-obj" - if err := putObjects(s3client, []string{object}, bucket); err != nil { + _, err := putObjects(s3client, []string{object}, bucket) + if err != nil { return err } ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) - _, err := s3client.PutObjectLegalHold(ctx, &s3.PutObjectLegalHoldInput{ + _, err = s3client.PutObjectLegalHold(ctx, &s3.PutObjectLegalHoldInput{ Bucket: &bucket, Key: &object, LegalHold: &types.ObjectLockLegalHold{ @@ 
-9518,7 +9473,7 @@ func WORMProtection_object_lock_legal_hold_locked(s *S3Conf) error { return err } - err = putObjects(s3client, []string{object}, bucket) + _, err = putObjects(s3client, []string{object}, bucket) if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrObjectLocked)); err != nil { return err } @@ -9535,13 +9490,14 @@ func WORMProtection_root_bypass_governance_retention_delete_object(s *S3Conf) er testName := "WORMProtection_root_bypass_governance_retention_delete_object" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { obj := "my-obj" - if err := putObjects(s3client, []string{obj}, bucket); err != nil { + _, err := putObjects(s3client, []string{obj}, bucket) + if err != nil { return err } retDate := time.Now().Add(time.Hour * 48) ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) - _, err := s3client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{ + _, err = s3client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{ Bucket: &bucket, Key: &obj, Retention: &types.ObjectLockRetention{ @@ -9608,7 +9564,7 @@ func AccessControl_default_ACL_user_access_denied(s *S3Conf) error { cfg.awsID = usr.access cfg.awsSecret = usr.secret - err = putObjects(s3.NewFromConfig(cfg.Config()), []string{"my-obj"}, bucket) + _, err = putObjects(s3.NewFromConfig(cfg.Config()), []string{"my-obj"}, bucket) if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrAccessDenied)); err != nil { return err } @@ -9634,7 +9590,7 @@ func AccessControl_default_ACL_userplus_access_denied(s *S3Conf) error { cfg.awsID = usr.access cfg.awsSecret = usr.secret - err = putObjects(s3.NewFromConfig(cfg.Config()), []string{"my-obj"}, bucket) + _, err = putObjects(s3.NewFromConfig(cfg.Config()), []string{"my-obj"}, bucket) if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrAccessDenied)); err != nil { return err } @@ -9660,7 +9616,7 @@ func AccessControl_default_ACL_admin_successful_access(s *S3Conf) error { cfg.awsID = admin.access 
cfg.awsSecret = admin.secret - err = putObjects(s3.NewFromConfig(cfg.Config()), []string{"my-obj"}, bucket) + _, err = putObjects(s3.NewFromConfig(cfg.Config()), []string{"my-obj"}, bucket) if err != nil { return err } @@ -9766,14 +9722,14 @@ func AccessControl_bucket_resource_all_action(s *S3Conf) error { } user1Client := getUserS3Client(usr1, s) - err = putObjects(user1Client, []string{"my-obj"}, bucket) + _, err = putObjects(user1Client, []string{"my-obj"}, bucket) if err != nil { return err } user2Client := getUserS3Client(usr2, s) - err = putObjects(user2Client, []string{"my-obj"}, bucket) + _, err = putObjects(user2Client, []string{"my-obj"}, bucket) if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrAccessDenied)); err != nil { return err } @@ -9786,7 +9742,7 @@ func AccessControl_single_object_resource_actions(s *S3Conf) error { testName := "AccessControl_single_object_resource_actions" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { obj := "my-obj/nested-obj" - err := putObjects(s3client, []string{obj}, bucket) + _, err := putObjects(s3client, []string{obj}, bucket) if err != nil { return err } @@ -10043,7 +9999,8 @@ func AccessControl_copy_object_with_starting_slash_for_user(s *S3Conf) error { testName := "AccessControl_copy_object_with_starting_slash_for_user" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { obj := "my-obj" - if err := putObjects(s3client, []string{obj}, bucket); err != nil { + _, err := putObjects(s3client, []string{obj}, bucket) + if err != nil { return err } @@ -10064,7 +10021,7 @@ func AccessControl_copy_object_with_starting_slash_for_user(s *S3Conf) error { userClient := getUserS3Client(usr, s) ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) - _, err := userClient.CopyObject(ctx, &s3.CopyObjectInput{ + _, err = userClient.CopyObject(ctx, &s3.CopyObjectInput{ Bucket: &bucket, Key: &obj, CopySource: ©Source, @@ -10253,7 +10210,7 @@ func 
IAM_ChangeBucketOwner_back_to_root(s *S3Conf) error { func PutObject_overwrite_dir_obj(s *S3Conf) error { testName := "PutObject_overwrite_dir_obj" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { - err := putObjects(s3client, []string{"foo/", "foo"}, bucket) + _, err := putObjects(s3client, []string{"foo/", "foo"}, bucket) if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrExistingObjectIsDirectory)); err != nil { return err } @@ -10264,7 +10221,7 @@ func PutObject_overwrite_dir_obj(s *S3Conf) error { func PutObject_overwrite_file_obj(s *S3Conf) error { testName := "PutObject_overwrite_file_obj" return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { - err := putObjects(s3client, []string{"foo", "foo/"}, bucket) + _, err := putObjects(s3client, []string{"foo", "foo/"}, bucket) if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrObjectParentIsFile)); err != nil { return err } @@ -10296,3 +10253,51 @@ func CreateMultipartUpload_dir_obj(s *S3Conf) error { return nil }) } + +func PutObject_name_too_long(s *S3Conf) error { + testName := "PutObject_name_too_long" + return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { + key := genRandString(300) + + ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) + _, err := s3client.PutObject(ctx, &s3.PutObjectInput{ + Bucket: &bucket, + Key: &key, + }) + cancel() + if err := checkApiErr(err, s3err.GetAPIError(s3err.ErrKeyTooLong)); err != nil { + return err + } + + return nil + }) +} + +func HeadObject_name_too_long(s *S3Conf) error { + testName := "HeadObject_name_too_long" + return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { + ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) + _, err := s3client.HeadObject(ctx, &s3.HeadObjectInput{ + Bucket: &bucket, + Key: getPtr(genRandString(300)), + }) + cancel() + if err := checkSdkApiErr(err, "BadRequest"); err != nil { + 
return err + } + return nil + }) +} + +func DeleteObject_name_too_long(s *S3Conf) error { + testName := "DeleteObject_name_too_long" + return actionHandler(s, testName, func(s3client *s3.Client, bucket string) error { + ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) + _, err := s3client.DeleteObject(ctx, &s3.DeleteObjectInput{ + Bucket: &bucket, + Key: getPtr(genRandString(300)), + }) + cancel() + return err + }) +} diff --git a/tests/integration/utils.go b/tests/integration/utils.go index 8c29f05f..971f0059 100644 --- a/tests/integration/utils.go +++ b/tests/integration/utils.go @@ -29,6 +29,7 @@ import ( "net/url" "os" "os/exec" + "sort" "strings" "time" @@ -282,19 +283,34 @@ func checkSdkApiErr(err error, code string) error { return err } -func putObjects(client *s3.Client, objs []string, bucket string) error { +func putObjects(client *s3.Client, objs []string, bucket string) ([]types.Object, error) { + var contents []types.Object + var size int64 for _, key := range objs { ctx, cancel := context.WithTimeout(context.Background(), shortTimeout) - _, err := client.PutObject(ctx, &s3.PutObjectInput{ + res, err := client.PutObject(ctx, &s3.PutObjectInput{ Key: &key, Bucket: &bucket, }) cancel() if err != nil { - return err + return nil, err } + k := key + etag := strings.Trim(*res.ETag, `"`) + contents = append(contents, types.Object{ + Key: &k, + ETag: &etag, + StorageClass: types.ObjectStorageClassStandard, + Size: &size, + }) } - return nil + + sort.SliceStable(contents, func(i, j int) bool { + return *contents[i].Key < *contents[j].Key + }) + + return contents, nil } func putObjectWithData(lgth int64, input *s3.PutObjectInput, client *s3.Client) (csum [32]byte, data []byte, err error) { @@ -486,22 +502,6 @@ func compareObjects(list1, list2 []types.Object) bool { return true } -// Creates a list of types.Object with the provided objects keys: objs []string -func createEmptyObjectsList(objs []string) (result []types.Object) { - size := 
int64(0) - for _, obj := range objs { - o := obj - result = append(result, types.Object{ - Key: &o, - Size: &size, - StorageClass: types.ObjectStorageClassStandard, - ETag: &emptyObjETag, - }) - } - - return -} - func comparePrefixes(list1 []string, list2 []types.CommonPrefix) bool { if len(list1) != len(list2) { return false