diff --git a/pkg/cloud/amazon/s3_storage.go b/pkg/cloud/amazon/s3_storage.go
index 19041e3516a0..42bc7340f000 100644
--- a/pkg/cloud/amazon/s3_storage.go
+++ b/pkg/cloud/amazon/s3_storage.go
@@ -357,7 +357,9 @@ func newClient(
 	sess.Config.Region = aws.String(region)
 
 	c := s3.New(sess)
-	u := s3manager.NewUploader(sess)
+	u := s3manager.NewUploader(sess, func(uploader *s3manager.Uploader) {
+		uploader.PartSize = cloud.WriteChunkSize.Get(&settings.SV)
+	})
 	return s3Client{client: c, uploader: u}, region, nil
 }
 
diff --git a/pkg/cloud/azure/azure_storage.go b/pkg/cloud/azure/azure_storage.go
index 62c9771e92c2..ee679ce83097 100644
--- a/pkg/cloud/azure/azure_storage.go
+++ b/pkg/cloud/azure/azure_storage.go
@@ -137,7 +137,7 @@ func (s *azureStorage) Writer(ctx context.Context, basename string) (io.WriteClo
 	return cloud.BackgroundPipe(ctx, func(ctx context.Context, r io.Reader) error {
 		_, err := azblob.UploadStreamToBlockBlob(
 			ctx, r, blob, azblob.UploadStreamToBlockBlobOptions{
-				BufferSize: 4 << 20,
+				BufferSize: int(cloud.WriteChunkSize.Get(&s.settings.SV)),
 			},
 		)
 		return err
diff --git a/pkg/cloud/cloud_io.go b/pkg/cloud/cloud_io.go
index cd5edf36569c..b7f70b5b2801 100644
--- a/pkg/cloud/cloud_io.go
+++ b/pkg/cloud/cloud_io.go
@@ -46,6 +46,15 @@ var httpCustomCA = settings.RegisterStringSetting(
 	"",
 ).WithPublic()
 
+// WriteChunkSize is used to control the size of each chunk that is buffered and
+// uploaded by the cloud storage client.
+var WriteChunkSize = settings.RegisterByteSizeSetting(
+	settings.TenantWritable,
+	"cloudstorage.write_chunk.size",
+	"controls the size of each file chunk uploaded by the cloud storage client",
+	8<<20,
+)
+
 // HTTPRetryOptions defines the tunable settings which control the retry of HTTP
 // operations.
 var HTTPRetryOptions = retry.Options{
diff --git a/pkg/cloud/gcp/gcs_storage.go b/pkg/cloud/gcp/gcs_storage.go
index 2a785f887676..83a6a1b928ab 100644
--- a/pkg/cloud/gcp/gcs_storage.go
+++ b/pkg/cloud/gcp/gcs_storage.go
@@ -159,6 +159,7 @@ func (g *gcsStorage) Writer(ctx context.Context, basename string) (io.WriteClose
 	sp.RecordStructured(&types.StringValue{Value: fmt.Sprintf("gcs.Writer: %s", path.Join(g.prefix, basename))})
 
 	w := g.bucket.Object(path.Join(g.prefix, basename)).NewWriter(ctx)
+	w.ChunkSize = int(cloud.WriteChunkSize.Get(&g.settings.SV))
 	if !gcsChunkingEnabled.Get(&g.settings.SV) {
 		w.ChunkSize = 0
 	}
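
Not part of the diff: a minimal sketch of how an operator might tune the new cloudstorage.write_chunk.size setting once this change lands. The connection string and the 16 MiB value are assumptions for illustration; byte-size cluster settings accept human-readable sizes such as '16MiB', and the 8 MiB default comes from the 8<<20 value registered above.

package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // Postgres-wire driver; CockroachDB speaks the same protocol.
)

func main() {
	// Assumed local, insecure single-node cluster; adjust the URL for a real deployment.
	db, err := sql.Open("postgres", "postgresql://root@localhost:26257/?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Raise the upload chunk size from the 8 MiB default to 16 MiB.
	if _, err := db.Exec("SET CLUSTER SETTING cloudstorage.write_chunk.size = '16MiB'"); err != nil {
		log.Fatal(err)
	}
}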