diff --git a/src/Storage/Storage.Management/ChangeLog.md b/src/Storage/Storage.Management/ChangeLog.md index 84572716d8fc..63008755e183 100644 --- a/src/Storage/Storage.Management/ChangeLog.md +++ b/src/Storage/Storage.Management/ChangeLog.md @@ -18,6 +18,8 @@ - Additional information about change #1 --> ## Upcoming Release +* Fixed upload file with OAuth authentication issue [#24289] + - `Set-AzStorageFileContent` ## Version 6.1.2 * Fixed parser logic when downloading blob from managed disk account with Sas Uri and bearer token on Linux and MacOS diff --git a/src/Storage/Storage/File/Cmdlet/SetAzureStorageFileContent.cs b/src/Storage/Storage/File/Cmdlet/SetAzureStorageFileContent.cs index 6262f142c65a..4604eeb932c7 100644 --- a/src/Storage/Storage/File/Cmdlet/SetAzureStorageFileContent.cs +++ b/src/Storage/Storage/File/Cmdlet/SetAzureStorageFileContent.cs @@ -239,29 +239,27 @@ await DataMovementTransferHelper.DoTransfer(() => using (FileStream stream = File.OpenRead(localFile.FullName)) { byte[] buffer = null; - long lastBlockSize = 0; - for (long offset = 0; offset < fileSize; offset += blockSize) + for (long offset = 0; offset < fileSize;) { - long currentBlockSize = offset + blockSize < fileSize ? blockSize : fileSize - offset; + long targetBlockSize = offset + blockSize < fileSize ? 
blockSize : fileSize - offset; - // Only need to create new buffer when chunk size change - if (currentBlockSize != lastBlockSize) - { - buffer = new byte[currentBlockSize]; - lastBlockSize = currentBlockSize; - } - await stream.ReadAsync(buffer: buffer, offset: 0, count: (int)currentBlockSize); + // create a new buffer for each range; the old buffer will be garbage collected + buffer = new byte[targetBlockSize]; + + int actualBlockSize = await stream.ReadAsync(buffer: buffer, offset: 0, count: (int)targetBlockSize); if (!fipsEnabled && hash != null) { - hash.AppendData(buffer); + hash.AppendData(buffer, 0, actualBlockSize); } Task task = UploadFileRangAsync(fileClient, - new HttpRange(offset, currentBlockSize), - new MemoryStream(buffer), + new HttpRange(offset, actualBlockSize), + new MemoryStream(buffer, 0, actualBlockSize), progressHandler); runningTasks.Add(task); + offset += actualBlockSize; + // Check if any of upload range tasks are still busy if (runningTasks.Count >= maxWorkers) {