Commit e69e235

@uppy/aws-s3-multipart: handle slow connections better (#4213)
aduh95 authored Nov 16, 2022
1 parent 92a0b20 commit e69e235
Showing 1 changed file with 18 additions and 4 deletions.
22 changes: 18 additions & 4 deletions packages/@uppy/aws-s3-multipart/src/index.js
@@ -33,6 +33,8 @@ class HTTPCommunicationQueue {
 
   #listParts
 
+  #previousRetryDelay
+
   #requests
 
   #retryDelayIterator
@@ -86,13 +88,25 @@
     }
     if (status === 403 && err.message === 'Request has expired') {
       if (!requests.isPaused) {
-        const next = this.#retryDelayIterator?.next()
-        if (next == null || next.done) {
-          return false
+        // We don't want to exhaust the retryDelayIterator as long as there are
+        // more than one request in parallel, to give slower connection a chance
+        // to catch up with the expiry set in Companion.
+        if (requests.limit === 1 || this.#previousRetryDelay == null) {
+          const next = this.#retryDelayIterator?.next()
+          if (next == null || next.done) {
+            return false
+          }
+          // If there are more than 1 request done in parallel, the RLQ limit is
+          // decreased and the failed request is requeued after waiting for a bit.
+          // If there is only one request in parallel, the limit can't be
+          // decreased, so we iterate over `retryDelayIterator` as we do for
+          // other failures.
+          // `#previousRetryDelay` caches the value so we can re-use it next time.
+          this.#previousRetryDelay = next.value
         }
         // No need to stop the other requests, we just want to lower the limit.
         requests.rateLimit(0)
-        await new Promise(resolve => setTimeout(resolve, next.value))
+        await new Promise(resolve => setTimeout(resolve, this.#previousRetryDelay))
       }
     } else if (status === 429) {
       // HTTP 429 Too Many Requests => to avoid the whole download to fail, pause all requests.
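
For context, the retry path above consumes a finite iterator of back-off delays and gives up once that iterator is exhausted. The sketch below is illustrative only: the createRetryDelayIterator helper and the [0, 1000, 3000, 5000] delay values are assumptions, not taken from the Uppy source. It shows why a slow connection that repeatedly hits "Request has expired" could previously burn through the iterator, and what this change does instead.

// A minimal sketch (not Uppy's actual implementation) of the kind of finite
// iterator that `#retryDelayIterator` is assumed to be: it yields back-off
// delays in milliseconds and reports `done` once the retries are used up.
function* createRetryDelayIterator (delays) {
  yield* delays
}

const retryDelayIterator = createRetryDelayIterator([0, 1000, 3000, 5000])

// With a single request in flight, each "Request has expired" failure advances
// the iterator, so the waits grow and the upload eventually gives up:
retryDelayIterator.next() // { value: 0, done: false }
retryDelayIterator.next() // { value: 1000, done: false }
// ...
retryDelayIterator.next() // { value: undefined, done: true } -> return false

// With several requests in parallel, the patch instead caches the first delay
// in `#previousRetryDelay`, calls `requests.rateLimit(0)` to lower the queue's
// concurrency, and waits that same cached delay again, so one slow connection
// no longer exhausts every retry slot.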
