retryExchange.ts
import {
  makeSubject,
  pipe,
  merge,
  filter,
  fromValue,
  debounce,
  mergeMap,
  takeUntil,
} from 'wonka';
import type { Exchange, Operation, CombinedError } from '@urql/core';
import { makeOperation } from '@urql/core';

/** Input parameters for the {@link retryExchange}. */
export interface RetryExchangeOptions {
  /** Specifies the minimum time to wait until retrying.
   *
   * @remarks
   * `initialDelayMs` specifies the minimum time (in milliseconds) to wait
   * until a failed operation is retried.
   *
   * @defaultValue `1_000` - one second
   */
  initialDelayMs?: number;
  /** Specifies the maximum time to wait until retrying.
   *
   * @remarks
   * `maxDelayMs` specifies the maximum time (in milliseconds) to wait
   * until a failed operation is retried. While `initialDelayMs`
   * specifies the minimum amount of time, `randomDelay` may cause
   * the delay to increase over multiple attempts.
   *
   * @defaultValue `15_000` - 15 seconds
   */
  maxDelayMs?: number;
  /** Enables a random exponential backoff to increase the delay over multiple retries.
   *
   * @remarks
   * `randomDelay`, unless disabled, increases the time until a failed
   * operation is retried over multiple attempts. On each attempt the
   * previous delay is multiplied by a random factor between 1.5 and 2.5,
   * starting at `initialDelayMs`, until `maxDelayMs` is reached.
   *
   * @defaultValue `true` - enables random exponential backoff
   */
  randomDelay?: boolean;
  /** Specifies the maximum number of times an operation should be sent to the API.
   *
   * @remarks
   * `maxNumberAttempts` defines the maximum number of attempts an operation
   * is retried before it's considered failed.
   *
   * @defaultValue `2` - Retry once, i.e. two attempts
   */
  maxNumberAttempts?: number;
  /** Predicate allowing you to selectively not retry `Operation`s.
   *
   * @remarks
   * `retryIf` is called with a {@link CombinedError} and the {@link Operation} that
   * failed. If this function returns `false`, the failed `Operation` is not retried.
   *
   * @defaultValue `(error) => !!error.networkError` - retries only on network errors.
   */
  retryIf?(error: CombinedError, operation: Operation): boolean;
  /** Transform function allowing you to selectively replace a retried `Operation` or return a nullish value.
   *
   * @remarks
   * `retryWith` is called with a {@link CombinedError} and the {@link Operation} that
   * failed. If this function returns an `Operation`, `retryExchange` will replace the
   * failed `Operation` and retry. It won't retry the `Operation` if a nullish value
   * is returned.
   *
   * The `retryIf` function, if defined, takes precedence and overrides this option.
   */
  retryWith?(
    error: CombinedError,
    operation: Operation
  ): Operation | null | undefined;
}

interface RetryState {
  count: number;
  delay: number | null;
}

/** Exchange factory that retries failed operations.
 *
 * @param options - A {@link RetryExchangeOptions} configuration object.
 * @returns the created retry {@link Exchange}.
 *
 * @remarks
 * The `retryExchange` retries failed operations with specified delays
 * and exponential backoff.
 *
 * You may define a {@link RetryExchangeOptions.retryIf} or
 * {@link RetryExchangeOptions.retryWith} function to only retry
 * certain kinds of operations, e.g. only queries.
 *
 * @example
 * ```ts
 * retryExchange({
 *   initialDelayMs: 1000,
 *   maxDelayMs: 15000,
 *   randomDelay: true,
 *   maxNumberAttempts: 2,
 *   retryIf: error => !!error.networkError,
 * });
 * ```
 */
export const retryExchange = (options: RetryExchangeOptions): Exchange => {
  const { retryIf, retryWith } = options;
  const MIN_DELAY = options.initialDelayMs || 1000;
  const MAX_DELAY = options.maxDelayMs || 15_000;
  const MAX_ATTEMPTS = options.maxNumberAttempts || 2;
  const RANDOM_DELAY =
    options.randomDelay != null ? !!options.randomDelay : true;

  return ({ forward, dispatchDebug }) =>
    operations$ => {
      const { source: retry$, next: nextRetryOperation } =
        makeSubject<Operation>();

      const retryWithBackoff$ = pipe(
        retry$,
        mergeMap((operation: Operation) => {
          const retry: RetryState = operation.context.retry || {
            count: 0,
            delay: null,
          };
          const retryCount = ++retry.count;
          let delayAmount = retry.delay || MIN_DELAY;
          const backoffFactor = Math.random() + 1.5;
          if (RANDOM_DELAY) {
            // If randomDelay is enabled and the result won't exceed the maximum delay,
            // multiply the delay by a random factor to avoid a thundering herd problem
            if (delayAmount * backoffFactor < MAX_DELAY) {
              delayAmount *= backoffFactor;
            } else {
              delayAmount = MAX_DELAY;
            }
          } else {
            // Otherwise, increase the delay proportionally to the initial delay
            delayAmount = Math.min(retryCount * MIN_DELAY, MAX_DELAY);
          }
          // Ensure the delay is carried over to the next context
          retry.delay = delayAmount;
          // We stop the retries if a teardown event for this operation comes in.
          // If the operation is reissued as a regular query we also stop the retries,
          // since the query is effectively retrying itself and no backoff should be added.
          const teardown$ = pipe(
            operations$,
            filter(op => {
              return (
                (op.kind === 'query' || op.kind === 'teardown') &&
                op.key === operation.key
              );
            })
          );
          dispatchDebug({
            type: 'retryAttempt',
            message: `The operation has failed and a retry has been triggered (${retryCount} / ${MAX_ATTEMPTS})`,
            operation,
            data: {
              retryCount,
              delayAmount,
            },
          });

          // Add new retryDelay and retryCount to operation
          return pipe(
            fromValue(
              makeOperation(operation.kind, operation, {
                ...operation.context,
                retry,
              })
            ),
            debounce(() => delayAmount),
            // Stop retry if a teardown comes in
            takeUntil(teardown$)
          );
        })
      );

      return pipe(
        merge([operations$, retryWithBackoff$]),
        forward,
        filter(res => {
          const retry = res.operation.context.retry as RetryState | undefined;
          // Only retry if the error passes the conditional retryIf function (if passed)
          // or if the error contains a networkError
          if (
            !res.error ||
            (retryIf
              ? !retryIf(res.error, res.operation)
              : !retryWith && !res.error.networkError)
          ) {
            // Reset the delay state for a successful operation
            if (retry) {
              retry.count = 0;
              retry.delay = null;
            }
            return true;
          }

          const maxNumberAttemptsExceeded =
            ((retry && retry.count) || 0) >= MAX_ATTEMPTS - 1;
          if (!maxNumberAttemptsExceeded) {
            const operation = retryWith
              ? retryWith(res.error, res.operation)
              : res.operation;
            if (!operation) return true;
            // Send failed responses to be retried by calling next on the retry$ subject
            // Exclude operations that have been retried more than the specified max
            nextRetryOperation(operation);
            return false;
          }

          dispatchDebug({
            type: 'retryExhausted',
            message:
              'Maximum number of retries has been reached. No further retries will be performed.',
            operation: res.operation,
          });
          return true;
        })
      );
    };
};
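
For context, here is a minimal sketch of how this exchange is typically wired into a client. It assumes the file above is published as `@urql/exchange-retry` and used with a urql v4-style client from `@urql/core`; the endpoint URL and option values are placeholders, not part of the source above.

```ts
import { createClient, cacheExchange, fetchExchange } from '@urql/core';
import { retryExchange } from '@urql/exchange-retry';

// Hypothetical client setup; URL and option values are placeholders.
const client = createClient({
  url: 'https://example.com/graphql',
  exchanges: [
    cacheExchange,
    // Placed after the cache and before the fetch exchange so that
    // cache hits are never delayed or retried.
    retryExchange({
      initialDelayMs: 1000,
      maxDelayMs: 15000,
      randomDelay: true,
      maxNumberAttempts: 3,
      // Mirrors the default: only network errors are retried.
      retryIf: error => !!error.networkError,
    }),
    fetchExchange,
  ],
});
```

With `randomDelay` enabled, the first retry waits roughly 1.5–2.5 × `initialDelayMs`, and each further attempt multiplies the previous delay by another random factor in that range until `maxDelayMs` caps it.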