From 6ae540abb79ba36ed5474b46fd6c8e6c1e51bbfa Mon Sep 17 00:00:00 2001 From: Andrew Clark Date: Tue, 3 Dec 2024 17:26:23 -0500 Subject: [PATCH] [Segment Cache] Use LRU to manage cache data This implements an LRU for data stored in the Segment Cache. For now I've hardcoded in a cache size of 50mb, but as a follow up, we should look into creating a dynamic limit based on device memory constraints. Segments and Route entries are managed by separate LRUs. We could combine them into a single LRU, but because they are separate types, we'd need to wrap each one in an extra LRU node (to maintain monomorphism, at the cost of additional memory). --- .../router-reducer/fetch-server-response.ts | 2 +- .../client/components/segment-cache/cache.ts | 208 +++++++++++++++--- .../client/components/segment-cache/lru.ts | 138 ++++++++++++ .../memory-pressure/app/layout.tsx | 11 + .../app/memory-pressure/[step]/layout.tsx | 9 + .../app/memory-pressure/[step]/page.tsx | 17 ++ .../app/memory-pressure/layout.tsx | 3 + .../app/memory-pressure/page.tsx | 85 +++++++ .../memory-pressure/next.config.js | 12 + .../segment-cache-memory-pressure.test.ts | 180 +++++++++++++++ 10 files changed, 636 insertions(+), 29 deletions(-) create mode 100644 packages/next/src/client/components/segment-cache/lru.ts create mode 100644 test/e2e/app-dir/segment-cache/memory-pressure/app/layout.tsx create mode 100644 test/e2e/app-dir/segment-cache/memory-pressure/app/memory-pressure/[step]/layout.tsx create mode 100644 test/e2e/app-dir/segment-cache/memory-pressure/app/memory-pressure/[step]/page.tsx create mode 100644 test/e2e/app-dir/segment-cache/memory-pressure/app/memory-pressure/layout.tsx create mode 100644 test/e2e/app-dir/segment-cache/memory-pressure/app/memory-pressure/page.tsx create mode 100644 test/e2e/app-dir/segment-cache/memory-pressure/next.config.js create mode 100644 test/e2e/app-dir/segment-cache/memory-pressure/segment-cache-memory-pressure.test.ts diff --git a/packages/next/src/client/components/router-reducer/fetch-server-response.ts b/packages/next/src/client/components/router-reducer/fetch-server-response.ts index 7de9d9fca3062..4a441399bb16b 100644 --- a/packages/next/src/client/components/router-reducer/fetch-server-response.ts +++ b/packages/next/src/client/components/router-reducer/fetch-server-response.ts @@ -278,7 +278,7 @@ export function createFromNextReadableStream( }) } -export function createUnclosingPrefetchStream( +function createUnclosingPrefetchStream( originalFlightStream: ReadableStream ): ReadableStream { // When PPR is enabled, prefetch streams may contain references that never diff --git a/packages/next/src/client/components/segment-cache/cache.ts b/packages/next/src/client/components/segment-cache/cache.ts index 42aeb0eef9d04..5f7316f425324 100644 --- a/packages/next/src/client/components/segment-cache/cache.ts +++ b/packages/next/src/client/components/segment-cache/cache.ts @@ -14,7 +14,6 @@ import { import { createFetch, createFromNextReadableStream, - createUnclosingPrefetchStream, urlToUrlWithoutFlightMarker, type RequestHeaders, } from '../router-reducer/fetch-server-response' @@ -32,6 +31,7 @@ import type { RouteCacheKey, } from './cache-key' import { createTupleMap, type TupleMap, type Prefix } from './tuple-map' +import { createLRU, type LRU } from './lru' // A note on async/await when working in the prefetch cache: // @@ -60,6 +60,12 @@ type RouteCacheEntryShared = { // true in all other cases, including on initialization when we haven't yet // received a response from the server. 
couldBeIntercepted: boolean + + // LRU-related fields + keypath: null | Prefix + next: null | RouteCacheEntry + prev: null | RouteCacheEntry + size: number } export const enum EntryStatus { @@ -99,6 +105,12 @@ export type RouteCacheEntry = type SegmentCacheEntryShared = { staleAt: number + + // LRU-related fields + key: null | string + next: null | RouteCacheEntry + prev: null | RouteCacheEntry + size: number } type PendingSegmentCacheEntry = SegmentCacheEntryShared & { @@ -136,9 +148,29 @@ type RouteCacheKeypath = [NormalizedHref, NormalizedNextUrl] const routeCacheMap: TupleMap = createTupleMap() +// We use an LRU for memory management. We must update this whenever we add or +// remove a new cache entry, or when an entry changes size. +// TODO: I chose the max size somewhat arbitrarily. Consider setting this based +// on navigator.deviceMemory, or some other heuristic. We should make this +// customizable via the Next.js config, too. +const maxRouteLruSize = 10 * 1024 * 1024 // 10 MB +const routeCacheLru = createLRU( + maxRouteLruSize, + onRouteLRUEviction +) + // TODO: We may eventually store segment entries in a tuple map, too, to // account for search params. -const segmentCache = new Map() +const segmentCacheMap = new Map() +// NOTE: Segments and Route entries are managed by separate LRUs. We could +// combine them into a single LRU, but because they are separate types, we'd +// need to wrap each one in an extra LRU node (to maintain monomorphism, at the +// cost of additional memory). +const maxSegmentLruSize = 50 * 1024 * 1024 // 50 MB +const segmentCacheLru = createLRU( + maxSegmentLruSize, + onSegmentLRUEviction +) export function readExactRouteCacheEntry( now: number, @@ -152,10 +184,14 @@ export function readExactRouteCacheEntry( // Check if the entry is stale if (existingEntry.staleAt > now) { // Reuse the existing entry. + + // Since this is an access, move the entry to the front of the LRU. + routeCacheLru.put(existingEntry) + return existingEntry } else { // Evict the stale entry from the cache. - routeCacheMap.delete(keypath) + deleteRouteFromCache(existingEntry, keypath) } } return null @@ -180,15 +216,19 @@ export function readSegmentCacheEntry( now: number, path: string ): SegmentCacheEntry | null { - const existingEntry = segmentCache.get(path) + const existingEntry = segmentCacheMap.get(path) if (existingEntry !== undefined) { // Check if the entry is stale if (existingEntry.staleAt > now) { // Reuse the existing entry. + + // Since this is an access, move the entry to the front of the LRU. + segmentCacheLru.put(existingEntry) + return existingEntry } else { // Evict the stale entry from the cache. - evictSegmentEntryFromCache(existingEntry, path) + deleteSegmentFromCache(existingEntry, path) } } return null @@ -248,11 +288,21 @@ export function requestRouteCacheEntryFromCache( // could be intercepted. It's only set to false once we receive a response // from the server. couldBeIntercepted: true, + + // LRU-related fields + keypath: null, + next: null, + prev: null, + size: 0, } spawnPrefetchSubtask(fetchRouteOnCacheMiss(pendingEntry, task)) const keypath: Prefix = key.nextUrl === null ? [key.href] : [key.href, key.nextUrl] routeCacheMap.set(keypath, pendingEntry) + // Stash the keypath on the entry so we know how to remove it from the map + // if it gets evicted from the LRU. 
+ pendingEntry.keypath = keypath + routeCacheLru.put(pendingEntry) return pendingEntry } @@ -279,6 +329,12 @@ export function requestSegmentEntryFromCache( loading: null, staleAt: route.staleAt, promise: null, + + // LRU-related fields + key: null, + next: null, + prev: null, + size: 0, } spawnPrefetchSubtask( fetchSegmentEntryOnCacheMiss( @@ -289,15 +345,50 @@ export function requestSegmentEntryFromCache( accessToken ) ) - segmentCache.set(path, pendingEntry) + segmentCacheMap.set(path, pendingEntry) + // Stash the keypath on the entry so we know how to remove it from the map + // if it gets evicted from the LRU. + pendingEntry.key = path + segmentCacheLru.put(pendingEntry) return pendingEntry } -function evictSegmentEntryFromCache( - entry: SegmentCacheEntry, - key: string +function deleteRouteFromCache( + entry: RouteCacheEntry, + keypath: Prefix ): void { - segmentCache.delete(key) + pingBlockedTasks(entry) + routeCacheMap.delete(keypath) + routeCacheLru.delete(entry) +} + +function deleteSegmentFromCache(entry: SegmentCacheEntry, key: string): void { + cancelEntryListeners(entry) + segmentCacheMap.delete(key) + segmentCacheLru.delete(entry) +} + +function onRouteLRUEviction(entry: RouteCacheEntry): void { + // The LRU evicted this entry. Remove it from the map. + const keypath = entry.keypath + if (keypath !== null) { + entry.keypath = null + pingBlockedTasks(entry) + routeCacheMap.delete(keypath) + } +} + +function onSegmentLRUEviction(entry: SegmentCacheEntry): void { + // The LRU evicted this entry. Remove it from the map. + const key = entry.key + if (key !== null) { + entry.key = null + cancelEntryListeners(entry) + segmentCacheMap.delete(key) + } +} + +function cancelEntryListeners(entry: SegmentCacheEntry): void { if (entry.status === EntryStatus.Pending && entry.promise !== null) { // There were listeners for this entry. Resolve them with `null` to indicate // that the prefetch failed. 
It's up to the listener to decide how to handle @@ -309,6 +400,18 @@ function evictSegmentEntryFromCache( } } +function pingBlockedTasks(entry: { + blockedTasks: Set | null +}): void { + const blockedTasks = entry.blockedTasks + if (blockedTasks !== null) { + for (const task of blockedTasks) { + pingPrefetchTask(task) + } + entry.blockedTasks = null + } +} + function fulfillRouteCacheEntry( entry: PendingRouteCacheEntry, tree: TreePrefetch, @@ -324,13 +427,7 @@ function fulfillRouteCacheEntry( fulfilledEntry.staleAt = staleAt fulfilledEntry.couldBeIntercepted = couldBeIntercepted fulfilledEntry.canonicalUrl = canonicalUrl - const blockedTasks = entry.blockedTasks - if (blockedTasks !== null) { - for (const task of blockedTasks) { - pingPrefetchTask(task) - } - fulfilledEntry.blockedTasks = null - } + pingBlockedTasks(entry) return fulfilledEntry } @@ -360,13 +457,7 @@ function rejectRouteCacheEntry( const rejectedEntry: RejectedRouteCacheEntry = entry as any rejectedEntry.status = EntryStatus.Rejected rejectedEntry.staleAt = staleAt - const blockedTasks = entry.blockedTasks - if (blockedTasks !== null) { - for (const task of blockedTasks) { - pingPrefetchTask(task) - } - rejectedEntry.blockedTasks = null - } + pingBlockedTasks(entry) } function rejectSegmentCacheEntry( @@ -402,8 +493,13 @@ async function fetchRouteOnCacheMiss( rejectRouteCacheEntry(entry, Date.now() + 10 * 1000) return } + const prefetchStream = createPrefetchResponseStream( + response.body, + routeCacheLru, + entry + ) const serverData: RootTreePrefetch = await (createFromNextReadableStream( - response.body + prefetchStream ) as Promise) if (serverData.buildId !== getAppBuildId()) { // The server build does not match the client. Treat as a 404. During @@ -449,6 +545,10 @@ async function fetchRouteOnCacheMiss( routeCacheMap.delete(currentKeypath) const newKeypath: Prefix = [href] routeCacheMap.set(newKeypath, entry) + // We don't need to update the LRU because the entry is already in it. + // But since we changed the keypath, we do need to update that, so we + // know how to remove it from the map if it gets evicted from the LRU. + entry.keypath = newKeypath } else { // Something else modified this entry already. Since the re-keying is // just a performance optimization, we can safely skip it. @@ -488,9 +588,12 @@ async function fetchSegmentEntryOnCacheMiss( return } // Wrap the original stream in a new stream that never closes. That way the - // Flight client doesn't error if there's a hanging promise. See comment in - // createUnclosingPrefetchStream for more details. - const prefetchStream = createUnclosingPrefetchStream(response.body) + // Flight client doesn't error if there's a hanging promise. + const prefetchStream = createPrefetchResponseStream( + response.body, + segmentCacheLru, + segmentCacheEntry + ) const serverData = await (createFromNextReadableStream( prefetchStream ) as Promise) @@ -544,6 +647,55 @@ async function fetchSegmentPrefetchResponse( return response } +function createPrefetchResponseStream< + T extends RouteCacheEntry | SegmentCacheEntry, +>( + originalFlightStream: ReadableStream, + lru: LRU, + lruEntry: T +): ReadableStream { + // When PPR is enabled, prefetch streams may contain references that never + // resolve, because that's how we encode dynamic data access. In the decoded + // object returned by the Flight client, these are reified into hanging + // promises that suspend during render, which is effectively what we want. 
+ // The UI resolves when it switches to the dynamic data stream + // (via useDeferredValue(dynamic, static)). + // + // However, the Flight implementation currently errors if the server closes + // the response before all the references are resolved. As a cheat to work + // around this, we wrap the original stream in a new stream that never closes, + // and therefore doesn't error. + // + // While processing the original stream, we also incrementally update the size + // of the cache entry in the LRU. + let totalByteLength = 0 + const reader = originalFlightStream.getReader() + return new ReadableStream({ + async pull(controller) { + while (true) { + const { done, value } = await reader.read() + if (!done) { + // Pass to the target stream and keep consuming the Flight response + // from the server. + controller.enqueue(value) + + // Incrementally update the size of the cache entry in the LRU. + // NOTE: Since prefetch responses are delivered in a single chunk, + // it's not really necessary to do this streamingly, but I'm doing it + // anyway in case this changes in the future. + totalByteLength += value.byteLength + lru.updateSize(lruEntry, totalByteLength) + + continue + } + // The server stream has closed. Exit, but intentionally do not close + // the target stream. + return + } + }, + }) +} + function createPromiseWithResolvers(): PromiseWithResolvers { // Shim of Stage 4 Promise.withResolvers proposal let resolve: (value: T | PromiseLike) => void diff --git a/packages/next/src/client/components/segment-cache/lru.ts b/packages/next/src/client/components/segment-cache/lru.ts new file mode 100644 index 0000000000000..35249244a58b4 --- /dev/null +++ b/packages/next/src/client/components/segment-cache/lru.ts @@ -0,0 +1,138 @@ +export type LRU = { + put(node: T): void + delete(node: T): void + updateSize(node: T, size: number): void +} + +// Doubly-linked list +type LRUNode = { + // Although it's not encoded in the type, these are both null if the node is + // not in the LRU; both non-null if it is. + prev: T | null + next: T | null + size: number +} + +// Rather than create an internal LRU node, the passed-in type must conform +// the LRUNode interface. This is just a memory optimization to avoid creating +// another object; we only use this for Segment Cache entries so it doesn't need +// to be general purpose. +export function createLRU( + // From the LRU's perspective, the size unit is arbitrary, but for our + // purposes this is the byte size. + maxLruSize: number, + onEviction: (node: T) => void +): LRU { + let head: T | null = null + let didScheduleCleanup: boolean = false + let lruSize: number = 0 + + function put(node: T) { + if (head === node) { + // Already at the head + return + } + const prev = node.prev + const next = node.next + if (next === null || prev === null) { + // This is an insertion + lruSize += node.size + // Whenever we add an entry, we need to check if we've exceeded the + // max size. We don't evict entries immediately; they're evicted later in + // an asynchronous task. + ensureCleanupIsScheduled() + } else { + // This is a move. Remove from its current position. 
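+      // The list is circular, so a node that is already in the LRU always has
+      // non-null neighbors; splice it out by pointing them at each other.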
+ prev.next = next + next.prev = prev + } + + // Move to the front of the list + if (head === null) { + // This is the first entry + node.prev = node + node.next = node + } else { + // Add to the front of the list + const tail = head.prev + node.prev = tail + tail.next = node + node.next = head + head.prev = node + } + head = node + } + + function updateSize(node: T, newNodeSize: number) { + // This is a separate function so that we can resize the entry after it's + // already been inserted. + if (node.next === null) { + // No longer part of LRU. + return + } + const prevNodeSize = node.size + node.size = newNodeSize + lruSize = lruSize - prevNodeSize + newNodeSize + ensureCleanupIsScheduled() + } + + function deleteNode(deleted: T) { + const next = deleted.next + const prev = deleted.prev + if (next !== null && prev !== null) { + lruSize -= deleted.size + + deleted.next = null + deleted.prev = null + + // Remove from the list + if (head === deleted) { + // Update the head + if (next === head) { + // This was the last entry + head = null + } else { + head = next + } + } else { + prev.next = next + next.prev = prev + } + } else { + // Already deleted + } + } + + function ensureCleanupIsScheduled() { + if (didScheduleCleanup || lruSize <= maxLruSize) { + return + } + didScheduleCleanup = true + requestCleanupCallback(cleanup) + } + + function cleanup() { + didScheduleCleanup = false + + // Evict entries until we're at 90% capacity. We can assume this won't + // infinite loop because even if `maxLruSize` were 0, eventually + // `deleteNode` sets `head` to `null` when we run out entries. + const ninetyPercentMax = maxLruSize * 0.9 + while (lruSize > ninetyPercentMax && head !== null) { + const tail = head.prev + deleteNode(tail) + onEviction(tail) + } + } + + return { + put, + delete: deleteNode, + updateSize, + } +} + +const requestCleanupCallback = + typeof requestIdleCallback === 'function' + ? requestIdleCallback + : (cb: () => void) => setTimeout(cb, 0) diff --git a/test/e2e/app-dir/segment-cache/memory-pressure/app/layout.tsx b/test/e2e/app-dir/segment-cache/memory-pressure/app/layout.tsx new file mode 100644 index 0000000000000..dbce4ea8e3aeb --- /dev/null +++ b/test/e2e/app-dir/segment-cache/memory-pressure/app/layout.tsx @@ -0,0 +1,11 @@ +export default function RootLayout({ + children, +}: { + children: React.ReactNode +}) { + return ( + + {children} + + ) +} diff --git a/test/e2e/app-dir/segment-cache/memory-pressure/app/memory-pressure/[step]/layout.tsx b/test/e2e/app-dir/segment-cache/memory-pressure/app/memory-pressure/[step]/layout.tsx new file mode 100644 index 0000000000000..c123822359e0f --- /dev/null +++ b/test/e2e/app-dir/segment-cache/memory-pressure/app/memory-pressure/[step]/layout.tsx @@ -0,0 +1,9 @@ +import { Suspense } from 'react' + +export default async function Layout({ + children, +}: { + children: React.ReactNode +}) { + return {children} +} diff --git a/test/e2e/app-dir/segment-cache/memory-pressure/app/memory-pressure/[step]/page.tsx b/test/e2e/app-dir/segment-cache/memory-pressure/app/memory-pressure/[step]/page.tsx new file mode 100644 index 0000000000000..70dcdb50e9700 --- /dev/null +++ b/test/e2e/app-dir/segment-cache/memory-pressure/app/memory-pressure/[step]/page.tsx @@ -0,0 +1,17 @@ +export default async function Page() { + // Render a large string such that a prefetch of this segment is roughly 1MB + // over the network + return
<div>{'a'.repeat(1024 * 1024)}</div>
+}
+
+export async function generateStaticParams() {
+  // Generate some number of steps. This should be just enough to trigger the
+  // default LRU limit used by the client prefetch cache.
+  // TODO: Once we add a config option to set the prefetch limit, we can set a
+  // smaller number, to speed up testing iteration (and CI).
+  const result = []
+  for (let i = 0; i < 60; i++) {
+    result.push({ step: i.toString() })
+  }
+  return result
+}
diff --git a/test/e2e/app-dir/segment-cache/memory-pressure/app/memory-pressure/layout.tsx b/test/e2e/app-dir/segment-cache/memory-pressure/app/memory-pressure/layout.tsx
new file mode 100644
index 0000000000000..c9a0e0bfb8fdb
--- /dev/null
+++ b/test/e2e/app-dir/segment-cache/memory-pressure/app/memory-pressure/layout.tsx
@@ -0,0 +1,3 @@
+export default function Layout({ children }: { children: React.ReactNode }) {
+  return children
+}
diff --git a/test/e2e/app-dir/segment-cache/memory-pressure/app/memory-pressure/page.tsx b/test/e2e/app-dir/segment-cache/memory-pressure/app/memory-pressure/page.tsx
new file mode 100644
index 0000000000000..ca73756117910
--- /dev/null
+++ b/test/e2e/app-dir/segment-cache/memory-pressure/app/memory-pressure/page.tsx
@@ -0,0 +1,85 @@
+'use client'
+import Link from 'next/link'
+import { useState } from 'react'
+import { useRouter } from 'next/navigation'
+
+function Tab1() {
+  return <Link href="/memory-pressure/0">Link 0</Link>
+}
+
+function Tab2() {
+  const links = []
+  for (let i = 1; i < 60; i++) {
+    links.push(
+      <Link key={i} href={`/memory-pressure/${i}`}>
+        Link {i}
+      </Link>
+    )
+  }
+  return links
+}
+
+export default function MemoryPressure() {
+  const router = useRouter()
+  const [selectedTab, setSelectedTab] = useState('1')
+
+  const handlePageChange = (event: React.ChangeEvent<HTMLInputElement>) => {
+    setSelectedTab(event.target.value)
+  }
+
+  return (
+    <>
+      <h1>Memory pressure</h1>
+      <p>
+        Tests that prefetch data is evicted once the cache size grows too large,
+        using an LRU.
+      </p>
+      <p>
+        On page load, the first link is preloaded. When you switch to the second
+        tab, the first link is replaced by a large number of new links.
+      </p>
+      <p>The payload for each link's prefetch is about 1MB.</p>
+      <p>
+        Switching tabs causes the cache size to exceed the limit{' '}
+        <em>
+          (currently hardcoded to 50MB, but we will make this configurable)
+        </em>
+        , and the prefetch for the first link will be evicted.
+      </p>
+      <div>
+        <input
+          type="radio"
+          value="1"
+          checked={selectedTab === '1'}
+          onChange={handlePageChange}
+        />
+        <input
+          type="radio"
+          value="2"
+          checked={selectedTab === '2'}
+          onChange={handlePageChange}
+        />
+      </div>
+      <div>{selectedTab === '1' ? <Tab1 /> : <Tab2 />}</div>
+    </>
+ ) +} diff --git a/test/e2e/app-dir/segment-cache/memory-pressure/next.config.js b/test/e2e/app-dir/segment-cache/memory-pressure/next.config.js new file mode 100644 index 0000000000000..a74129c5a24f2 --- /dev/null +++ b/test/e2e/app-dir/segment-cache/memory-pressure/next.config.js @@ -0,0 +1,12 @@ +/** + * @type {import('next').NextConfig} + */ +const nextConfig = { + experimental: { + ppr: true, + dynamicIO: true, + clientSegmentCache: true, + }, +} + +module.exports = nextConfig diff --git a/test/e2e/app-dir/segment-cache/memory-pressure/segment-cache-memory-pressure.test.ts b/test/e2e/app-dir/segment-cache/memory-pressure/segment-cache-memory-pressure.test.ts new file mode 100644 index 0000000000000..4f1abeb981ee3 --- /dev/null +++ b/test/e2e/app-dir/segment-cache/memory-pressure/segment-cache-memory-pressure.test.ts @@ -0,0 +1,180 @@ +import { nextTestSetup } from 'e2e-utils' +import type * as Playwright from 'playwright' + +describe('segment cache memory pressure', () => { + const { next, isNextDev, skipped } = nextTestSetup({ + files: __dirname, + skipDeployment: true, + }) + if (isNextDev || skipped) { + test('disabled in development / deployment', () => {}) + return + } + + it('evicts least recently used prefetch data once cache size exceeds limit', async () => { + const interceptor = createRequestInterceptor() + + // Loading the page triggers a prefetch of the first link. + const browser = await interceptor.waitForPrefetches(() => + next.browser('/memory-pressure', { + beforePageLoad(page: Playwright.Page) { + page.route('**/*', async (route: Playwright.Route) => { + await interceptor.interceptRoute(page, route) + }) + }, + }) + ) + + const switchToTab1 = await browser.elementByCss( + 'input[type="radio"][value="1"]' + ) + const switchToTab2 = await browser.elementByCss( + 'input[type="radio"][value="2"]' + ) + + // Switching to tab 2 causes the cache to overflow, evicting the prefetch + // for the link on tab 1. + await interceptor.waitForPrefetches(async () => { + await switchToTab2.click() + }) + + // Switching back to tab 1 initiates a new prefetch for the first link. + // Otherwise, this will timeout. + await interceptor.waitForPrefetches(async () => { + await switchToTab1.click() + }) + }) +}) + +function createRequestInterceptor() { + // Test utility for intercepting internal RSC requests so we can control the + // timing of when they resolve. We want to avoid relying on internals and + // implementation details as much as possible, so the only thing this does + // for now is let you block and release requests from happening based on + // their type (prefetch requests, navigation requests). 
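+  // While prefetches or navigations are locked (via lockPrefetches /
+  // lockNavigations below), intercepted routes are collected in these sets
+  // instead of being continued; releasing the lock flushes them.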
+ let pendingPrefetches: Set | null = null + let pendingNavigations: Set | null = null + + let prefetchesPromise: PromiseWithResolvers = null + let lastPrefetchRequest: Playwright.Request | null = null + + return { + lockNavigations() { + if (pendingNavigations !== null) { + throw new Error('Navigations are already locked') + } + pendingNavigations = new Set() + return { + async release() { + if (pendingNavigations === null) { + throw new Error('This lock was already released') + } + const routes = pendingNavigations + pendingNavigations = null + for (const route of routes) { + route.continue() + } + }, + } + }, + + lockPrefetches() { + if (pendingPrefetches !== null) { + throw new Error('Prefetches are already locked') + } + pendingPrefetches = new Set() + return { + release() { + if (pendingPrefetches === null) { + throw new Error('This lock was already released') + } + const routes = pendingPrefetches + pendingPrefetches = null + for (const route of routes) { + route.continue() + } + }, + } + }, + + /** + * Waits for the next for the next prefetch request, then keeps waiting + * until the prefetch queue is empty (to account for network throttling). + * + * If no prefetches are initiated, this will timeout. + */ + async waitForPrefetches( + scope: () => Promise | T = (): undefined => {} + ): Promise { + if (prefetchesPromise === null) { + let resolve + let reject + const promise: Promise = new Promise((res, rej) => { + resolve = res + reject = rej + }) + prefetchesPromise = { + resolve, + reject, + promise, + } + } + const result = await scope() + if (prefetchesPromise !== null) { + await prefetchesPromise.promise + } + return result + }, + + async interceptRoute(page: Playwright.Page, route: Playwright.Route) { + const request = route.request() + const requestHeaders = await request.allHeaders() + + if (requestHeaders['RSC'.toLowerCase()]) { + // This is an RSC request. Check if it's a prefetch or a navigation. + if (requestHeaders['Next-Router-Prefetch'.toLowerCase()]) { + // This is a prefetch request. + if (prefetchesPromise !== null) { + // Wait for the prefetch response to finish, then wait an additional + // async task for additional prefetches to be initiated. + lastPrefetchRequest = request + const waitForMorePrefetches = async () => { + const response = await request.response() + await response.finished() + await page.evaluate( + () => + // If the prefetch queue is network throttled, the next + // request should be issued within a microtask of the previous + // one finishing. + new Promise((res) => requestIdleCallback(() => res())) + ) + if (request === lastPrefetchRequest) { + // No further prefetches were initiated. Assume the prefetch + // queue is now empty. + prefetchesPromise.resolve() + prefetchesPromise = null + lastPrefetchRequest = null + } + } + waitForMorePrefetches().then( + () => {}, + () => {} + ) + } + if (pendingPrefetches !== null) { + pendingPrefetches.add(route) + return + } + } else { + // This is a navigation request. + if (pendingNavigations !== null) { + pendingNavigations.add(route) + return + } + } + } + + await route.continue() + }, + } +}
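As a rough illustration of how the intrusive LRU in lru.ts is meant to be driven, here is a standalone TypeScript sketch. Only createLRU and its put / delete / updateSize methods come from this patch; the ExampleEntry type, the owning Map, the putEntry helper, and the 1 KB limit are made-up stand-ins for the RouteCacheEntry / SegmentCacheEntry plumbing in cache.ts.

import { createLRU } from './lru'

// Hypothetical entry type. The LRU only requires the intrusive prev/next/size
// fields; the real entries are RouteCacheEntry and SegmentCacheEntry.
type ExampleEntry = {
  key: string
  prev: ExampleEntry | null
  next: ExampleEntry | null
  size: number
}

// Eviction only unlinks the node; removing it from the owning map is the
// caller's job, mirroring onSegmentLRUEviction in cache.ts.
const entries = new Map<string, ExampleEntry>()
const lru = createLRU(
  1024, // assumed 1 KB limit so eviction is easy to trigger
  (evicted: ExampleEntry) => {
    entries.delete(evicted.key)
  }
)

function putEntry(key: string, byteLength: number): ExampleEntry {
  const entry: ExampleEntry = { key, prev: null, next: null, size: 0 }
  entries.set(key, entry)
  // Insert at the head with size 0, then report the real size once it is
  // known, the same way cache.ts updates the size while the prefetch response
  // streams in.
  lru.put(entry)
  lru.updateSize(entry, byteLength)
  return entry
}

const a = putEntry('a', 600)
putEntry('b', 600) // total is now 1200 > 1024, so an async cleanup is scheduled

// A cache hit should re-put the entry so it moves back to the front of the
// list, as readSegmentCacheEntry does.
lru.put(a)

// Stale entries can also be removed eagerly, before the LRU evicts them.
lru.delete(a)
entries.delete('a')

Keeping the prev/next/size fields inline on the cache entries avoids allocating a separate LRU node per entry, which is also why routes and segments get their own LRUs rather than sharing one, as the commit message explains.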