From f42434ff94adbafa51ea0c8a79e4ec98e355b46e Mon Sep 17 00:00:00 2001 From: Andrew Clark Date: Tue, 26 Nov 2024 19:08:51 -0500 Subject: [PATCH] [Segment Cache] Remove task map Upon reconsideration, I don't think we need to store the tasks in a map. I had thought we would use this to dedupe multiple prefetch tasks to the same URL, but we already dedupe by URL when accessing the route entry. In the future, there will also be multiple kinds of prefetch tasks, so it will be totally possible for the same URL to have multiple tasks associated with it; for example, a prefetch initiated by a link entering the viewport, versus a prefetch initiated by an imperative prefetch() call. The viewport prefetch should be cancelable, but the imperative prefetch should not. So it just makes more sense for them to be separate tasks. --- .../client/components/segment-cache/cache.ts | 9 ++- .../components/segment-cache/scheduler.ts | 73 ++++++------------- 2 files changed, 27 insertions(+), 55 deletions(-) diff --git a/packages/next/src/client/components/segment-cache/cache.ts b/packages/next/src/client/components/segment-cache/cache.ts index ddb4bb712dad79..1fa724f04572bc 100644 --- a/packages/next/src/client/components/segment-cache/cache.ts +++ b/packages/next/src/client/components/segment-cache/cache.ts @@ -197,7 +197,7 @@ export function requestRouteCacheEntryFromCache( couldBeIntercepted: false, } const key = task.key - const requestPromise = fetchRouteOnCacheMiss(pendingEntry, key) + const requestPromise = fetchRouteOnCacheMiss(pendingEntry, task) trackPrefetchRequestBandwidth(requestPromise) routeCache.set(key.id, pendingEntry) return pendingEntry @@ -322,12 +322,13 @@ function rejectSegmentCacheEntry( async function fetchRouteOnCacheMiss( entry: PendingRouteCacheEntry, - key: RouteCacheKey + task: PrefetchTask ): Promise<void> { // This function is allowed to use async/await because it contains the actual // fetch that gets issued on a cache miss. 
Notice though that it does not // return anything; it writes the result to the cache entry directly, then // pings the scheduler to unblock the corresponding prefetch task. + const key = task.key const href = key.href try { const response = await fetchSegmentPrefetchResponse(href, '/_tree') @@ -373,13 +374,13 @@ async function fetchRouteOnCacheMiss( // Either the connection itself failed, or something bad happened while // decoding the response. rejectRouteCacheEntry(entry, Date.now() + 10 * 1000) - pingPrefetchTask(key) + pingPrefetchTask(task) } finally { // The request for the route tree was blocking this task from prefetching // the segments. Now that the route tree is ready, notify the scheduler to // unblock the prefetch task. We do this even if the request failed, so the // scheduler can dispose of the task. - pingPrefetchTask(key) + pingPrefetchTask(task) } } diff --git a/packages/next/src/client/components/segment-cache/scheduler.ts b/packages/next/src/client/components/segment-cache/scheduler.ts index 739d9ab2acaa08..5474c60519f92d 100644 --- a/packages/next/src/client/components/segment-cache/scheduler.ts +++ b/packages/next/src/client/components/segment-cache/scheduler.ts @@ -6,7 +6,7 @@ import { type FulfilledRouteCacheEntry, type RouteCacheEntry, } from './cache' -import type { RouteCacheKey, RouteCacheKeyId } from './cache-key' +import type { RouteCacheKey } from './cache-key' const scheduleMicrotask = typeof queueMicrotask === 'function' @@ -50,9 +50,9 @@ export type PrefetchTask = { * True if the prefetch is blocked by network data. We remove tasks from the * queue once they are blocked, and add them back when they receive data. * - * isBlocked also indicates whether the task is currently in the queue; - * tasks are removed from the queue (but not the map) when they are blocked. - * Use this to avoid queueing the same task multiple times. 
+ * isBlocked also indicates whether the task is currently in the queue; tasks + * are removed from the queue when they are blocked. Use this to avoid + * queueing the same task multiple times. */ isBlocked: boolean @@ -88,7 +88,6 @@ const enum PrefetchTaskExitStatus { } const taskHeap: Array<PrefetchTask> = [] -const taskMap: Map<RouteCacheKeyId, PrefetchTask> = new Map() // This is intentionally low so that when a navigation happens, the browser's // internal network queue is not already saturated with prefetch requests. @@ -108,24 +107,6 @@ let didScheduleMicrotask = false * @param key The RouteCacheKey to prefetch. */ export function schedulePrefetchTask(key: RouteCacheKey): void { - const existingTask = taskMap.get(key.id) - if (existingTask !== undefined) { - // A task for this URL already exists, but we should bump it to the top of - // the queue by assigning a new sortId. However, if it already has the - // highest sortId that's been assigned, then we can bail out. - // - // Note that this check is not equivalent to checking if it's at the front - // of the queue, because there could be tasks that have a higher sortId but - // are not currently queued (because they are blocked). - if (existingTask.sortId === sortIdCounter) { - // No-op - return - } - existingTask.sortId = sortIdCounter++ - heapSift(taskHeap, existingTask) - return - } - // Spawn a new prefetch task const task: PrefetchTask = { key, @@ -134,7 +115,6 @@ export function schedulePrefetchTask(key: RouteCacheKey): void { _heapIndex: -1, } heapPush(taskHeap, task) - taskMap.set(key.id, task) // Schedule an async task to process the queue. // @@ -200,15 +180,8 @@ function onPrefetchRequestCompletion(): void { * prefetch. The corresponding task will be added back to the queue (unless the * task has been canceled in the meantime). */ -export function pingPrefetchTask(key: RouteCacheKey) { +export function pingPrefetchTask(task: PrefetchTask) { // "Ping" a prefetch that's already in progress to notify it of new data. 
- const task = taskMap.get(key.id) - if (task === undefined) { - // There's no matching prefetch task, which means it must have either - // already completed or been canceled. This can also happen if an entry is - // read from the cache outside the context of a prefetch. - return - } if (!task.isBlocked) { // Prefetch is already queued. return } @@ -246,11 +219,7 @@ function processQueueInMicrotask() { task = heapPop(taskHeap) continue case PrefetchTaskExitStatus.Done: - // The prefetch is complete. Remove the task from the map so it can be - // garbage collected. - taskMap.delete(task.key.id) - - // Continue to the next task + // The prefetch is complete. Continue to the next task. task = heapPop(taskHeap) continue default: { @@ -364,20 +333,22 @@ function heapPop(heap: Array<PrefetchTask>): PrefetchTask | null { return first } -function heapSift(heap: Array<PrefetchTask>, node: PrefetchTask) { - const index = node._heapIndex - if (index !== -1) { - const parentIndex = (index - 1) >>> 1 - const parent = heap[parentIndex] - if (compareQueuePriority(parent, node) > 0) { - // The parent is larger. Sift up. - heapSiftUp(heap, node, index) - } else { - // The parent is smaller (or equal). Sift down. - heapSiftDown(heap, node, index) - } - } -} +// Not currently used, but will be once we add the ability to update a +// task's priority. +// function heapSift(heap: Array<PrefetchTask>, node: PrefetchTask) { +// const index = node._heapIndex +// if (index !== -1) { +// const parentIndex = (index - 1) >>> 1 +// const parent = heap[parentIndex] +// if (compareQueuePriority(parent, node) > 0) { +// // The parent is larger. Sift up. +// heapSiftUp(heap, node, index) +// } else { +// // The parent is smaller (or equal). Sift down. +// heapSiftDown(heap, node, index) +// } +// } +// } function heapSiftUp( heap: Array<PrefetchTask>,