From 492a7fd511166a35dee3e5abceab1f5b38289696 Mon Sep 17 00:00:00 2001
From: Sultan Alsawaf
Date: Sat, 28 Mar 2020 13:06:28 -0700
Subject: [PATCH 1/8] mm: Disable watermark boosting by default

What watermark boosting does is preemptively fire up kswapd to free
memory when there hasn't been an allocation failure. It does this by
increasing kswapd's high watermark goal and then firing up kswapd.
This causes freezes because, with the increased high watermark goal,
kswapd will steal memory from processes that need it in order to make
forward progress. These processes will, in turn, try to allocate
memory again, which causes kswapd to steal necessary pages from those
processes again, in a positive feedback loop known as page thrashing.
When page thrashing occurs, the system is essentially livelocked until
enough forward progress is made to stop processes from continuously
allocating memory and triggering kswapd to steal it back.

This problem already occurs with kswapd *without* watermark boosting,
but it's usually only encountered on machines with a small amount of
memory and/or a slow CPU. Watermark boosting just makes the existing
problem bad enough to notice on higher-spec'd machines.

Disable watermark boosting by default since it's a total dumpster fire.
I can't imagine why anyone would want to explicitly enable it, but the
option is there in case someone does.

Signed-off-by: Sultan Alsawaf
---
 mm/page_alloc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7d3460c7a480b5..b0839577121a7c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -297,7 +297,7 @@ static compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
 
 int min_free_kbytes = 1024;
 int user_min_free_kbytes = -1;
-static int watermark_boost_factor __read_mostly = 15000;
+static int watermark_boost_factor __read_mostly;
 static int watermark_scale_factor = 10;
 
 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */

From b66e7c6e1a4040aca5f88fade595d4571a64a86e Mon Sep 17 00:00:00 2001
From: Sultan Alsawaf
Date: Sun, 19 Apr 2020 19:59:18 -0700
Subject: [PATCH 2/8] mm: Stop kswapd early when nothing's waiting for it to free pages

Keeping kswapd running when all the failed allocations that invoked it
are satisfied incurs a high overhead due to unnecessary page eviction
and writeback, as well as spurious VM pressure events to various
registered shrinkers. When kswapd doesn't need to work to make an
allocation succeed anymore, stop it prematurely to save resources.

Signed-off-by: Sultan Alsawaf
---
 include/linux/mmzone.h |  1 +
 mm/mm_init.c           |  1 +
 mm/page_alloc.c        | 16 +++++++++++++---
 mm/vmscan.c            |  3 ++-
 4 files changed, 17 insertions(+), 4 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 5e50b78d58ea68..6640c7d5c1c954 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1301,6 +1301,7 @@ typedef struct pglist_data {
 	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
 	int node_id;
+	atomic_t kswapd_waiters;
 	wait_queue_head_t kswapd_wait;
 	wait_queue_head_t pfmemalloc_wait;
diff --git a/mm/mm_init.c b/mm/mm_init.c
index a1963c3322af43..306f0b171a406b 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1359,6 +1359,7 @@ static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
 	pgdat_page_ext_init(pgdat);
 	lruvec_init(&pgdat->__lruvec);
+	pgdat->kswapd_waiters = (atomic_t)ATOMIC_INIT(0);
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b0839577121a7c..67215d85937cb1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3962,6 +3962,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	unsigned int cpuset_mems_cookie;
 	unsigned int zonelist_iter_cookie;
 	int reserve_flags;
+	pg_data_t *pgdat = ac->preferred_zoneref->zone->zone_pgdat;
+	bool woke_kswapd = false;
 
 restart:
 	compaction_retries = 0;
@@ -4001,8 +4003,13 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		goto nopage;
 	}
 
-	if (alloc_flags & ALLOC_KSWAPD)
+	if (alloc_flags & ALLOC_KSWAPD) {
+		if (!woke_kswapd) {
+			atomic_inc(&pgdat->kswapd_waiters);
+			woke_kswapd = true;
+		}
 		wake_all_kswapds(order, gfp_mask, ac);
+	}
 
 	/*
	 * The adjusted alloc_flags might result in immediate success, so try
@@ -4217,9 +4224,12 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		goto retry;
 	}
 fail:
-	warn_alloc(gfp_mask, ac->nodemask,
-			"page allocation failure: order:%u", order);
 got_pg:
+	if (woke_kswapd)
+		atomic_dec(&pgdat->kswapd_waiters);
+	if (!page)
+		warn_alloc(gfp_mask, ac->nodemask,
+			   "page allocation failure: order:%u", order);
 	return page;
 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2fe4a11d63f447..a3d35e1aeaec26 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -7522,7 +7522,8 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
 		__fs_reclaim_release(_THIS_IP_);
 		ret = try_to_freeze();
 		__fs_reclaim_acquire(_THIS_IP_);
-		if (ret || kthread_should_stop())
+		if (ret || kthread_should_stop() ||
+		    !atomic_read(&pgdat->kswapd_waiters))
 			break;
 
 		/*

From da8517a8ffbf9d0e58b7e3af0a05c3967e36a778 Mon Sep 17 00:00:00 2001
From: Sultan Alsawaf
Date: Wed, 20 May 2020 09:55:17 -0700
Subject: [PATCH 3/8] mm: Don't stop kswapd on a per-node basis when there are no waiters

The page allocator wakes all kswapds in an allocation context's allowed
nodemask in the slow path, so it doesn't make sense to keep the
kswapd-waiter count per NUMA node. Instead, make it a global counter
that stops all kswapds when there are no failed allocation requests.

Signed-off-by: Sultan Alsawaf
---
 include/linux/mmzone.h | 1 -
 mm/internal.h          | 1 +
 mm/mm_init.c           | 1 -
 mm/page_alloc.c        | 7 ++++---
 mm/vmscan.c            | 2 +-
 5 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 6640c7d5c1c954..5e50b78d58ea68 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1301,7 +1301,6 @@ typedef struct pglist_data {
 	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
 	int node_id;
-	atomic_t kswapd_waiters;
 	wait_queue_head_t kswapd_wait;
 	wait_queue_head_t pfmemalloc_wait;
diff --git a/mm/internal.h b/mm/internal.h
index 8ed127c1c808ce..2f3040ec707d71 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -421,6 +421,7 @@ extern void prep_compound_page(struct page *page, unsigned int order);
 extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
 extern int user_min_free_kbytes;
+extern atomic_long_t kswapd_waiters;
 
 extern void free_unref_page(struct page *page, unsigned int order);
 extern void free_unref_page_list(struct list_head *list);
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 306f0b171a406b..a1963c3322af43 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1359,7 +1359,6 @@ static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
 	pgdat_page_ext_init(pgdat);
 	lruvec_init(&pgdat->__lruvec);
-	pgdat->kswapd_waiters = (atomic_t)ATOMIC_INIT(0);
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 67215d85937cb1..c498d7c10de60b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -204,6 +204,8 @@ EXPORT_SYMBOL(node_states);
 
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 
+atomic_long_t kswapd_waiters = ATOMIC_LONG_INIT(0);
+
 /*
  * A cached value of the page's pageblock's migratetype, used when the page is
  * put on a pcplist. Used to avoid the pageblock migratetype lookup when
@@ -3962,7 +3964,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	unsigned int cpuset_mems_cookie;
 	unsigned int zonelist_iter_cookie;
 	int reserve_flags;
-	pg_data_t *pgdat = ac->preferred_zoneref->zone->zone_pgdat;
 	bool woke_kswapd = false;
 
 restart:
@@ -4005,7 +4006,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 
 	if (alloc_flags & ALLOC_KSWAPD) {
 		if (!woke_kswapd) {
-			atomic_inc(&pgdat->kswapd_waiters);
+			atomic_long_inc(&kswapd_waiters);
 			woke_kswapd = true;
 		}
 		wake_all_kswapds(order, gfp_mask, ac);
@@ -4226,7 +4227,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 fail:
 got_pg:
 	if (woke_kswapd)
-		atomic_dec(&pgdat->kswapd_waiters);
+		atomic_long_dec(&kswapd_waiters);
 	if (!page)
 		warn_alloc(gfp_mask, ac->nodemask,
			   "page allocation failure: order:%u", order);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a3d35e1aeaec26..51c6878dc0cec6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -7523,7 +7523,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
 		ret = try_to_freeze();
 		__fs_reclaim_acquire(_THIS_IP_);
 		if (ret || kthread_should_stop() ||
-		    !atomic_read(&pgdat->kswapd_waiters))
+		    !atomic_long_read(&kswapd_waiters))
 			break;
 
 		/*

From fe84d2ff18eb6925f496b86ad262361eb07eed31 Mon Sep 17 00:00:00 2001
From: Sultan Alsawaf
Date: Fri, 16 Jul 2021 23:35:47 -0700
Subject: [PATCH 4/8] mm: Increment kswapd_waiters for throttled direct reclaimers

Throttled direct reclaimers will wake up kswapd and wait for kswapd to
satisfy their page allocation request, even when the failed allocation
lacks the __GFP_KSWAPD_RECLAIM flag in its gfp mask. As a result,
kswapd may think that there are no waiters and thus exit prematurely,
causing throttled direct reclaimers lacking __GFP_KSWAPD_RECLAIM to
stall waiting for kswapd to wake them up. Incrementing the
kswapd_waiters counter when such direct reclaimers become throttled
fixes the problem.

Signed-off-by: Sultan Alsawaf
---
 mm/vmscan.c | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 51c6878dc0cec6..b8c64b536dd75c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -6897,7 +6897,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 	return 0;
 }
 
-static bool allow_direct_reclaim(pg_data_t *pgdat)
+static bool allow_direct_reclaim(pg_data_t *pgdat, bool using_kswapd)
 {
 	struct zone *zone;
 	unsigned long pfmemalloc_reserve = 0;
@@ -6926,6 +6926,10 @@ static bool allow_direct_reclaim(pg_data_t *pgdat)
 
 	wmark_ok = free_pages > pfmemalloc_reserve / 2;
 
+	/* The throttled direct reclaimer is now a kswapd waiter */
+	if (unlikely(!using_kswapd && !wmark_ok))
+		atomic_long_inc(&kswapd_waiters);
+
 	/* kswapd must be awake if processes are being throttled */
 	if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
 		if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL)
@@ -6991,7 +6995,7 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
 
 		/* Throttle based on the first usable node */
 		pgdat = zone->zone_pgdat;
-		if (allow_direct_reclaim(pgdat))
+		if (allow_direct_reclaim(pgdat, gfp_mask & __GFP_KSWAPD_RECLAIM))
 			goto out;
 		break;
 	}
@@ -7013,11 +7017,14 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
	 */
 	if (!(gfp_mask & __GFP_FS))
 		wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
-			allow_direct_reclaim(pgdat), HZ);
+			allow_direct_reclaim(pgdat, true), HZ);
 	else
 		/* Throttle until kswapd wakes the process */
 		wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
-			allow_direct_reclaim(pgdat));
+			allow_direct_reclaim(pgdat, true));
+
+	if (unlikely(!(gfp_mask & __GFP_KSWAPD_RECLAIM)))
+		atomic_long_dec(&kswapd_waiters);
 
 	if (fatal_signal_pending(current))
 		return true;
@@ -7515,7 +7522,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
	 * able to safely make forward progress. Wake them
	 */
 	if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
-			allow_direct_reclaim(pgdat))
+			allow_direct_reclaim(pgdat, true))
 		wake_up_all(&pgdat->pfmemalloc_wait);
 
 	/* Check if kswapd should be suspending */

From a43aaaf9db4d3ca6c8a8b2edbe0e1f54c7af5ed1 Mon Sep 17 00:00:00 2001
From: Sultan Alsawaf
Date: Sat, 24 Oct 2020 22:17:49 -0700
Subject: [PATCH 5/8] mm: Disable proactive compaction by default

On-demand compaction works fine assuming that you don't have a need to
spam the page allocator nonstop for large order page allocations.

Signed-off-by: Sultan Alsawaf
---
 mm/compaction.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index eacca2794e4708..e8fa99abc797d5 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1781,7 +1781,7 @@ static int sysctl_compact_unevictable_allowed __read_mostly = CONFIG_COMPACT_UNE
  * aggressively the kernel should compact memory in the
  * background. It takes values in the range [0, 100].
  */
-static unsigned int __read_mostly sysctl_compaction_proactiveness = 20;
+static unsigned int __read_mostly sysctl_compaction_proactiveness;
 static int sysctl_extfrag_threshold = 500;
 static int __read_mostly sysctl_compact_memory;

From 9b2d90900c3549ef4ce287be570836cc27ebcefc Mon Sep 17 00:00:00 2001
From: Sultan Alsawaf
Date: Wed, 20 Oct 2021 20:50:32 -0700
Subject: [PATCH 6/8] mm: Don't hog the CPU and zone lock in rmqueue_bulk()

There is noticeable scheduling latency and heavy zone lock contention
stemming from rmqueue_bulk's single hold of the zone lock while doing
its work, as seen with the preemptoff tracer. There's no actual need
for rmqueue_bulk() to hold the zone lock the entire time; it only does
so for supposed efficiency. As such, we can relax the zone lock and
even reschedule when IRQs are enabled in order to keep the scheduling
delays and zone lock contention at bay. Forward progress is still
guaranteed, as the zone lock can only be relaxed after page removal.

With this change, rmqueue_bulk() no longer appears as a serious
offender in the preemptoff tracer, and system latency is noticeably
improved.

Signed-off-by: Sultan Alsawaf
---
 mm/page_alloc.c | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c498d7c10de60b..bd2a12f4e04de2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2154,16 +2154,17 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 }
 
 /*
- * Obtain a specified number of elements from the buddy allocator, all under
- * a single hold of the lock, for efficiency. Add them to the supplied list.
- * Returns the number of new pages which were placed at *list.
+ * Obtain a specified number of elements from the buddy allocator, and relax the
+ * zone lock when needed. Add them to the supplied list. Returns the number of
+ * new pages which were placed at *list.
  */
 static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list,
			int migratetype, unsigned int alloc_flags)
 {
+	const bool can_resched = !preempt_count() && !irqs_disabled();
 	unsigned long flags;
-	int i;
+	int i, last_mod = 0;
 
 	spin_lock_irqsave(&zone->lock, flags);
 	for (i = 0; i < count; ++i) {
@@ -2172,6 +2173,18 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		if (unlikely(page == NULL))
 			break;
 
+		/* Reschedule and ease the contention on the lock if needed */
+		if (i + 1 < count && ((can_resched && need_resched()) ||
+				      spin_needbreak(&zone->lock))) {
+			__mod_zone_page_state(zone, NR_FREE_PAGES,
+					      -((i + 1 - last_mod) << order));
+			last_mod = i + 1;
+			spin_unlock_irqrestore(&zone->lock, flags);
+			if (can_resched)
+				cond_resched();
+			spin_lock_irqsave(&zone->lock, flags);
+		}
+
 		/*
		 * Split buddy pages returned by expand() are received here in
		 * physical page order. The page is added to the tail of
@@ -2188,7 +2201,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
					      -(1 << order));
 	}
 
-	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
+	__mod_zone_page_state(zone, NR_FREE_PAGES, -((i - last_mod) << order));
 	spin_unlock_irqrestore(&zone->lock, flags);
 
 	return i;

From 768a750fa710d0d4350464311ffba77b76e33190 Mon Sep 17 00:00:00 2001
From: Sultan Alsawaf
Date: Sat, 5 Sep 2020 10:45:14 -0700
Subject: [PATCH 7/8] scatterlist: Don't allocate sg lists using __get_free_page

Allocating pages with __get_free_page is slower than going through the
slab allocator to grab free pages out from a pool.

These are the results from running the code at the bottom of this
message:

[    1.278602] speedtest: __get_free_page: 9 us
[    1.278606] speedtest: kmalloc: 4 us
[    1.278609] speedtest: kmem_cache_alloc: 4 us
[    1.278611] speedtest: vmalloc: 13 us

kmalloc and kmem_cache_alloc (which is what kmalloc uses for common
sizes behind the scenes) are the fastest choices. Use kmalloc to speed
up sg list allocation.

This is the code used to produce the above measurements:

static int speedtest(void *data)
{
	static const struct sched_param sched_max_rt_prio = {
		.sched_priority = MAX_RT_PRIO - 1
	};
	volatile s64 ctotal = 0, gtotal = 0, ktotal = 0, vtotal = 0;
	struct kmem_cache *page_pool;
	int i, j, trials = 1000;
	volatile ktime_t start;
	void *ptr[100];

	sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_max_rt_prio);

	page_pool = kmem_cache_create("pages", PAGE_SIZE, PAGE_SIZE,
				      SLAB_PANIC, NULL);
	for (i = 0; i < trials; i++) {
		start = ktime_get();
		for (j = 0; j < ARRAY_SIZE(ptr); j++)
			while (!(ptr[j] = kmem_cache_alloc(page_pool, GFP_KERNEL)));
		ctotal += ktime_us_delta(ktime_get(), start);
		for (j = 0; j < ARRAY_SIZE(ptr); j++)
			kmem_cache_free(page_pool, ptr[j]);

		start = ktime_get();
		for (j = 0; j < ARRAY_SIZE(ptr); j++)
			while (!(ptr[j] = (void *)__get_free_page(GFP_KERNEL)));
		gtotal += ktime_us_delta(ktime_get(), start);
		for (j = 0; j < ARRAY_SIZE(ptr); j++)
			free_page((unsigned long)ptr[j]);

		start = ktime_get();
		for (j = 0; j < ARRAY_SIZE(ptr); j++)
			while (!(ptr[j] = __kmalloc(PAGE_SIZE, GFP_KERNEL)));
		ktotal += ktime_us_delta(ktime_get(), start);
		for (j = 0; j < ARRAY_SIZE(ptr); j++)
			kfree(ptr[j]);

		start = ktime_get();
		*ptr = vmalloc(ARRAY_SIZE(ptr) * PAGE_SIZE);
		vtotal += ktime_us_delta(ktime_get(), start);
		vfree(*ptr);
	}
	kmem_cache_destroy(page_pool);

	printk("%s: __get_free_page: %lld us\n", __func__, gtotal / trials);
	printk("%s: __kmalloc: %lld us\n", __func__, ktotal / trials);
	printk("%s: kmem_cache_alloc: %lld us\n", __func__, ctotal / trials);
	printk("%s: vmalloc: %lld us\n", __func__, vtotal / trials);

	complete(data);
	return 0;
}

static int __init start_test(void)
{
	DECLARE_COMPLETION_ONSTACK(done);

	BUG_ON(IS_ERR(kthread_run(speedtest, &done, "malloc_test")));
	wait_for_completion(&done);
	return 0;
}
late_initcall(start_test);

Signed-off-by: Sultan Alsawaf
---
 lib/scatterlist.c | 23 ++---------------------
 1 file changed, 2 insertions(+), 21 deletions(-)

diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index c65566b4dc662a..d3c8aaa68c5d39 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -150,31 +150,12 @@ EXPORT_SYMBOL(sg_init_one);
  */
 static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
 {
-	if (nents == SG_MAX_SINGLE_ALLOC) {
-		/*
-		 * Kmemleak doesn't track page allocations as they are not
-		 * commonly used (in a raw form) for kernel data structures.
-		 * As we chain together a list of pages and then a normal
-		 * kmalloc (tracked by kmemleak), in order to for that last
-		 * allocation not to become decoupled (and thus a
-		 * false-positive) we need to inform kmemleak of all the
-		 * intermediate allocations.
-		 */
-		void *ptr = (void *) __get_free_page(gfp_mask);
-		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
-		return ptr;
-	} else
-		return kmalloc_array(nents, sizeof(struct scatterlist),
-				     gfp_mask);
+	return kmalloc_array(nents, sizeof(struct scatterlist), gfp_mask);
 }
 
 static void sg_kfree(struct scatterlist *sg, unsigned int nents)
 {
-	if (nents == SG_MAX_SINGLE_ALLOC) {
-		kmemleak_free(sg);
-		free_page((unsigned long) sg);
-	} else
-		kfree(sg);
+	kfree(sg);
 }
 
 /**

From 1d6bb0118e70c378cd1bffbc45f6bfd9b3cd4178 Mon Sep 17 00:00:00 2001
From: Sultan Alsawaf
Date: Sun, 26 Sep 2021 00:29:38 -0700
Subject: [PATCH 8/8] mm: Omit RCU read lock in list_lru_count_one() when RCU isn't needed

The RCU read lock isn't necessary in list_lru_count_one() when the
condition that requires RCU (CONFIG_MEMCG && !CONFIG_SLOB) isn't met.
The high-frequency RCU lock and unlock adds measurable overhead to the
shrink_slab() path when it isn't needed. As such, we can simply omit
the RCU read lock in this case to improve performance.

Signed-off-by: Sultan Alsawaf
Signed-off-by: Kazuki Hashimoto
---
 mm/list_lru.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/mm/list_lru.c b/mm/list_lru.c
index a05e5bef3b4007..0ead8e6651df0c 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -178,6 +178,7 @@ EXPORT_SYMBOL_GPL(list_lru_isolate_move);
 unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
 {
+#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
 	struct list_lru_one *l;
 	long count;
 
@@ -190,6 +191,9 @@ unsigned long list_lru_count_one(struct list_lru *lru,
 		count = 0;
 
 	return count;
+#else
+	return READ_ONCE(lru->node[nid].lru.nr_items);
+#endif
 }
 EXPORT_SYMBOL_GPL(list_lru_count_one);