mm: don't account memmap per-node
Fix invalid access to pgdat during hot-remove operation:
ndctl users reported a GPF when trying to destroy a namespace:
$ ndctl destroy-namespace all -r all -f
 Segmentation fault
 dmesg:
 Oops: general protection fault, probably for
 non-canonical address 0xdffffc0000005650: 0000 [#1] PREEMPT SMP KASAN
 PTI
 KASAN: probably user-memory-access in range
 [0x000000000002b280-0x000000000002b287]
 CPU: 26 UID: 0 PID: 1868 Comm: ndctl Not tainted 6.11.0-rc1 #1
 Hardware name: Dell Inc. PowerEdge R640/08HT8T, BIOS
 2.20.1 09/13/2023
 RIP: 0010:mod_node_page_state+0x2a/0x110

cxl-test users reported a GPF when trying to unload the test module:
$ modprobe -r cxl-test
 dmesg:
 BUG: unable to handle page fault for address: 0000000000004200
 #PF: supervisor read access in kernel mode
 #PF: error_code(0x0000) - not-present page
 PGD 0 P4D 0
 Oops: Oops: 0000 [#1] PREEMPT SMP PTI
 CPU: 0 UID: 0 PID: 1076 Comm: modprobe Tainted: G O N 6.11.0-rc1 #197
 Tainted: [O]=OOT_MODULE, [N]=TEST
 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/15
 RIP: 0010:mod_node_page_state+0x6/0x90

Currently, when memory is hot-plugged or hot-removed, the accounting is
done on the assumption that the memmap is allocated from the same node
as the hot-plugged/hot-removed memory, which is not always the case.

In addition, it is challenging to keep the node id of the memory that is
being removed available until the time the memmap accounting is actually
performed: that accounting happens after remove_pfn_range_from_zone()
and also after remove_memory_block_devices(), which means we can use
neither the pgdat nor a walk through the memory blocks to get the nid.

Given all of that, account the memmap overhead system-wide instead.

For this we use global atomic counters. There is no need for per-cpu
optimizations: the memmap size is rarely modified, and when it is, that
happens either during early boot, when only one CPU is running, or under
the global hotplug mutex.
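
For illustration, the depopulate_section_memmap() change (shown in full
in the mm/sparse.c hunk below) captures the essence of the fix; the old
line is the kind of access that can fault once the pgdat is no longer
reachable:

	/* Before: needs a valid pgdat at teardown time (the kind of
	 * access behind the GPFs above). */
	mod_node_page_state(page_pgdat(pfn_to_page(pfn)), NR_MEMMAP,
			    -1L * (DIV_ROUND_UP(end - start, PAGE_SIZE)));

	/* After: a system-wide atomic counter, so no node lookup is needed. */
	mod_memmap(-1L * (DIV_ROUND_UP(end - start, PAGE_SIZE)));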

Also, while we are here, rename nr_memmap to nr_memmap_pages and
nr_memmap_boot to nr_memmap_boot_pages, making it self-explanatory that
the unit is a page count.
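
After this patch, the renamed counters are expected to appear in
/proc/vmstat among the system-wide items, along these lines (values are
illustrative only):

	nr_dirty_threshold 352185
	nr_dirty_background_threshold 175862
	nr_memmap_pages 65536
	nr_memmap_boot_pages 262144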

Link: https://lkml.kernel.org/r/[email protected]
Link: https://lkml.kernel.org/r/[email protected]
Fixes: 15995a3 ("mm: report per-page metadata information")
Signed-off-by: Pasha Tatashin <[email protected]>
Reported-by: Yi Zhang <[email protected]>
Closes: https://lore.kernel.org/linux-cxl/CAHj4cs9Ax1=CoJkgBGP_+sNu6-6=6v=_L-ZBZY0bVLD3wUWZQg@mail.gmail.com
Reported-by: Alison Schofield <[email protected]>
Closes: https://lore.kernel.org/linux-mm/Zq0tPd2h6alFz8XF@aschofie-mobl2/#t
Tested-by: Dan Williams <[email protected]>
Tested-by: Alison Schofield <[email protected]>
Acked-by: David Hildenbrand <[email protected]>
Acked-by: David Rientjes <[email protected]>
Cc: Domenico Cerasuolo <[email protected]>
Cc: Fan Ni <[email protected]>
Cc: Joel Granados <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Li Zhijian <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Cc: Mike Rapoport <[email protected]>
Cc: Muchun Song <[email protected]>
Cc: Nhat Pham <[email protected]>
Cc: Sourav Panda <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Yosry Ahmed <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
soleen authored and akpm00 committed Aug 14, 2024
1 parent 82f0e57 commit 8e4a97b
Showing 9 changed files with 40 additions and 60 deletions.
2 changes: 0 additions & 2 deletions include/linux/mmzone.h
@@ -220,8 +220,6 @@ enum node_stat_item {
 	PGDEMOTE_KSWAPD,
 	PGDEMOTE_DIRECT,
 	PGDEMOTE_KHUGEPAGED,
-	NR_MEMMAP, /* page metadata allocated through buddy allocator */
-	NR_MEMMAP_BOOT, /* page metadata allocated through boot allocator */
 	NR_VM_NODE_STAT_ITEMS
 };

7 changes: 4 additions & 3 deletions include/linux/vmstat.h
@@ -38,6 +38,8 @@ struct reclaim_stat {
 enum vm_stat_item {
 	NR_DIRTY_THRESHOLD,
 	NR_DIRTY_BG_THRESHOLD,
+	NR_MEMMAP, /* page metadata allocated through buddy allocator */
+	NR_MEMMAP_BOOT, /* page metadata allocated through boot allocator */
 	NR_VM_STAT_ITEMS,
 };
@@ -618,7 +620,6 @@ static inline void lruvec_stat_sub_folio(struct folio *folio,
 	lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
 }
 
-void __meminit mod_node_early_perpage_metadata(int nid, long delta);
-void __meminit store_early_perpage_metadata(void);
-
+void mod_memmap_boot(long delta);
+void mod_memmap(long delta);
 #endif /* _LINUX_VMSTAT_H */
8 changes: 4 additions & 4 deletions mm/hugetlb_vmemmap.c
@@ -185,11 +185,11 @@ static int vmemmap_remap_range(unsigned long start, unsigned long end,
 static inline void free_vmemmap_page(struct page *page)
 {
 	if (PageReserved(page)) {
+		mod_memmap_boot(-1);
 		free_bootmem_page(page);
-		mod_node_page_state(page_pgdat(page), NR_MEMMAP_BOOT, -1);
 	} else {
+		mod_memmap(-1);
 		__free_page(page);
-		mod_node_page_state(page_pgdat(page), NR_MEMMAP, -1);
 	}
 }
@@ -341,7 +341,7 @@ static int vmemmap_remap_free(unsigned long start, unsigned long end,
 		copy_page(page_to_virt(walk.reuse_page),
 			  (void *)walk.reuse_addr);
 		list_add(&walk.reuse_page->lru, vmemmap_pages);
-		mod_node_page_state(NODE_DATA(nid), NR_MEMMAP, 1);
+		mod_memmap(1);
 	}
 
 	/*
@@ -396,7 +396,7 @@ static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
 			goto out;
 		list_add(&page->lru, list);
 	}
-	mod_node_page_state(NODE_DATA(nid), NR_MEMMAP, nr_pages);
+	mod_memmap(nr_pages);
 
 	return 0;
 out:
3 changes: 1 addition & 2 deletions mm/mm_init.c
@@ -1623,8 +1623,7 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat)
 		panic("Failed to allocate %ld bytes for node %d memory map\n",
 		      size, pgdat->node_id);
 	pgdat->node_mem_map = map + offset;
-	mod_node_early_perpage_metadata(pgdat->node_id,
-					DIV_ROUND_UP(size, PAGE_SIZE));
+	mod_memmap_boot(DIV_ROUND_UP(size, PAGE_SIZE));
 	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
 		 __func__, pgdat->node_id, (unsigned long)pgdat,
 		 (unsigned long)pgdat->node_mem_map);
1 change: 0 additions & 1 deletion mm/page_alloc.c
@@ -5755,7 +5755,6 @@ void __init setup_per_cpu_pageset(void)
 	for_each_online_pgdat(pgdat)
 		pgdat->per_cpu_nodestats =
 			alloc_percpu(struct per_cpu_nodestat);
-	store_early_perpage_metadata();
 }
 
 __meminit void zone_pcp_init(struct zone *zone)
18 changes: 4 additions & 14 deletions mm/page_ext.c
@@ -214,8 +214,7 @@ static int __init alloc_node_page_ext(int nid)
 		return -ENOMEM;
 	NODE_DATA(nid)->node_page_ext = base;
 	total_usage += table_size;
-	mod_node_page_state(NODE_DATA(nid), NR_MEMMAP_BOOT,
-			    DIV_ROUND_UP(table_size, PAGE_SIZE));
+	mod_memmap_boot(DIV_ROUND_UP(table_size, PAGE_SIZE));
 	return 0;
 }
@@ -275,10 +274,8 @@ static void *__meminit alloc_page_ext(size_t size, int nid)
 	else
 		addr = vzalloc_node(size, nid);
 
-	if (addr) {
-		mod_node_page_state(NODE_DATA(nid), NR_MEMMAP,
-				    DIV_ROUND_UP(size, PAGE_SIZE));
-	}
+	if (addr)
+		mod_memmap(DIV_ROUND_UP(size, PAGE_SIZE));
 
 	return addr;
 }
@@ -323,25 +320,18 @@ static void free_page_ext(void *addr)
 {
 	size_t table_size;
 	struct page *page;
-	struct pglist_data *pgdat;
 
 	table_size = page_ext_size * PAGES_PER_SECTION;
+	mod_memmap(-1L * (DIV_ROUND_UP(table_size, PAGE_SIZE)));
 
 	if (is_vmalloc_addr(addr)) {
 		page = vmalloc_to_page(addr);
-		pgdat = page_pgdat(page);
 		vfree(addr);
 	} else {
 		page = virt_to_page(addr);
-		pgdat = page_pgdat(page);
 		BUG_ON(PageReserved(page));
 		kmemleak_free(addr);
 		free_pages_exact(addr, table_size);
 	}
-
-	mod_node_page_state(pgdat, NR_MEMMAP,
-			    -1L * (DIV_ROUND_UP(table_size, PAGE_SIZE)));
-
 }
 
 static void __free_page_ext(unsigned long pfn)
11 changes: 4 additions & 7 deletions mm/sparse-vmemmap.c
@@ -469,13 +469,10 @@ struct page * __meminit __populate_section_memmap(unsigned long pfn,
 	if (r < 0)
 		return NULL;
 
-	if (system_state == SYSTEM_BOOTING) {
-		mod_node_early_perpage_metadata(nid, DIV_ROUND_UP(end - start,
-								  PAGE_SIZE));
-	} else {
-		mod_node_page_state(NODE_DATA(nid), NR_MEMMAP,
-				    DIV_ROUND_UP(end - start, PAGE_SIZE));
-	}
+	if (system_state == SYSTEM_BOOTING)
+		mod_memmap_boot(DIV_ROUND_UP(end - start, PAGE_SIZE));
+	else
+		mod_memmap(DIV_ROUND_UP(end - start, PAGE_SIZE));
 
 	return pfn_to_page(pfn);
 }
5 changes: 2 additions & 3 deletions mm/sparse.c
@@ -463,7 +463,7 @@ static void __init sparse_buffer_init(unsigned long size, int nid)
 	sparsemap_buf = memmap_alloc(size, section_map_size(), addr, nid, true);
 	sparsemap_buf_end = sparsemap_buf + size;
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
-	mod_node_early_perpage_metadata(nid, DIV_ROUND_UP(size, PAGE_SIZE));
+	mod_memmap_boot(DIV_ROUND_UP(size, PAGE_SIZE));
 #endif
 }
@@ -643,8 +643,7 @@ static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
 	unsigned long start = (unsigned long) pfn_to_page(pfn);
 	unsigned long end = start + nr_pages * sizeof(struct page);
 
-	mod_node_page_state(page_pgdat(pfn_to_page(pfn)), NR_MEMMAP,
-			    -1L * (DIV_ROUND_UP(end - start, PAGE_SIZE)));
+	mod_memmap(-1L * (DIV_ROUND_UP(end - start, PAGE_SIZE)));
 	vmemmap_free(start, end, altmap);
 }
 static void free_map_bootmem(struct page *memmap)
45 changes: 21 additions & 24 deletions mm/vmstat.c
@@ -1033,6 +1033,23 @@ unsigned long node_page_state(struct pglist_data *pgdat,
 }
 #endif
 
+/*
+ * Count number of pages "struct page" and "struct page_ext" consume.
+ * nr_memmap_boot: # of pages allocated by boot allocator & not part of MemTotal
+ * nr_memmap: # of pages that were allocated by buddy allocator
+ */
+static atomic_long_t nr_memmap_boot, nr_memmap;
+
+void mod_memmap_boot(long delta)
+{
+	atomic_long_add(delta, &nr_memmap_boot);
+}
+
+void mod_memmap(long delta)
+{
+	atomic_long_add(delta, &nr_memmap);
+}
+
 #ifdef CONFIG_COMPACTION
 
 struct contig_page_info {
@@ -1255,11 +1272,11 @@ const char * const vmstat_text[] = {
 	"pgdemote_kswapd",
 	"pgdemote_direct",
 	"pgdemote_khugepaged",
-	"nr_memmap",
-	"nr_memmap_boot",
 
 	/* system-wide enum vm_stat_item counters */
 	"nr_dirty_threshold",
 	"nr_dirty_background_threshold",
+	"nr_memmap_pages",
+	"nr_memmap_boot_pages",
 
 #if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
 	/* enum vm_event_item counters */
@@ -1827,6 +1844,8 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
 
 	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
 			    v + NR_DIRTY_THRESHOLD);
+	v[NR_MEMMAP_BOOT] = atomic_long_read(&nr_memmap_boot);
+	v[NR_MEMMAP] = atomic_long_read(&nr_memmap);
 	v += NR_VM_STAT_ITEMS;
 
 #ifdef CONFIG_VM_EVENT_COUNTERS
@@ -2285,25 +2304,3 @@ static int __init extfrag_debug_init(void)
 module_init(extfrag_debug_init);
 
 #endif
-
-/*
- * Page metadata size (struct page and page_ext) in pages
- */
-static unsigned long early_perpage_metadata[MAX_NUMNODES] __meminitdata;
-
-void __meminit mod_node_early_perpage_metadata(int nid, long delta)
-{
-	early_perpage_metadata[nid] += delta;
-}
-
-void __meminit store_early_perpage_metadata(void)
-{
-	int nid;
-	struct pglist_data *pgdat;
-
-	for_each_online_pgdat(pgdat) {
-		nid = pgdat->node_id;
-		mod_node_page_state(NODE_DATA(nid), NR_MEMMAP_BOOT,
-				    early_perpage_metadata[nid]);
-	}
-}
