Revert "x86/mm: Fix the size calculation of mapping tables"
Commit:

   722bc6b x86/mm: Fix the size calculation of mapping tables

It tried to address the issue that the first 2/4M should use 4k pages
if PSE is enabled, but the extra page counts should only be valid for x86_32.
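
For reference, the accounting that 722bc6b added looked roughly like the
sketch below. This is a minimal stand-alone illustration of the arithmetic
(the extra_ptes() helper and the simplified constants are made up for the
example, assuming 4 KiB pages and 2 MiB PMDs), not the exact kernel code:

    /* Sketch: count the 4k PTEs needed when the first 2M of a range
     * cannot use large pages, as the reverted commit assumed. */
    #define PAGE_SHIFT      12
    #define PAGE_SIZE       (1UL << PAGE_SHIFT)     /* 4 KiB */
    #define PMD_SIZE        (1UL << 21)             /* 2 MiB */

    static unsigned long extra_ptes(unsigned long start, unsigned long end)
    {
            unsigned long extra = 0;

            /* The first 2/4M doesn't use large pages. */
            if (start < PMD_SIZE)
                    extra += end - start;

            /* Round up to whole pages: covering 0..2M costs 512 PTEs. */
            return (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
    }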

This commit caused a kdump regression: the kdump kernel hangs.

Work is in progress to fundamentally fix the various page table
initialization issues we have, via the design suggested by
H. Peter Anvin, but it is not yet ready to be merged.

So, to get a working kdump, revert to the last known working state
by reverting this commit and a followup fix (which was incomplete):

   bd2753b x86/mm: Only add extra pages count for the first memory range during pre-allocation

Tested kdump on physical and virtual machines.

Signed-off-by: Dave Young <[email protected]>
Acked-by: Yinghai Lu <[email protected]>
Acked-by: Cong Wang <[email protected]>
Acked-by: Flavio Leitner <[email protected]>
Tested-by: Flavio Leitner <[email protected]>
Cc: Dan Carpenter <[email protected]>
Cc: Cong Wang <[email protected]>
Cc: Flavio Leitner <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: [email protected]
Cc: Vivek Goyal <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
daveyoung authored and Ingo Molnar committed Oct 24, 2012
1 parent 0e9e3e3 commit 7b16bbf
Showing 1 changed file with 9 additions and 13 deletions.
arch/x86/mm/init.c: 22 changes (9 additions & 13 deletions)
@@ -29,14 +29,8 @@ int direct_gbpages
 #endif
 ;
 
-struct map_range {
-	unsigned long start;
-	unsigned long end;
-	unsigned page_size_mask;
-};
-
-static void __init find_early_table_space(struct map_range *mr, unsigned long end,
-					  int use_pse, int use_gbpages)
+static void __init find_early_table_space(unsigned long end, int use_pse,
+					  int use_gbpages)
 {
 	unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
 	phys_addr_t base;
@@ -61,10 +55,6 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en
 #ifdef CONFIG_X86_32
 		extra += PMD_SIZE;
 #endif
-		/* The first 2/4M doesn't use large pages. */
-		if (mr->start < PMD_SIZE)
-			extra += mr->end - mr->start;
-
 		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	} else
 		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -95,6 +85,12 @@ void __init native_pagetable_reserve(u64 start, u64 end)
 	memblock_reserve(start, end - start);
 }
 
+struct map_range {
+	unsigned long start;
+	unsigned long end;
+	unsigned page_size_mask;
+};
+
 #ifdef CONFIG_X86_32
 #define NR_RANGE_MR 3
 #else /* CONFIG_X86_64 */
@@ -267,7 +263,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	 * nodes are discovered.
 	 */
 	if (!after_bootmem)
-		find_early_table_space(&mr[0], end, use_pse, use_gbpages);
+		find_early_table_space(end, use_pse, use_gbpages);
 
 	for (i = 0; i < nr_range; i++)
 		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
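
For context, the restored find_early_table_space() sizes the early
page-table pool purely from 'end'. A simplified stand-alone sketch of
that sizing follows (the early_table_bytes() helper name and constants
are illustrative; it assumes 8-byte entries and 4 KiB tables, and omits
the gbpages handling and the x86_32 extras):

    #define PAGE_SHIFT      12
    #define PAGE_SIZE       (1UL << PAGE_SHIFT)
    #define PMD_SHIFT       21                      /* 2 MiB per PMD entry */
    #define PUD_SHIFT       30                      /* 1 GiB per PUD entry */
    #define roundup(x, y)   ((((x) + (y) - 1) / (y)) * (y))

    static unsigned long early_table_bytes(unsigned long end, int use_pse)
    {
            unsigned long puds = (end + (1UL << PUD_SHIFT) - 1) >> PUD_SHIFT;
            unsigned long pmds = (end + (1UL << PMD_SHIFT) - 1) >> PMD_SHIFT;
            unsigned long ptes = use_pse ? 0 :
                                 (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
            unsigned long tables = 0;

            /* Each level needs one 8-byte entry per unit it maps, and
             * tables are allocated in whole 4 KiB pages. */
            tables += roundup(puds * 8, PAGE_SIZE);
            tables += roundup(pmds * 8, PAGE_SIZE);
            tables += roundup(ptes * 8, PAGE_SIZE);

            return tables;
    }

For example, mapping 1 GiB with PSE needs 1 PUD entry and 512 PMD
entries, so this estimate reserves two 4 KiB table pages (8 KiB total).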
