x86: make max_pfn cover acpi table below 4g

When a system has less than 4G of RAM installed and the ACPI tables sit
near the end of RAM, make max_pfn cover them too, so the 64-bit kernel
does not have to go through the fixmap to map them.

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Cc: "Suresh Siddha" <suresh.b.siddha@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Yinghai Lu, 2008-07-08 18:56:38 -07:00; committed by Ingo Molnar
commit 2dc807b37b, parent 49c980df55
3 changed files with 16 additions and 17 deletions
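The behavioural core of the change shows up in the first hunk below: the new e820_end() takes the highest end PFN over every e820 entry, whatever its type, where the old e820_end_of_ram() only looked at the registered active (RAM) regions. Below is a minimal, standalone C sketch of that max-over-all-entries computation; the struct layout, PAGE_SHIFT value, helper name and sample map are illustrative stand-ins, not the kernel's own definitions.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages, as on x86 */

/* Simplified stand-in for the kernel's struct e820entry. */
struct e820entry {
	uint64_t addr;	/* start of the region */
	uint64_t size;	/* length of the region in bytes */
	uint32_t type;	/* 1 = usable RAM, 3 = ACPI data, ... */
};

/*
 * Highest end PFN over all entries, regardless of type: an ACPI table
 * sitting just past the last usable RAM page still pushes the result up,
 * which is the property the patch wants max_pfn to have.
 */
static unsigned long end_pfn_of_table(const struct e820entry *map, int nr)
{
	unsigned long last_pfn = 0;
	int i;

	for (i = 0; i < nr; i++) {
		unsigned long end_pfn = (map[i].addr + map[i].size) >> PAGE_SHIFT;

		if (end_pfn > last_pfn)
			last_pfn = end_pfn;
	}
	return last_pfn;
}

int main(void)
{
	/* Toy map: ~2 GiB of usable RAM followed by an ACPI data region. */
	struct e820entry map[] = {
		{ 0x0000000000000000ULL, 0x000000007ff00000ULL, 1 },
		{ 0x000000007ff00000ULL, 0x0000000000100000ULL, 3 },
	};

	printf("last_pfn = 0x%lx\n", end_pfn_of_table(map, 2));
	return 0;
}

With this toy map the result is 0x80000, the PFN just past the ACPI region rather than just past the last RAM page, so a max_pfn computed this way covers the ACPI table without any fixmap detour.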

diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c

@@ -1056,12 +1056,20 @@ unsigned long __initdata end_user_pfn = MAX_ARCH_PFN;
 /*
  * Find the highest page frame number we have available
  */
-unsigned long __init e820_end_of_ram(void)
+unsigned long __init e820_end(void)
 {
-	unsigned long last_pfn;
+	int i;
+	unsigned long last_pfn = 0;
 	unsigned long max_arch_pfn = MAX_ARCH_PFN;
 
-	last_pfn = find_max_pfn_with_active_regions();
+	for (i = 0; i < e820.nr_map; i++) {
+		struct e820entry *ei = &e820.map[i];
+		unsigned long end_pfn;
+
+		end_pfn = (ei->addr + ei->size) >> PAGE_SHIFT;
+		if (end_pfn > last_pfn)
+			last_pfn = end_pfn;
+	}
 
 	if (last_pfn > max_arch_pfn)
 		last_pfn = max_arch_pfn;
@@ -1192,9 +1200,7 @@ static int __init parse_memmap_opt(char *p)
 		 * the real mem size before original memory map is
 		 * reset.
 		 */
-		e820_register_active_regions(0, 0, -1UL);
-		saved_max_pfn = e820_end_of_ram();
-		remove_all_active_ranges();
+		saved_max_pfn = e820_end();
 #endif
 		e820.nr_map = 0;
 		userdef = 1;

diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c

@@ -709,22 +709,18 @@ void __init setup_arch(char **cmdline_p)
 	early_gart_iommu_check();
 #endif
 
-	e820_register_active_regions(0, 0, -1UL);
 	/*
 	 * partially used pages are not usable - thus
 	 * we are rounding upwards:
 	 */
-	max_pfn = e820_end_of_ram();
+	max_pfn = e820_end();
 
 	/* preallocate 4k for mptable mpc */
 	early_reserve_e820_mpc_new();
 	/* update e820 for memory not covered by WB MTRRs */
 	mtrr_bp_init();
-	if (mtrr_trim_uncached_memory(max_pfn)) {
-		remove_all_active_ranges();
-		e820_register_active_regions(0, 0, -1UL);
-		max_pfn = e820_end_of_ram();
-	}
+	if (mtrr_trim_uncached_memory(max_pfn))
+		max_pfn = e820_end();
 
 #ifdef CONFIG_X86_32
 	/* max_low_pfn get updated here */
@@ -767,9 +763,6 @@ void __init setup_arch(char **cmdline_p)
 	 */
 	acpi_boot_table_init();
 
-	/* Remove active ranges so rediscovery with NUMA-awareness happens */
-	remove_all_active_ranges();
-
 #ifdef CONFIG_ACPI_NUMA
 	/*
 	 * Parse SRAT to discover nodes.

diff --git a/include/asm-x86/e820.h b/include/asm-x86/e820.h
--- a/include/asm-x86/e820.h
+++ b/include/asm-x86/e820.h

@@ -99,7 +99,7 @@ extern void free_early(u64 start, u64 end);
 extern void early_res_to_bootmem(u64 start, u64 end);
 extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
 
-extern unsigned long e820_end_of_ram(void);
+extern unsigned long e820_end(void);
 extern int e820_find_active_region(const struct e820entry *ei,
 				  unsigned long start_pfn,
 				  unsigned long last_pfn,