Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Thomas Gleixner:

 - Fix for a CPU hot-add deadlock in microcode update code

 - Fix for idle consolidation fallout

 - Documentation update for initial kernel direct mapping

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Add missing comments for initial kernel direct mapping
  x86/microcode: Add local mutex to fix physical CPU hot-add deadlock
  x86: Fix idle consolidation fallout
commit ae3b29e67c
@@ -487,6 +487,7 @@ static inline void show_saved_mc(void)
 #endif
 
 #if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU)
+static DEFINE_MUTEX(x86_cpu_microcode_mutex);
 /*
  * Save this mc into mc_saved_data. So it will be loaded early when a CPU is
  * hot added or resumes.
@@ -507,7 +508,7 @@ int save_mc_for_early(u8 *mc)
	 * Hold hotplug lock so mc_saved_data is not accessed by a CPU in
	 * hotplug.
	 */
-	cpu_hotplug_driver_lock();
+	mutex_lock(&x86_cpu_microcode_mutex);
 
	mc_saved_count_init = mc_saved_data.mc_saved_count;
	mc_saved_count = mc_saved_data.mc_saved_count;
@@ -544,7 +545,7 @@ int save_mc_for_early(u8 *mc)
	}
 
 out:
-	cpu_hotplug_driver_unlock();
+	mutex_unlock(&x86_cpu_microcode_mutex);
 
	return ret;
 }
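The hunks above touch the Intel early-microcode loader (save_mc_for_early()). The deadlock being fixed comes from taking cpu_hotplug_driver_lock() on a path that can already hold it during physical CPU hot-add; guarding mc_saved_data with a dedicated local mutex avoids re-acquiring the wider lock. Below is a minimal userspace sketch of the same pattern, with pthreads standing in for the kernel mutex API; the names saved_mc, save_mc and mc_mutex are hypothetical stand-ins, not kernel code:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for mc_saved_data: a small cache that several
 * contexts (CPU hot-add, resume, ...) may update concurrently. */
static char saved_mc[16][64];
static int  saved_count;

/* Dedicated lock for the cache only, the analogue of
 * x86_cpu_microcode_mutex.  Callers may already hold other, wider locks
 * (the hotplug lock in the kernel case) without deadlocking here. */
static pthread_mutex_t mc_mutex = PTHREAD_MUTEX_INITIALIZER;

static int save_mc(const char *mc)
{
	int ret = 0;

	pthread_mutex_lock(&mc_mutex);
	if (saved_count >= 16) {
		ret = -1;		/* cache full */
		goto out;
	}
	snprintf(saved_mc[saved_count], sizeof(saved_mc[0]), "%s", mc);
	saved_count++;
out:
	pthread_mutex_unlock(&mc_mutex);
	return ret;
}

int main(void)
{
	save_mc("ucode-blob-1");
	printf("saved %d blob(s)\n", saved_count);
	return 0;
}

Build with "cc -pthread demo.c". The goto-out unlock mirrors the structure of save_mc_for_early(), where every exit funnels through a single unlock site.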
@@ -312,6 +312,8 @@ void arch_cpu_idle(void)
 {
	if (cpuidle_idle_call())
		x86_idle();
+	else
+		local_irq_enable();
 }
 
 /*
@@ -368,9 +370,6 @@ void amd_e400_remove_cpu(int cpu)
  */
 static void amd_e400_idle(void)
 {
-	if (need_resched())
-		return;
-
	if (!amd_e400_c1e_detected) {
		u32 lo, hi;
 
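These two hunks are in the x86 idle code (arch_cpu_idle() and amd_e400_idle()). The generic idle loop introduced in this cycle expects arch_cpu_idle() to return with interrupts enabled and performs the need_resched() check itself, so the fix re-enables interrupts on the cpuidle branch and drops the now-redundant bail-out from amd_e400_idle(). The following toy model illustrates that contract; the helpers here are hypothetical simulations, not the kernel implementations:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the CPU interrupt flag. */
static bool irqs_enabled;

static void local_irq_enable(void)  { irqs_enabled = true; }
static void local_irq_disable(void) { irqs_enabled = false; }

/* Stand-in for the default HLT-based idle routine, which re-enables
 * interrupts itself before halting. */
static void x86_idle(void)
{
	local_irq_enable();
	/* hlt would go here */
}

/* Stand-in for cpuidle_idle_call(): 0 means cpuidle handled the idle
 * entry, in which case interrupts may still be disabled on return. */
static int cpuidle_idle_call(void)
{
	return 0;
}

/* Shape of the fixed arch_cpu_idle(): whichever branch runs, the
 * function must return with interrupts enabled, because the generic
 * idle loop relies on that. */
static void arch_cpu_idle(void)
{
	if (cpuidle_idle_call())
		x86_idle();
	else
		local_irq_enable();
}

int main(void)
{
	local_irq_disable();	/* the idle loop enters with irqs off */
	arch_cpu_idle();
	printf("irqs enabled after idle: %s\n", irqs_enabled ? "yes" : "no");
	return 0;
}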
@@ -359,7 +359,17 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 }
 
 /*
- * would have hole in the middle or ends, and only ram parts will be mapped.
+ * We need to iterate through the E820 memory map and create direct mappings
+ * for only E820_RAM and E820_KERN_RESERVED regions. We cannot simply
+ * create direct mappings for all pfns from [0 to max_low_pfn) and
+ * [4GB to max_pfn) because of possible memory holes in high addresses
+ * that cannot be marked as UC by fixed/variable range MTRRs.
+ * Depending on the alignment of E820 ranges, this may possibly result
+ * in using smaller size (i.e. 4K instead of 2M or 1G) page tables.
+ *
+ * init_mem_mapping() calls init_range_memory_mapping() with big range.
+ * That range would have hole in the middle or ends, and only ram parts
+ * will be mapped in init_range_memory_mapping().
  */
 static unsigned long __init init_range_memory_mapping(
					   unsigned long r_start,
@@ -419,6 +429,13 @@ void __init init_mem_mapping(void)
	max_pfn_mapped = 0; /* will get exact value next */
	min_pfn_mapped = real_end >> PAGE_SHIFT;
	last_start = start = real_end;
+
+	/*
+	 * We start from the top (end of memory) and go to the bottom.
+	 * The memblock_find_in_range() gets us a block of RAM from the
+	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
+	 * for page table.
+	 */
	while (last_start > ISA_END_ADDRESS) {
		if (last_start > step_size) {
			start = round_down(last_start - 1, step_size);
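The added comments document the top-down scheme used by init_mem_mapping(): only E820 RAM ranges are direct-mapped, and page-table pages are allocated from memory that is already mapped near the top of RAM, so the loop walks down from the end of memory toward ISA_END_ADDRESS in progressively larger chunks. The sketch below reproduces just the shape of that walk; the RAM size, initial chunk size, and growth factor are made-up values, and map_range() merely prints what the kernel would map:

#include <stdio.h>

#define ISA_END_ADDRESS		0x100000UL	/* 1 MiB, as on x86 */
#define STEP_SIZE_INIT		(64UL << 20)	/* hypothetical initial chunk */

static unsigned long round_down_ul(unsigned long v, unsigned long align)
{
	return v & ~(align - 1);	/* align must be a power of two */
}

/* Stand-in for the real mapping step: the kernel builds page tables for
 * [start, end) here, allocating them from already-mapped memory above. */
static void map_range(unsigned long start, unsigned long end)
{
	printf("map [%#lx, %#lx)\n", start, end);
}

int main(void)
{
	unsigned long real_end   = 1UL << 31;	/* pretend 2 GiB of RAM */
	unsigned long step_size  = STEP_SIZE_INIT;
	unsigned long last_start = real_end;
	unsigned long start;

	/*
	 * Walk from the top of memory down to ISA_END_ADDRESS, mapping one
	 * aligned chunk per iteration and growing the chunk size as more
	 * memory becomes usable for page tables (mirrors the loop shown
	 * in the hunk above).
	 */
	while (last_start > ISA_END_ADDRESS) {
		if (last_start > step_size)
			start = round_down_ul(last_start - 1, step_size);
		else
			start = ISA_END_ADDRESS;
		map_range(start, last_start);
		last_start = start;
		step_size <<= 5;	/* hypothetical growth factor */
	}
	return 0;
}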