x86/mm/tlb: Restructure switch_mm_irqs_off()
Move some code that will be needed for the lazy -> !lazy state
transition when a lazy TLB CPU has gotten out of date.

No functional changes, since the if (real_prev == next) branch
always returns.

Suggested-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Rik van Riel <riel@surriel.com>
Acked-by: Dave Hansen <dave.hansen@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: efault@gmx.de
Cc: kernel-team@fb.com
Link: http://lkml.kernel.org/r/20180716190337.26133-4-riel@surriel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 2ff6ddf19c
commit 61d0beb579
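Before the diff itself, a minimal standalone sketch of the control flow this
patch produces. This is an illustration only, not kernel code: struct mm, the
stub choose_new_asid()/load_new_mm_cr3(), and switch_mm_sketch() are
hypothetical stand-ins; only the structure mirrors the patched
switch_mm_irqs_off() (locals hoisted to function scope, the else branch
closing right after choose_new_asid(), the flush path sitting at function
level so a later lazy -> !lazy path can share it).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct mm_struct. */
struct mm {
	uint64_t ctx_id;
	uint64_t tlb_gen;
};

/* Stub for choose_new_asid(): always picks ASID 1 and asks for a flush. */
static void choose_new_asid(struct mm *next, uint64_t next_tlb_gen,
			    uint16_t *new_asid, bool *need_flush)
{
	(void)next;
	(void)next_tlb_gen;
	*new_asid = 1;
	*need_flush = true;
}

/* Stub for load_new_mm_cr3(): just reports what would be loaded. */
static void load_new_mm_cr3(struct mm *next, uint16_t new_asid, bool flush)
{
	(void)next;
	printf("load CR3: asid=%u flush=%d\n", new_asid, flush);
}

static void switch_mm_sketch(struct mm *real_prev, struct mm *next)
{
	/* After this patch, both locals live at function scope ... */
	bool need_flush;
	uint16_t new_asid;
	uint64_t next_tlb_gen;

	if (real_prev == next) {
		/* Same mm: the original branch always returns here. */
		return;
	} else {
		next_tlb_gen = next->tlb_gen;
		choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
	}

	/*
	 * ... so the CR3-load/flush logic now sits outside the else,
	 * where the later lazy -> !lazy transition path can reach it too.
	 */
	if (need_flush)
		load_new_mm_cr3(next, new_asid, true);
	else
		load_new_mm_cr3(next, new_asid, false);
}

int main(void)
{
	struct mm a = { .ctx_id = 1, .tlb_gen = 1 };
	struct mm b = { .ctx_id = 2, .tlb_gen = 5 };

	switch_mm_sketch(&a, &b);	/* different mms: loads CR3 with a flush */
	switch_mm_sketch(&b, &b);	/* same mm: returns early */
	return 0;
}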
arch/x86/mm/tlb.c

@@ -187,6 +187,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 	u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
 	unsigned cpu = smp_processor_id();
 	u64 next_tlb_gen;
+	bool need_flush;
+	u16 new_asid;
 
 	/*
 	 * NB: The scheduler will call us with prev == next when switching
@@ -252,8 +254,6 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 
 		return;
 	} else {
-		u16 new_asid;
-		bool need_flush;
 		u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);
 
 		/*
@@ -297,41 +297,41 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		next_tlb_gen = atomic64_read(&next->context.tlb_gen);
 
 		choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
+	}
 
-		if (need_flush) {
-			this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
-			this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
-			load_new_mm_cr3(next->pgd, new_asid, true);
-
-			/*
-			 * NB: This gets called via leave_mm() in the idle path
-			 * where RCU functions differently. Tracing normally
-			 * uses RCU, so we need to use the _rcuidle variant.
-			 *
-			 * (There is no good reason for this. The idle code should
-			 *  be rearranged to call this before rcu_idle_enter().)
-			 */
-			trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
-		} else {
-			/* The new ASID is already up to date. */
-			load_new_mm_cr3(next->pgd, new_asid, false);
-
-			/* See above wrt _rcuidle. */
-			trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
-		}
-
-		/*
-		 * Record last user mm's context id, so we can avoid
-		 * flushing branch buffer with IBPB if we switch back
-		 * to the same user.
-		 */
-		if (next != &init_mm)
-			this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
-
-		this_cpu_write(cpu_tlbstate.loaded_mm, next);
-		this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
-	}
+	if (need_flush) {
+		this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
+		this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
+		load_new_mm_cr3(next->pgd, new_asid, true);
+
+		/*
+		 * NB: This gets called via leave_mm() in the idle path
+		 * where RCU functions differently. Tracing normally
+		 * uses RCU, so we need to use the _rcuidle variant.
+		 *
+		 * (There is no good reason for this. The idle code should
+		 *  be rearranged to call this before rcu_idle_enter().)
+		 */
+		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+	} else {
+		/* The new ASID is already up to date. */
+		load_new_mm_cr3(next->pgd, new_asid, false);
+
+		/* See above wrt _rcuidle. */
+		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
+	}
+
+	/*
+	 * Record last user mm's context id, so we can avoid
+	 * flushing branch buffer with IBPB if we switch back
+	 * to the same user.
+	 */
+	if (next != &init_mm)
+		this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
+
+	this_cpu_write(cpu_tlbstate.loaded_mm, next);
+	this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
 
 	load_mm_cr4(next);
 	switch_ldt(real_prev, next);
 }