sched/core: Robustify preemption leak checks
When we warn about a preempt_count leak, reset the preempt_count to
the known good value so that the problem does not ripple forward.

This is most important on x86, which has a per-cpu preempt_count that
is not saved/restored (after this series). So if you schedule with an
invalid (!2*PREEMPT_DISABLE_OFFSET) preempt_count, the next task is
messed up too. Enforcing this invariant limits the borkage to just
the one task.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 3d8f74dd4c
commit 1dc0fffc48
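To make the invariant concrete before the diff, here is a minimal
user-space sketch of the warn-and-reset pattern. Everything below is a
toy stand-in that models the kernel's counter with a plain integer;
exit_check() and schedule_check() are hypothetical helpers standing in
for the do_exit() and schedule_debug() checks in the hunks that follow,
not kernel code.

/*
 * Minimal user-space model of the warn-and-reset pattern; build with
 * e.g. "cc -Wall leakdemo.c". The names mirror the kernel's, but all
 * of these definitions are toy stand-ins, not kernel code.
 */
#include <stdio.h>

#define PREEMPT_ENABLED         0  /* known good value: no disables pending */
#define PREEMPT_DISABLE_OFFSET  1
#define PREEMPT_DISABLED        (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

static int preempt_count;          /* models the per-task/per-cpu counter */

static void preempt_disable(void) { preempt_count++; }
static void preempt_enable(void)  { preempt_count--; }

/* Models the do_exit() check: nothing may hold preemption disabled here. */
static void exit_check(const char *comm, int pid)
{
	if (preempt_count != PREEMPT_ENABLED) {
		printf("note: %s[%d] exited with preempt_count %d\n",
		       comm, pid, preempt_count);
		preempt_count = PREEMPT_ENABLED;   /* repair, don't just warn */
	}
}

/* Models the schedule_debug() check: __schedule() holds exactly one disable. */
static void schedule_check(void)
{
	if (preempt_count != PREEMPT_DISABLED) {
		printf("BUG: scheduling while atomic, preempt_count %d\n",
		       preempt_count);
		preempt_count = PREEMPT_DISABLED;  /* repair before next task */
	}
}

int main(void)
{
	preempt_disable();      /* buggy code forgets the matching enable */
	exit_check("demo", 1);  /* warns once... */
	exit_check("demo", 1);  /* ...then stays silent: invariant restored */

	preempt_disable();      /* what __schedule() itself does */
	preempt_disable();      /* plus a leaked disable from buggy code */
	schedule_check();       /* warns, then resets to exactly one disable */
	preempt_enable();       /* __schedule()'s own disable pairs off */
	return 0;
}

The repeated exit_check() staying silent is the point: once the counter
has been repaired to its known good value, the damage is confined to
the one task that leaked it.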
kernel/exit.c

@@ -706,10 +706,12 @@ void do_exit(long code)
 	smp_mb();
 	raw_spin_unlock_wait(&tsk->pi_lock);
 
-	if (unlikely(in_atomic()))
+	if (unlikely(in_atomic())) {
 		pr_info("note: %s[%d] exited with preempt_count %d\n",
 			current->comm, task_pid_nr(current),
 			preempt_count());
+		preempt_count_set(PREEMPT_ENABLED);
+	}
 
 	/* sync mm's RSS info before statistics gathering */
 	if (tsk->mm)
kernel/sched/core.c

@@ -2968,8 +2968,10 @@ static inline void schedule_debug(struct task_struct *prev)
 	BUG_ON(unlikely(task_stack_end_corrupted(prev)));
 #endif
 
-	if (unlikely(in_atomic_preempt_off()))
+	if (unlikely(in_atomic_preempt_off())) {
 		__schedule_bug(prev);
+		preempt_count_set(PREEMPT_DISABLED);
+	}
 	rcu_sleep_check();
 
 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
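A note on the two reset values above: at do_exit() time nothing may
legitimately hold preemption disabled, so the known good value is
PREEMPT_ENABLED, whereas schedule_debug() runs inside __schedule(),
which itself holds one preempt_disable(), so there the count is
repaired to PREEMPT_DISABLED (one disable, in the era of this patch)
instead.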