From 1dc0fffc48af94513e621f95dff730ed4f7317ec Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 28 Sep 2015 17:57:39 +0200
Subject: [PATCH] sched/core: Robustify preemption leak checks

When we warn about a preempt_count leak, reset the preempt_count to
the known good value such that the problem does not ripple forward.

This is most important on x86 which has a per cpu preempt_count that is
not saved/restored (after this series). So if you schedule with an
invalid (!2*PREEMPT_DISABLE_OFFSET) preempt_count the next task is
messed up too.

Enforcing this invariant limits the borkage to just the one task.

Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Frederic Weisbecker
Reviewed-by: Thomas Gleixner
Reviewed-by: Steven Rostedt
Cc: Linus Torvalds
Cc: Mike Galbraith
Cc: Peter Zijlstra
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar
---
 kernel/exit.c       | 4 +++-
 kernel/sched/core.c | 4 +++-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/kernel/exit.c b/kernel/exit.c
index ea95ee1b5ef7..443677c8efe6 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -706,10 +706,12 @@ void do_exit(long code)
 	smp_mb();
 	raw_spin_unlock_wait(&tsk->pi_lock);
 
-	if (unlikely(in_atomic()))
+	if (unlikely(in_atomic())) {
 		pr_info("note: %s[%d] exited with preempt_count %d\n",
 			current->comm, task_pid_nr(current),
 			preempt_count());
+		preempt_count_set(PREEMPT_ENABLED);
+	}
 
 	/* sync mm's RSS info before statistics gathering */
 	if (tsk->mm)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6344d82a84f6..d6989f85c641 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2968,8 +2968,10 @@ static inline void schedule_debug(struct task_struct *prev)
 	BUG_ON(unlikely(task_stack_end_corrupted(prev)));
 #endif
 
-	if (unlikely(in_atomic_preempt_off()))
+	if (unlikely(in_atomic_preempt_off())) {
 		__schedule_bug(prev);
+		preempt_count_set(PREEMPT_DISABLED);
+	}
 	rcu_sleep_check();
 
 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
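
For readers outside the kernel tree, here is a minimal, self-contained C sketch of
the pattern the patch enforces: when a counter that must balance is found leaked at
a checkpoint, warn and then clamp it back to the known good value so the damage
stays with the offending task instead of rippling into whoever runs next. Every
identifier in the sketch (mock_preempt_count, EXPECTED_COUNT, checkpoint()) is
hypothetical and invented for illustration; the kernel's real mechanism is the
preempt_count_set() call shown in the diff above.

/*
 * Hypothetical user-space analogue of the pattern above: detect a
 * leaked count at a checkpoint, warn, then force it back to the known
 * good value so the next "task" starts from a sane state.  All names
 * here are invented for illustration; they are not kernel APIs.
 */
#include <stdio.h>

#define EXPECTED_COUNT 0		/* known good value at the checkpoint */

static int mock_preempt_count;		/* stand-in for the leaked counter */

static void checkpoint(const char *who)
{
	if (mock_preempt_count != EXPECTED_COUNT) {
		fprintf(stderr, "note: %s left count at %d, resetting\n",
			who, mock_preempt_count);
		/* the point of the patch: repair the invariant, don't just warn */
		mock_preempt_count = EXPECTED_COUNT;
	}
}

int main(void)
{
	mock_preempt_count++;		/* simulate a disable without a matching enable */
	checkpoint("task A");		/* warns and resets */
	checkpoint("task B");		/* stays silent: the leak did not ripple forward */
	return 0;
}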