4eaca0a887
preempt_schedule_context() is a tracing-safe preemption point, but it's
only used when CONFIG_CONTEXT_TRACKING=y. Other configs have had tracing
recursion issues since commit:

  b30f0e3ffe ("sched/preempt: Optimize preemption operations on __schedule() callers")

introduced function-based preempt_count_*() ops.

Let's make it available on all configs and give it a more appropriate
name for its new position.
Reported-by: Fengguang Wu <fengguang.wu@intel.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1433432349-1021-3-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
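For context (not part of this commit), a minimal sketch of why a tracing-safe
preemption point matters: a function-tracer callback runs while tracing is
active, so the preempt-enable path it uses must not itself be traceable,
otherwise re-enabling preemption could recurse back into the tracer. The
handler below is hypothetical; preempt_disable_notrace() and
preempt_enable_notrace() are the existing helpers from <linux/preempt.h>, and
under CONFIG_PREEMPT the latter is what ends up calling
preempt_schedule_notrace().

#include <linux/preempt.h>

/* Hypothetical tracer callback, for illustration only. */
static void notrace example_trace_handler(unsigned long ip,
					  unsigned long parent_ip)
{
	preempt_disable_notrace();
	/* ... record the event without being traced ... */
	preempt_enable_notrace();	/* may call preempt_schedule_notrace() */
}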
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <linux/thread_info.h>

#define PREEMPT_ENABLED	(0)
static __always_inline int preempt_count(void)
{
	return current_thread_info()->preempt_count;
}

static __always_inline int *preempt_count_ptr(void)
{
	return &current_thread_info()->preempt_count;
}

static __always_inline void preempt_count_set(int pc)
{
	*preempt_count_ptr() = pc;
}
/*
 * must be macros to avoid header recursion hell
 */
#define init_task_preempt_count(p) do { \
	task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
	task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
} while (0)
/*
 * The generic implementation does not fold the NEED_RESCHED flag into the
 * preempt count (see __preempt_count_dec_and_test() below), so these are
 * no-ops and tif_need_resched() is used instead.
 */
static __always_inline void set_preempt_need_resched(void)
{
}

static __always_inline void clear_preempt_need_resched(void)
{
}

static __always_inline bool test_preempt_need_resched(void)
{
	return false;
}
/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
	*preempt_count_ptr() += val;
}

static __always_inline void __preempt_count_sub(int val)
{
	*preempt_count_ptr() -= val;
}

static __always_inline bool __preempt_count_dec_and_test(void)
{
	/*
	 * Because load-store architectures cannot do per-cpu atomic
	 * operations, we cannot use PREEMPT_NEED_RESCHED here: it might
	 * get lost.
	 */
	return !--*preempt_count_ptr() && tif_need_resched();
}
/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(void)
{
	return unlikely(!preempt_count() && tif_need_resched());
}
#ifdef CONFIG_PREEMPT
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
extern asmlinkage void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPT */

#endif /* __ASM_PREEMPT_H */
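For reference, the caller side that consumes these primitives, paraphrased
(approximately, not verbatim from this tree) from <linux/preempt.h> of the
same era, CONFIG_PREEMPT case:

#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_notrace(); \
} while (0)

preempt_count_dec_and_test() can be an out-of-line, traceable call when
CONFIG_DEBUG_PREEMPT or the preempt tracer is enabled, while the notrace
variant sticks to the raw __preempt_count_dec_and_test() defined above, so
nothing on that path can recurse into the tracer.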