Merge tag 'locking-urgent-2020-08-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Thomas Gleixner:
 "A set of fixes for lockdep, tracing and RCU:

   - Prevent recursion by using raw_cpu_* operations

   - Fixup the interrupt state in the cpu idle code to be consistent

   - Push rcu_idle_enter/exit() invocations deeper into the idle path so
     that the lock operations are inside the RCU watching sections

   - Move trace_cpu_idle() into generic code so it's called before RCU
     goes idle

   - Handle raw_local_irq* vs. local_irq* operations correctly

   - Move the tracepoints out from under the lockdep recursion handling
     which turned out to be fragile and inconsistent"

* tag 'locking-urgent-2020-08-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  lockdep,trace: Expose tracepoints
  lockdep: Only trace IRQ edges
  mips: Implement arch_irqs_disabled()
  arm64: Implement arch_irqs_disabled()
  nds32: Implement arch_irqs_disabled()
  locking/lockdep: Cleanup
  x86/entry: Remove unused THUNKs
  cpuidle: Move trace_cpu_idle() into generic code
  cpuidle: Make CPUIDLE_FLAG_TLB_FLUSHED generic
  sched,idle,rcu: Push rcu_idle deeper into the idle path
  cpuidle: Fixup IRQ state
  lockdep: Use raw_cpu_*() for per-cpu variables
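The first and last items above deal with the same hazard: lockdep's own bookkeeping must not call back into instrumented code while it is already running. Below is a minimal, self-contained C sketch of that recursion problem; the names are invented for illustration and none of this is kernel code, but it shows why a plain (raw) per-CPU read is safe where an instrumented this_cpu_read()-style accessor is not.

/* Standalone illustration of the recursion hazard addressed by using
 * raw_cpu_*() in lockdep.  All names are made up for the example. */
#include <stdio.h>

static int hardirqs_enabled;	/* stands in for the per-CPU variable */
static int depth;

static void lockdep_hook(void);

static int raw_cpu_read_flag(void)
{
	return hardirqs_enabled;	/* plain load, no instrumentation */
}

/* An "instrumented" accessor that, like this_cpu_read() with certain debug
 * options enabled, calls back into the IRQ-state tracking code. */
static int instrumented_cpu_read_flag(void)
{
	lockdep_hook();			/* re-enters the checker ... */
	return hardirqs_enabled;
}

static void lockdep_hook(void)
{
	if (++depth > 3) {		/* cut the recursion short for the demo */
		puts("recursed!");
		return;
	}
	/* The checker wants to look at the per-CPU state.  Using the
	 * instrumented accessor recurses; the raw accessor would not. */
	(void)instrumented_cpu_read_flag();
}

int main(void)
{
	depth = 0;
	lockdep_hook();			/* prints "recursed!" */

	depth = 0;
	(void)raw_cpu_read_flag();	/* plain read, no re-entry */
	puts("raw read done");
	return 0;
}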
commit b69bea8a65
@@ -298,11 +298,7 @@ static void omap3_pm_idle(void)
 	if (omap_irq_pending())
 		return;
 
-	trace_cpu_idle_rcuidle(1, smp_processor_id());
-
 	omap_sram_idle();
-
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
 #ifdef CONFIG_SUSPEND

@@ -95,6 +95,11 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 	return res;
 }
 
+static inline int arch_irqs_disabled(void)
+{
+	return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
 static inline unsigned long arch_local_irq_save(void)
 {
 	unsigned long flags;

@@ -123,10 +123,8 @@ void arch_cpu_idle(void)
 	 * This should do all the clock switching and wait for interrupt
 	 * tricks
 	 */
-	trace_cpu_idle_rcuidle(1, smp_processor_id());
 	cpu_do_idle();
 	local_irq_enable();
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
 #ifdef CONFIG_HOTPLUG_CPU

@@ -137,6 +137,11 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 	return !(flags & 1);
 }
 
+static inline int arch_irqs_disabled(void)
+{
+	return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
 #endif /* #ifndef __ASSEMBLY__ */
 
 /*

@@ -34,3 +34,8 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
 	return !flags;
 }
+
+static inline int arch_irqs_disabled(void)
+{
+	return arch_irqs_disabled_flags(arch_local_save_flags());
+}

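The three hunks above add the same helper to three architectures (they appear to be the arm64, mips and nds32 irqflags headers, going by the surrounding code). In each case the new predicate is just the composition of two accessors the architecture already has. A self-contained sketch of the pattern, with a simulated flags register standing in for the real CPU state (the real per-arch tests differ, e.g. !(flags & 1) on mips and !flags on nds32):

/* Sketch of the arch_irqs_disabled() pattern added above; not real
 * arch code, the flags register is faked so this runs standalone. */
#include <stdbool.h>
#include <stdio.h>

static unsigned long fake_flags_register = 1;	/* 1 == interrupts masked here */

static unsigned long arch_local_save_flags(void)
{
	return fake_flags_register;
}

/* Per-arch predicate on a saved flags value. */
static bool arch_irqs_disabled_flags(unsigned long flags)
{
	return flags & 1;
}

/* The new generic helper composes the two existing accessors. */
static bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

int main(void)
{
	printf("irqs disabled: %d\n", arch_irqs_disabled());
	return 0;
}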
@@ -200,17 +200,14 @@ static inline bool arch_irqs_disabled(void)
 #define powerpc_local_irq_pmu_save(flags)			\
	 do {							\
		raw_local_irq_pmu_save(flags);			\
-		trace_hardirqs_off();				\
+		if (!raw_irqs_disabled_flags(flags))		\
+			trace_hardirqs_off();			\
	} while(0)
 #define powerpc_local_irq_pmu_restore(flags)			\
	do {							\
-		if (raw_irqs_disabled_flags(flags)) {		\
-			raw_local_irq_pmu_restore(flags);	\
-			trace_hardirqs_off();			\
-		} else {					\
+		if (!raw_irqs_disabled_flags(flags))		\
			trace_hardirqs_on();			\
-			raw_local_irq_pmu_restore(flags);	\
-		}						\
+		raw_local_irq_pmu_restore(flags);		\
	} while(0)
 #else
 #define powerpc_local_irq_pmu_save(flags)			\

@@ -33,14 +33,13 @@ void enabled_wait(void)
 		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
 	clear_cpu_flag(CIF_NOHZ_DELAY);
 
-	trace_cpu_idle_rcuidle(1, smp_processor_id());
 	local_irq_save(flags);
 	/* Call the assembler magic in entry.S */
 	psw_idle(idle, psw_mask);
 	local_irq_restore(flags);
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 
 	/* Account time spent with enabled wait psw loaded as idle time. */
+	/* XXX seqcount has tracepoints that require RCU */
 	write_seqcount_begin(&idle->seqcount);
 	idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
 	idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;

@@ -29,11 +29,6 @@ SYM_CODE_START_NOALIGN(\name)
 SYM_CODE_END(\name)
 	.endm
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-	THUNK trace_hardirqs_on_thunk,trace_hardirqs_on_caller,1
-	THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1
-#endif
-
 #ifdef CONFIG_PREEMPTION
 	THUNK preempt_schedule_thunk, preempt_schedule
 	THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace

@@ -59,5 +59,6 @@ typedef struct {
 }
 
 void leave_mm(int cpu);
+#define leave_mm leave_mm
 
 #endif /* _ASM_X86_MMU_H */

@@ -684,9 +684,7 @@ void arch_cpu_idle(void)
  */
 void __cpuidle default_idle(void)
 {
-	trace_cpu_idle_rcuidle(1, smp_processor_id());
 	safe_halt();
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 #if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
 EXPORT_SYMBOL(default_idle);
@@ -792,7 +790,6 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
 static __cpuidle void mwait_idle(void)
 {
 	if (!current_set_polling_and_test()) {
-		trace_cpu_idle_rcuidle(1, smp_processor_id());
 		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
@@ -804,7 +801,6 @@ static __cpuidle void mwait_idle(void)
			__sti_mwait(0, 0);
		else
			local_irq_enable();
-		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 	} else {
 		local_irq_enable();
 	}

@@ -555,21 +555,12 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
 		load_new_mm_cr3(next->pgd, new_asid, true);
 
-		/*
-		 * NB: This gets called via leave_mm() in the idle path
-		 * where RCU functions differently.  Tracing normally
-		 * uses RCU, so we need to use the _rcuidle variant.
-		 *
-		 * (There is no good reason for this.  The idle code should
-		 *  be rearranged to call this before rcu_idle_enter().)
-		 */
-		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 	} else {
 		/* The new ASID is already up to date. */
 		load_new_mm_cr3(next->pgd, new_asid, false);
 
-		/* See above wrt _rcuidle. */
-		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
+		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
 	}
 
 	/* Make sure we write CR3 before loaded_mm. */

@@ -22,6 +22,7 @@
 #include <linux/module.h>
 #include <linux/suspend.h>
 #include <linux/tick.h>
+#include <linux/mmu_context.h>
 #include <trace/events/power.h>
 
 #include "cpuidle.h"
@@ -145,21 +146,24 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
 	 * executing it contains RCU usage regarded as invalid in the idle
 	 * context, so tell RCU about that.
 	 */
-	RCU_NONIDLE(tick_freeze());
+	tick_freeze();
 	/*
 	 * The state used here cannot be a "coupled" one, because the "coupled"
 	 * cpuidle mechanism enables interrupts and doing that with timekeeping
 	 * suspended is generally unsafe.
 	 */
 	stop_critical_timings();
+	rcu_idle_enter();
 	drv->states[index].enter_s2idle(dev, drv, index);
-	WARN_ON(!irqs_disabled());
+	if (WARN_ON_ONCE(!irqs_disabled()))
+		local_irq_disable();
 	/*
 	 * timekeeping_resume() that will be called by tick_unfreeze() for the
 	 * first CPU executing it calls functions containing RCU read-side
 	 * critical sections, so tell RCU about that.
	 */
-	RCU_NONIDLE(tick_unfreeze());
+	rcu_idle_exit();
+	tick_unfreeze();
 	start_critical_timings();
 
 	time_end = ns_to_ktime(local_clock());
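The s2idle hunk above also carries the 'cpuidle: Fixup IRQ state' change: if the driver's enter_s2idle() callback comes back with interrupts enabled, the code now warns once and disables them again rather than only warning. A standalone sketch of that warn-and-repair pattern, using stub helpers and a GNU C statement expression as a WARN_ON_ONCE stand-in; this is an illustration, not the kernel implementation:

/* Warn-and-repair pattern: callbacks must return with interrupts masked;
 * if one does not, complain once and restore the invariant. */
#include <stdbool.h>
#include <stdio.h>

static bool irqs_enabled = false;

static bool irqs_disabled(void)     { return !irqs_enabled; }
static void local_irq_disable(void) { irqs_enabled = false; }

/* A (buggy) low-power callback that forgets to mask interrupts again. */
static void buggy_enter_s2idle(void) { irqs_enabled = true; }

#define WARN_ON_ONCE(cond)						\
	({								\
		static bool warned;					\
		bool c = (cond);					\
		if (c && !warned) {					\
			warned = true;					\
			fprintf(stderr, "WARN: %s\n", #cond);		\
		}							\
		c;							\
	})

int main(void)
{
	buggy_enter_s2idle();
	if (WARN_ON_ONCE(!irqs_disabled()))
		local_irq_disable();	/* repair the expected state */

	printf("irqs disabled again: %d\n", irqs_disabled());
	return 0;
}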
@@ -225,19 +229,24 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 		broadcast = false;
 	}
 
+	if (target_state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
+		leave_mm(dev->cpu);
+
 	/* Take note of the planned idle state. */
 	sched_idle_set_state(target_state);
 
-	trace_cpu_idle_rcuidle(index, dev->cpu);
+	trace_cpu_idle(index, dev->cpu);
 	time_start = ns_to_ktime(local_clock());
 
 	stop_critical_timings();
+	rcu_idle_enter();
 	entered_state = target_state->enter(dev, drv, index);
+	rcu_idle_exit();
 	start_critical_timings();
 
 	sched_clock_idle_wakeup_event();
 	time_end = ns_to_ktime(local_clock());
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
+	trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
 
 	/* The cpu is no longer idle or about to enter idle. */
 	sched_idle_set_state(NULL);

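The cpuidle_enter_state() hunk shows the ordering at the heart of this series: the tracepoint and critical-timings bookkeeping run first, and only then does the CPU drop into RCU's idle (not-watching) state around the low-level enter callback. A simplified C sketch of that sequence follows; the helpers are stubs that only print, and the real kernel functions do considerably more.

/* Rough shape of the idle-entry sequence after this series, as plain C
 * pseudocode.  Function names mirror the kernel ones, bodies are stubs. */
#include <stdio.h>

static void trace_cpu_idle(int state, int cpu) { printf("trace idle %d cpu %d\n", state, cpu); }
static void stop_critical_timings(void)        { puts("stop critical timings"); }
static void rcu_idle_enter(void)                { puts("rcu_idle_enter"); }
static int  enter_idle_state(int state)         { puts("hardware idle"); return state; }
static void rcu_idle_exit(void)                 { puts("rcu_idle_exit"); }
static void start_critical_timings(void)        { puts("start critical timings"); }

static int cpuidle_enter_state_sketch(int cpu, int index)
{
	int entered_state;

	/* Tracepoint fires while RCU is still watching ... */
	trace_cpu_idle(index, cpu);
	stop_critical_timings();

	/* ... and only then does the CPU leave RCU's view. */
	rcu_idle_enter();
	entered_state = enter_idle_state(index);
	rcu_idle_exit();

	start_critical_timings();
	trace_cpu_idle(-1 /* PWR_EVENT_EXIT */, cpu);
	return entered_state;
}

int main(void)
{
	cpuidle_enter_state_sketch(0, 2);
	return 0;
}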
@@ -89,14 +89,6 @@ static unsigned int mwait_substates __initdata;
  */
 #define CPUIDLE_FLAG_ALWAYS_ENABLE	BIT(15)
 
-/*
- * Set this flag for states where the HW flushes the TLB for us
- * and so we don't need cross-calls to keep it consistent.
- * If this flag is set, SW flushes the TLB, so even if the
- * HW doesn't do the flushing, this flag is safe to use.
- */
-#define CPUIDLE_FLAG_TLB_FLUSHED	BIT(16)
-
 /*
  * MWAIT takes an 8-bit "hint" in EAX "suggesting"
  * the C-state (top nibble) and sub-state (bottom nibble)

@@ -131,14 +123,6 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
 	unsigned long eax = flg2MWAIT(state->flags);
 	unsigned long ecx = 1; /* break on interrupt flag */
 	bool tick;
-	int cpu = smp_processor_id();
-
-	/*
-	 * leave_mm() to avoid costly and often unnecessary wakeups
-	 * for flushing the user TLB's associated with the active mm.
-	 */
-	if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
-		leave_mm(cpu);
 
 	if (!static_cpu_has(X86_FEATURE_ARAT)) {
 		/*

@@ -75,12 +75,13 @@ struct cpuidle_state {
 };
 
 /* Idle State Flags */
-#define CPUIDLE_FLAG_NONE       (0x00)
-#define CPUIDLE_FLAG_POLLING	BIT(0) /* polling state */
-#define CPUIDLE_FLAG_COUPLED	BIT(1) /* state applies to multiple cpus */
-#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */
-#define CPUIDLE_FLAG_UNUSABLE	BIT(3) /* avoid using this state */
-#define CPUIDLE_FLAG_OFF	BIT(4) /* disable this state by default */
+#define CPUIDLE_FLAG_NONE       	(0x00)
+#define CPUIDLE_FLAG_POLLING		BIT(0) /* polling state */
+#define CPUIDLE_FLAG_COUPLED		BIT(1) /* state applies to multiple cpus */
+#define CPUIDLE_FLAG_TIMER_STOP 	BIT(2) /* timer is stopped on this state */
+#define CPUIDLE_FLAG_UNUSABLE		BIT(3) /* avoid using this state */
+#define CPUIDLE_FLAG_OFF		BIT(4) /* disable this state by default */
+#define CPUIDLE_FLAG_TLB_FLUSHED	BIT(5) /* idle-state flushes TLBs */
 
 struct cpuidle_device_kobj;
 struct cpuidle_state_kobj;

@@ -49,17 +49,18 @@ struct irqtrace_events {
 DECLARE_PER_CPU(int, hardirqs_enabled);
 DECLARE_PER_CPU(int, hardirq_context);
 
-  extern void trace_hardirqs_on_prepare(void);
-  extern void trace_hardirqs_off_finish(void);
-  extern void trace_hardirqs_on(void);
-  extern void trace_hardirqs_off(void);
-# define lockdep_hardirq_context()	(this_cpu_read(hardirq_context))
+extern void trace_hardirqs_on_prepare(void);
+extern void trace_hardirqs_off_finish(void);
+extern void trace_hardirqs_on(void);
+extern void trace_hardirqs_off(void);
+
+# define lockdep_hardirq_context()	(raw_cpu_read(hardirq_context))
 # define lockdep_softirq_context(p)	((p)->softirq_context)
 # define lockdep_hardirqs_enabled()	(this_cpu_read(hardirqs_enabled))
 # define lockdep_softirqs_enabled(p)	((p)->softirqs_enabled)
 # define lockdep_hardirq_enter()			\
 do {							\
-	if (this_cpu_inc_return(hardirq_context) == 1)	\
+	if (__this_cpu_inc_return(hardirq_context) == 1)\
 		current->hardirq_threaded = 0;		\
 } while (0)
 # define lockdep_hardirq_threaded()		\
@@ -68,7 +69,7 @@ do { \
 } while (0)
 # define lockdep_hardirq_exit()			\
 do {						\
-	this_cpu_dec(hardirq_context);		\
+	__this_cpu_dec(hardirq_context);	\
 } while (0)
 # define lockdep_softirq_enter()		\
 do {						\
@@ -120,17 +121,17 @@ do { \
 #else
 # define trace_hardirqs_on_prepare()		do { } while (0)
 # define trace_hardirqs_off_finish()		do { } while (0)
-# define trace_hardirqs_on()		do { } while (0)
-# define trace_hardirqs_off()		do { } while (0)
-# define lockdep_hardirq_context()	0
-# define lockdep_softirq_context(p)	0
-# define lockdep_hardirqs_enabled()	0
-# define lockdep_softirqs_enabled(p)	0
-# define lockdep_hardirq_enter()	do { } while (0)
-# define lockdep_hardirq_threaded()	do { } while (0)
-# define lockdep_hardirq_exit()		do { } while (0)
-# define lockdep_softirq_enter()	do { } while (0)
-# define lockdep_softirq_exit()		do { } while (0)
+# define trace_hardirqs_on()			do { } while (0)
+# define trace_hardirqs_off()			do { } while (0)
+# define lockdep_hardirq_context()		0
+# define lockdep_softirq_context(p)		0
+# define lockdep_hardirqs_enabled()		0
+# define lockdep_softirqs_enabled(p)		0
+# define lockdep_hardirq_enter()		do { } while (0)
+# define lockdep_hardirq_threaded()		do { } while (0)
+# define lockdep_hardirq_exit()			do { } while (0)
+# define lockdep_softirq_enter()		do { } while (0)
+# define lockdep_softirq_exit()			do { } while (0)
 # define lockdep_hrtimer_enter(__hrtimer)	false
 # define lockdep_hrtimer_exit(__context)	do { } while (0)
 # define lockdep_posixtimer_enter()		do { } while (0)
@@ -181,26 +182,33 @@ do { \
  * if !TRACE_IRQFLAGS.
  */
 #ifdef CONFIG_TRACE_IRQFLAGS
-#define local_irq_enable() \
-	do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
-#define local_irq_disable() \
-	do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
+
+#define local_irq_enable()				\
+	do {						\
+		trace_hardirqs_on();			\
+		raw_local_irq_enable();			\
+	} while (0)
+
+#define local_irq_disable()				\
+	do {						\
+		bool was_disabled = raw_irqs_disabled();\
+		raw_local_irq_disable();		\
+		if (!was_disabled)			\
+			trace_hardirqs_off();		\
+	} while (0)
+
 #define local_irq_save(flags)				\
	do {						\
		raw_local_irq_save(flags);		\
-		trace_hardirqs_off();			\
+		if (!raw_irqs_disabled_flags(flags))	\
+			trace_hardirqs_off();		\
	} while (0)
 
-
 #define local_irq_restore(flags)			\
	do {						\
-		if (raw_irqs_disabled_flags(flags)) {	\
-			raw_local_irq_restore(flags);	\
-			trace_hardirqs_off();		\
-		} else {				\
+		if (!raw_irqs_disabled_flags(flags))	\
			trace_hardirqs_on();		\
-			raw_local_irq_restore(flags);	\
-		}					\
+		raw_local_irq_restore(flags);		\
	} while (0)
 
 #define safe_halt()				\
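The local_irq_disable()/local_irq_save()/local_irq_restore() rewrites above implement the "only trace IRQ edges" rule: trace_hardirqs_on()/off() fire only when the interrupt state actually changes, so nested disables no longer generate spurious events. A small standalone model of that rule follows; the names mirror the kernel macros, but this is an illustration, not the real implementation.

/* Edge-only IRQ tracing, modelled in plain C. */
#include <stdbool.h>
#include <stdio.h>

static bool irqs_off;			/* simulated hardware state */

static void raw_local_irq_disable(void) { irqs_off = true; }
static bool raw_irqs_disabled(void)     { return irqs_off; }

static void trace_hardirqs_off(void) { puts("trace: hardirqs off"); }

static void local_irq_disable(void)
{
	bool was_disabled = raw_irqs_disabled();

	raw_local_irq_disable();
	if (!was_disabled)		/* only the ON -> OFF edge is traced */
		trace_hardirqs_off();
}

static unsigned long local_irq_save(void)
{
	unsigned long flags = raw_irqs_disabled();

	raw_local_irq_disable();
	if (!flags)			/* likewise: no event if already off */
		trace_hardirqs_off();
	return flags;
}

int main(void)
{
	local_irq_disable();		/* traces the edge */
	local_irq_save();		/* nested: already off, no trace */
	return 0;
}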
@@ -214,10 +222,7 @@ do { \
 
 #define local_irq_enable()	do { raw_local_irq_enable(); } while (0)
 #define local_irq_disable()	do { raw_local_irq_disable(); } while (0)
-#define local_irq_save(flags)				\
-	do {						\
-		raw_local_irq_save(flags);		\
-	} while (0)
+#define local_irq_save(flags)	do { raw_local_irq_save(flags); } while (0)
 #define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0)
 #define safe_halt()		do { raw_safe_halt(); } while (0)
 

@@ -535,19 +535,27 @@ do { \
 DECLARE_PER_CPU(int, hardirqs_enabled);
 DECLARE_PER_CPU(int, hardirq_context);
 
+/*
+ * The below lockdep_assert_*() macros use raw_cpu_read() to access the above
+ * per-cpu variables. This is required because this_cpu_read() will potentially
+ * call into preempt/irq-disable and that obviously isn't right. This is also
+ * correct because when IRQs are enabled, it doesn't matter if we accidentally
+ * read the value from our previous CPU.
+ */
+
 #define lockdep_assert_irqs_enabled()					\
 do {									\
-	WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirqs_enabled));	\
+	WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirqs_enabled));	\
 } while (0)
 
 #define lockdep_assert_irqs_disabled()					\
 do {									\
-	WARN_ON_ONCE(debug_locks && this_cpu_read(hardirqs_enabled));	\
+	WARN_ON_ONCE(debug_locks && raw_cpu_read(hardirqs_enabled));	\
 } while (0)
 
 #define lockdep_assert_in_irq()						\
 do {									\
-	WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirq_context));	\
+	WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirq_context));	\
 } while (0)
 
 #define lockdep_assert_preemption_enabled()				\
@@ -555,7 +563,7 @@ do { \
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     debug_locks			&&		\
		     (preempt_count() != 0		||		\
-		      !this_cpu_read(hardirqs_enabled)));		\
+		      !raw_cpu_read(hardirqs_enabled)));		\
 } while (0)
 
 #define lockdep_assert_preemption_disabled()			\
@@ -563,7 +571,7 @@ do { \
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     debug_locks			&&		\
		     (preempt_count() == 0		&&		\
-		      this_cpu_read(hardirqs_enabled)));		\
+		      raw_cpu_read(hardirqs_enabled)));			\
 } while (0)
 
 #else

@@ -3,10 +3,15 @@
 #define _LINUX_MMU_CONTEXT_H
 
 #include <asm/mmu_context.h>
+#include <asm/mmu.h>
 
 /* Architectures that care about IRQ state in switch_mm can override this. */
 #ifndef switch_mm_irqs_off
 # define switch_mm_irqs_off switch_mm
 #endif
 
+#ifndef leave_mm
+static inline void leave_mm(int cpu) { }
+#endif
+
 #endif

@@ -3756,7 +3756,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
 
 skip_checks:
	/* we'll do an OFF -> ON transition: */
-	this_cpu_write(hardirqs_enabled, 1);
+	__this_cpu_write(hardirqs_enabled, 1);
	trace->hardirq_enable_ip = ip;
	trace->hardirq_enable_event = ++trace->irq_events;
	debug_atomic_inc(hardirqs_on_events);
@@ -3795,7 +3795,7 @@ void noinstr lockdep_hardirqs_off(unsigned long ip)
		/*
		 * We have done an ON -> OFF transition:
		 */
-		this_cpu_write(hardirqs_enabled, 0);
+		__this_cpu_write(hardirqs_enabled, 0);
		trace->hardirq_disable_ip = ip;
		trace->hardirq_disable_event = ++trace->irq_events;
		debug_atomic_inc(hardirqs_off_events);
@@ -4977,6 +4977,8 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 {
	unsigned long flags;
 
+	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
+
	if (unlikely(current->lockdep_recursion)) {
		/* XXX allow trylock from NMI ?!? */
		if (lockdep_nmi() && !trylock) {
@@ -5001,7 +5003,6 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
	check_flags(flags);
 
	current->lockdep_recursion++;
-	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
	__lock_acquire(lock, subclass, trylock, read, check,
		       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
	lockdep_recursion_finish();
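The two lock_acquire() hunks above show the "move the tracepoints out from under the lockdep recursion handling" part of the series: the tracepoint is now emitted at the top of the function, before the current->lockdep_recursion check, instead of inside the recursion-protected section. A standalone sketch of that ordering (heavily simplified; the real function also manages IRQ flags and per-task state):

/* Tracepoint first, recursion guard second: consumers always see the
 * event, even when lockdep suppresses its own checking. */
#include <stdio.h>

static int lockdep_recursion;

static void trace_lock_acquire(const char *name) { printf("trace: acquire %s\n", name); }
static void do_lockdep_checks(const char *name)  { printf("lockdep: check %s\n", name); }

static void lock_acquire(const char *name)
{
	/* Emit the tracepoint unconditionally. */
	trace_lock_acquire(name);

	if (lockdep_recursion)
		return;			/* checking suppressed, event already out */

	lockdep_recursion++;
	do_lockdep_checks(name);
	lockdep_recursion--;
}

int main(void)
{
	lock_acquire("a");		/* traced and checked */
	lockdep_recursion = 1;
	lock_acquire("b");		/* traced, but not checked */
	return 0;
}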
@@ -5013,13 +5014,15 @@ void lock_release(struct lockdep_map *lock, unsigned long ip)
 {
	unsigned long flags;
 
+	trace_lock_release(lock, ip);
+
	if (unlikely(current->lockdep_recursion))
		return;
 
	raw_local_irq_save(flags);
	check_flags(flags);
 
	current->lockdep_recursion++;
-	trace_lock_release(lock, ip);
	if (__lock_release(lock, ip))
		check_chain_key(current);
	lockdep_recursion_finish();
@@ -5205,8 +5208,6 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
		hlock->holdtime_stamp = now;
	}
 
-	trace_lock_acquired(lock, ip);
-
	stats = get_lock_stats(hlock_class(hlock));
	if (waittime) {
		if (hlock->read)
@@ -5225,6 +5226,8 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 {
	unsigned long flags;
 
+	trace_lock_acquired(lock, ip);
+
	if (unlikely(!lock_stat || !debug_locks))
		return;
 

@@ -5234,7 +5237,6 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
	raw_local_irq_save(flags);
	check_flags(flags);
	current->lockdep_recursion++;
-	trace_lock_contended(lock, ip);
	__lock_contended(lock, ip);
	lockdep_recursion_finish();
	raw_local_irq_restore(flags);
@@ -5245,6 +5247,8 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
	unsigned long flags;
 
+	trace_lock_contended(lock, ip);
+
	if (unlikely(!lock_stat || !debug_locks))
		return;
 

@@ -54,17 +54,18 @@ __setup("hlt", cpu_idle_nopoll_setup);
 
 static noinline int __cpuidle cpu_idle_poll(void)
 {
-	rcu_idle_enter();
-	trace_cpu_idle_rcuidle(0, smp_processor_id());
-	local_irq_enable();
+	trace_cpu_idle(0, smp_processor_id());
	stop_critical_timings();
+	rcu_idle_enter();
+	local_irq_enable();
 
	while (!tif_need_resched() &&
-			(cpu_idle_force_poll || tick_check_broadcast_expired()))
+	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();
-	start_critical_timings();
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
+
	rcu_idle_exit();
+	start_critical_timings();
+	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 
	return 1;
 }
@@ -90,9 +91,14 @@ void __cpuidle default_idle_call(void)
	if (current_clr_polling_and_test()) {
		local_irq_enable();
	} else {
+
+		trace_cpu_idle(1, smp_processor_id());
		stop_critical_timings();
+		rcu_idle_enter();
		arch_cpu_idle();
+		rcu_idle_exit();
		start_critical_timings();
+		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
	}
 }
 

@@ -158,7 +164,6 @@ static void cpuidle_idle_call(void)
 
	if (cpuidle_not_available(drv, dev)) {
		tick_nohz_idle_stop_tick();
-		rcu_idle_enter();
 
		default_idle_call();
		goto exit_idle;
@@ -178,21 +183,17 @@ static void cpuidle_idle_call(void)
		u64 max_latency_ns;
 
		if (idle_should_enter_s2idle()) {
-			rcu_idle_enter();
 
			entered_state = call_cpuidle_s2idle(drv, dev);
			if (entered_state > 0)
				goto exit_idle;
 
-			rcu_idle_exit();
-
			max_latency_ns = U64_MAX;
		} else {
			max_latency_ns = dev->forced_idle_latency_limit_ns;
		}
 
		tick_nohz_idle_stop_tick();
-		rcu_idle_enter();
 
		next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
		call_cpuidle(drv, dev, next_state);
@@ -209,8 +210,6 @@ static void cpuidle_idle_call(void)
	else
		tick_nohz_idle_retain_tick();
 
-	rcu_idle_enter();
-
	entered_state = call_cpuidle(drv, dev, next_state);
	/*
	 * Give the governor an opportunity to reflect on the outcome
@@ -226,8 +225,6 @@ static void cpuidle_idle_call(void)
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();
-
-	rcu_idle_exit();
 }
 
 /*