lockdep: Revert "lockdep: Use raw_cpu_*() for per-cpu variables"
The thinking in commit:

  fddf9055a6 ("lockdep: Use raw_cpu_*() for per-cpu variables")

is flawed. While it is true that when we're migratable both CPUs will
have a 0 value, it doesn't hold that when we do get migrated in the
middle of a raw_cpu_op(), the old CPU will still have 0 by the time we
get around to reading it on the new CPU.

Luckily, the reason for that commit (s390 using preempt_disable()
instead of preempt_disable_notrace() in their percpu code) has since
been fixed by commit:

  1196f12a2c ("s390: don't trace preemption in percpu macros")

An audit of arch/*/include/asm/percpu*.h shows there are no other
architectures affected by this particular issue.

Fixes: fddf9055a6 ("lockdep: Use raw_cpu_*() for per-cpu variables")
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lkml.kernel.org/r/20201005095958.GJ2651@hirez.programming.kicks-ass.net
commit baffd723e4
parent 4d004099a6
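The race described in the commit message above is easiest to see if raw_cpu_read() is imagined as two steps: compute the current CPU's slot, then load through that pointer, with nothing preventing a migration in between. The following stand-alone, user-space sketch is a hypothetical illustration only; the array, the simulated migration and the "other task" store are invented for this example and are not kernel code:

/* pcpu_demo.c - hypothetical illustration, not kernel code */
#include <stdio.h>

#define NR_CPUS 2

/* Stand-in for a lockdep per-CPU variable such as lockdep_recursion. */
static unsigned int pcpu_val[NR_CPUS];

int main(void)
{
	int cpu = 0;

	/* Step 1 of an imagined raw_cpu_read(): pick the current CPU's slot. */
	unsigned int *slot = &pcpu_val[cpu];

	/* The task is migrated to CPU 1 before the load happens ... */
	cpu = 1;

	/* ... and something running on CPU 0 makes its value non-zero. */
	pcpu_val[0] = 1;

	/*
	 * Step 2: the load finally goes through the stale CPU 0 pointer.
	 * The "both CPUs hold 0 while we are migratable" argument no
	 * longer holds: we observe 1 even though the new CPU's variable
	 * is still 0.
	 */
	printf("stale read: %u, current CPU %d actually holds %u\n",
	       *slot, cpu, pcpu_val[cpu]);
	return 0;
}

With the s390 percpu macros fixed to use preempt_disable_notrace(), this_cpu_read() can be used here again, which is what the hunks below restore.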
@@ -536,29 +536,21 @@ DECLARE_PER_CPU(int, hardirqs_enabled);
 DECLARE_PER_CPU(int, hardirq_context);
 DECLARE_PER_CPU(unsigned int, lockdep_recursion);
 
-/*
- * The below lockdep_assert_*() macros use raw_cpu_read() to access the above
- * per-cpu variables. This is required because this_cpu_read() will potentially
- * call into preempt/irq-disable and that obviously isn't right. This is also
- * correct because when IRQs are enabled, it doesn't matter if we accidentally
- * read the value from our previous CPU.
- */
-
-#define __lockdep_enabled (debug_locks && !raw_cpu_read(lockdep_recursion))
+#define __lockdep_enabled (debug_locks && !this_cpu_read(lockdep_recursion))
 
 #define lockdep_assert_irqs_enabled() \
 do { \
-	WARN_ON_ONCE(__lockdep_enabled && !raw_cpu_read(hardirqs_enabled)); \
+	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
 } while (0)
 
 #define lockdep_assert_irqs_disabled() \
 do { \
-	WARN_ON_ONCE(__lockdep_enabled && raw_cpu_read(hardirqs_enabled)); \
+	WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
 } while (0)
 
 #define lockdep_assert_in_irq() \
 do { \
-	WARN_ON_ONCE(__lockdep_enabled && !raw_cpu_read(hardirq_context)); \
+	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
 } while (0)
 
 #define lockdep_assert_preemption_enabled() \
@@ -566,7 +558,7 @@ do { \
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
 		     __lockdep_enabled && \
 		     (preempt_count() != 0 || \
-		      !raw_cpu_read(hardirqs_enabled))); \
+		      !this_cpu_read(hardirqs_enabled))); \
 } while (0)
 
 #define lockdep_assert_preemption_disabled() \
@@ -574,7 +566,7 @@ do { \
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
 		     __lockdep_enabled && \
 		     (preempt_count() == 0 && \
-		      raw_cpu_read(hardirqs_enabled))); \
+		      this_cpu_read(hardirqs_enabled))); \
 } while (0)
 
 #else