seqlock: lockdep assert non-preemptibility on seqcount_t write
Preemption must be disabled before entering a sequence count write side critical section. Otherwise a seqcount reader can preempt the write side section and spin for the entire scheduler tick. If that reader belongs to a real-time scheduling class, it can spin forever and the kernel will livelock.

Assert through lockdep that preemption is disabled for seqcount writers.

Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200720155530.1173732-9-a.darwish@linutronix.de
This commit is contained in:
parent
8fd8ad5c5d
commit
859247d39f
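As background for the diff below, here is a minimal sketch of the usage pattern the new assertion enforces. It is illustrative only and not part of the patch; foo_seq, foo_data, foo_update() and foo_read() are hypothetical names, and the fragment assumes <linux/seqlock.h> and <linux/preempt.h> are included.

/*
 * Illustrative sketch only, not from this patch. With lockdep enabled,
 * write_seqcount_begin() now complains if the write side section is
 * entered with preemption enabled.
 */
static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
static unsigned long foo_data;

static void foo_update(unsigned long val)
{
	preempt_disable();			/* writer must not be preemptible */
	write_seqcount_begin(&foo_seq);		/* lockdep assertion would fire here otherwise */
	foo_data = val;
	write_seqcount_end(&foo_seq);
	preempt_enable();
}

static unsigned long foo_read(void)
{
	unsigned int seq;
	unsigned long val;

	do {
		seq = read_seqcount_begin(&foo_seq);	/* retried while a write is in flight */
		val = foo_data;
	} while (read_seqcount_retry(&foo_seq, seq));

	return val;
}

If foo_update() ran preemptible, foo_read() could preempt it mid-section and spin on an odd sequence count; with a real-time reader that spin never ends, which is the livelock the commit message describes.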
include/linux/seqlock.h
@@ -266,6 +266,12 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
 	kcsan_nestable_atomic_end();
 }
 
+static inline void __write_seqcount_begin_nested(seqcount_t *s, int subclass)
+{
+	raw_write_seqcount_begin(s);
+	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
+}
+
 /**
  * write_seqcount_begin_nested() - start a seqcount_t write section with
  * custom lockdep nesting level
@@ -276,8 +282,19 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
  */
 static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
 {
-	raw_write_seqcount_begin(s);
-	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
+	lockdep_assert_preemption_disabled();
+	__write_seqcount_begin_nested(s, subclass);
 }
 
+/*
+ * A write_seqcount_begin() variant w/o lockdep non-preemptibility checks.
+ *
+ * Use for internal seqlock.h code where it's known that preemption is
+ * already disabled. For example, seqlock_t write side functions.
+ */
+static inline void __write_seqcount_begin(seqcount_t *s)
+{
+	__write_seqcount_begin_nested(s, 0);
+}
+
 /**
@@ -575,7 +592,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
 static inline void write_seqlock(seqlock_t *sl)
 {
 	spin_lock(&sl->lock);
-	write_seqcount_begin(&sl->seqcount);
+	__write_seqcount_begin(&sl->seqcount);
 }
 
 /**
@@ -601,7 +618,7 @@ static inline void write_sequnlock(seqlock_t *sl)
 static inline void write_seqlock_bh(seqlock_t *sl)
 {
 	spin_lock_bh(&sl->lock);
-	write_seqcount_begin(&sl->seqcount);
+	__write_seqcount_begin(&sl->seqcount);
 }
 
 /**
@@ -628,7 +645,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl)
 static inline void write_seqlock_irq(seqlock_t *sl)
 {
 	spin_lock_irq(&sl->lock);
-	write_seqcount_begin(&sl->seqcount);
+	__write_seqcount_begin(&sl->seqcount);
 }
 
 /**
@@ -649,7 +666,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
 	unsigned long flags;
 
 	spin_lock_irqsave(&sl->lock, flags);
-	write_seqcount_begin(&sl->seqcount);
+	__write_seqcount_begin(&sl->seqcount);
 	return flags;
 }
 