lockdep: Add recursive read locks into dependency graph
Since we have all the fundamentals in place to handle recursive read locks, we can now add them into the dependency graph.

Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200807074238.1632519-13-boqun.feng@gmail.com
parent f08e388857
commit 621c9dac0e
@@ -2808,16 +2808,6 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	if (!check_irq_usage(curr, prev, next))
 		return 0;
 
-	/*
-	 * For recursive read-locks we do all the dependency checks,
-	 * but we dont store read-triggered dependencies (only
-	 * write-triggered dependencies). This ensures that only the
-	 * write-side dependencies matter, and that if for example a
-	 * write-lock never takes any other locks, then the reads are
-	 * equivalent to a NOP.
-	 */
-	if (next->read == 2 || prev->read == 2)
-		return 1;
 	/*
 	 * Is the <prev> -> <next> dependency already present?
 	 *
@@ -2935,13 +2925,8 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		u16 distance = curr->lockdep_depth - depth + 1;
 		hlock = curr->held_locks + depth - 1;
 
-		/*
-		 * Only non-recursive-read entries get new dependencies
-		 * added:
-		 */
-		if (hlock->read != 2 && hlock->check) {
-			int ret = check_prev_add(curr, hlock, next, distance,
-						 &trace);
+		if (hlock->check) {
+			int ret = check_prev_add(curr, hlock, next, distance, &trace);
 			if (!ret)
 				return 0;
 
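In concrete terms, the removed read == 2 early-return meant recursive-read acquisitions never produced graph edges, so any cycle that is only closed through a recursive reader went undetected. Below is a minimal user-space sketch of such a cycle; it is not the kernel code, and the POSIX rwlocks, thread bodies and lock names merely stand in for kernel locks with recursive readers:

/*
 * Illustrative user-space analogue only; build with: cc -pthread
 */
#include <pthread.h>

static pthread_rwlock_t A = PTHREAD_RWLOCK_INITIALIZER;
static pthread_rwlock_t B = PTHREAD_RWLOCK_INITIALIZER;

static void *reader(void *arg)
{
	pthread_rwlock_rdlock(&A);	/* hold A shared...         */
	pthread_rwlock_rdlock(&B);	/* ...acquire B as a reader */
	pthread_rwlock_unlock(&B);
	pthread_rwlock_unlock(&A);
	return NULL;
}

static void *writer(void *arg)
{
	pthread_rwlock_wrlock(&B);	/* hold B exclusive...      */
	pthread_rwlock_wrlock(&A);	/* ...acquire A as a writer */
	pthread_rwlock_unlock(&A);
	pthread_rwlock_unlock(&B);
	return NULL;
}

int main(void)
{
	pthread_t r, w;

	/*
	 * Under an unlucky interleaving this deadlocks: the reader
	 * holds A shared and blocks on write-held B, while the
	 * writer holds B and blocks on read-held A.
	 */
	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(r, NULL);
	pthread_join(w, NULL);
	return 0;
}

In the edge notation used elsewhere in this series, the reader records A -(SR)-> B and the writer records B -(EN)-> A. The resulting cycle has no -(*R)-> dependency immediately followed by a -(S*)-> one, so it is a strong path, i.e. a genuine deadlock, and with this patch both edges are now present in the dependency graph for the cycle check to find.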