lockdep: Reduce the size of lock_list::distance

lock_list::distance is never greater than MAX_LOCK_DEPTH (which is
currently 48), so a u16 is wide enough to hold it. Shrink
lock_list::distance to save space, so that other fields can later be
added to help detect recursive read lock deadlocks without increasing
the size of the lock_list structure.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200807074238.1632519-6-boqun.feng@gmail.com
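
As a rough illustration of the space argument, here is a standalone
sketch (not the kernel's real struct lock_list, whose field set is
larger; the dep/only_xr names are borrowed from later patches in this
series and serve only as placeholders here): on a 64-bit target,
narrowing the int to a u16 frees two bytes inside the struct, enough
for two extra one-byte fields at no size cost.

  #include <stdint.h>
  #include <stdio.h>

  typedef uint16_t u16;
  typedef uint8_t u8;

  struct lock_list_old {
          void *class, *links_to;
          const void *trace;
          struct lock_list_old *parent;
          int distance;   /* 4 bytes, then 4 bytes of tail padding */
  };

  struct lock_list_new {
          void *class, *links_to;
          const void *trace;
          struct lock_list_new *parent;
          u16 distance;   /* 2 bytes */
          u8 dep;         /* illustrative extra fields, occupying the */
          u8 only_xr;     /* two bytes freed by shrinking distance    */
  };

  int main(void)
  {
          /* Both print 40 on LP64: the new layout carries two more
           * fields without growing the structure. */
          printf("old: %zu, new: %zu\n",
                 sizeof(struct lock_list_old),
                 sizeof(struct lock_list_new));
          return 0;
  }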
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -54,7 +54,7 @@ struct lock_list {
 	struct lock_class		*class;
 	struct lock_class		*links_to;
 	const struct lock_trace		*trace;
-	int				distance;
+	u16				distance;
 
 	/*
 	 * The parent field is used to implement breadth-first search, and the

--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1320,7 +1320,7 @@ static struct lock_list *alloc_list_entry(void)
  */
 static int add_lock_to_list(struct lock_class *this,
 			    struct lock_class *links_to, struct list_head *head,
-			    unsigned long ip, int distance,
+			    unsigned long ip, u16 distance,
 			    const struct lock_trace *trace)
 {
 	struct lock_list *entry;
@@ -2489,7 +2489,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next)
  */
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
-	       struct held_lock *next, int distance,
+	       struct held_lock *next, u16 distance,
 	       struct lock_trace **const trace)
 {
 	struct lock_list *entry;
@@ -2622,7 +2622,7 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		goto out_bug;
 
 	for (;;) {
-		int distance = curr->lockdep_depth - depth + 1;
+		u16 distance = curr->lockdep_depth - depth + 1;
 		hlock = curr->held_locks + depth - 1;
 
 		/*
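
Since distance is computed as curr->lockdep_depth - depth + 1 and the
held-lock depth is capped at MAX_LOCK_DEPTH, a u16 has ample headroom.
A compile-time guard along these lines (not part of this patch; plain
C11 here, where kernel code would typically reach for BUILD_BUG_ON)
would catch the constant ever outgrowing the type:

  #include <assert.h>
  #include <stdint.h>

  #define MAX_LOCK_DEPTH 48   /* value quoted in the commit message */

  /* Fail the build, rather than silently truncate distances, should
   * MAX_LOCK_DEPTH ever be raised past what a u16 can represent. */
  static_assert(MAX_LOCK_DEPTH <= UINT16_MAX,
                "lock_list::distance no longer fits in u16");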