locking/lockdep: Untangle xhlock history save/restore from task independence
Where XHLOCK_{SOFT,HARD} are save/restore points in xhlocks[] that ensure temporal IRQ events don't interact with task state, XHLOCK_PROC is a fundamentally different beast that just happens to share the interface.

The purpose of XHLOCK_PROC is to annotate independent execution inside one task. Workqueues, for example: each work item should appear to run in its own 'pristine' 'task'.

Remove XHLOCK_PROC in favour of its own interface to avoid the confusion.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Byungchul Park <byungchul.park@lge.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: boqun.feng@gmail.com
Cc: david@fromorbit.com
Cc: johannes@sipsolutions.net
Cc: kernel-team@lge.com
Cc: oleg@redhat.com
Cc: tj@kernel.org
Link: http://lkml.kernel.org/r/20170829085939.ggmb6xiohw67micb@hirez.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent 7b3d61cc73
commit f52be57080
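In short: the XHLOCK_HARD/XHLOCK_SOFT save/restore pair stays as it was (minus the dropped bool argument), while the per-work 'pristine task' annotation moves to the new lockdep_invariant_state(). A minimal sketch of how the two resulting primitives are intended to be called, assembled from the hunks below; the example_* wrappers are illustrative stand-ins, not code from this commit:

/* Temporal events (IRQs): save the xhlock history on entry and rewind it
 * on exit, so locks taken inside the IRQ don't leak into task state. */
static inline void example_hardirq_enter(void)
{
	current->hardirq_context++;
	crossrelease_hist_start(XHLOCK_HARD);	/* save point */
}

static inline void example_hardirq_exit(void)
{
	crossrelease_hist_end(XHLOCK_HARD);	/* rewind to the save point */
	current->hardirq_context--;
}

/* Independence points inside one task (a work item, a syscall boundary):
 * wipe the slate clean instead of saving and restoring. */
static void example_run_one_work(struct work_struct *work)
{
	lockdep_invariant_state(true);	/* this work starts with no history */
	work->func(work);
}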
include/linux/irqflags.h

@@ -26,7 +26,7 @@
 # define trace_hardirq_enter()			\
 do {						\
 	current->hardirq_context++;		\
-	crossrelease_hist_start(XHLOCK_HARD, 0);\
+	crossrelease_hist_start(XHLOCK_HARD);	\
 } while (0)
 # define trace_hardirq_exit()			\
 do {						\
@@ -36,7 +36,7 @@ do { \
 # define lockdep_softirq_enter()		\
 do {						\
 	current->softirq_context++;		\
-	crossrelease_hist_start(XHLOCK_SOFT, 0);\
+	crossrelease_hist_start(XHLOCK_SOFT);	\
 } while (0)
 # define lockdep_softirq_exit()		\
 do {						\
include/linux/lockdep.h

@@ -551,7 +551,6 @@ struct pin_cookie { };
 enum xhlock_context_t {
 	XHLOCK_HARD,
 	XHLOCK_SOFT,
-	XHLOCK_PROC,
 	XHLOCK_CTX_NR,
 };
@@ -580,8 +579,9 @@ extern void lock_commit_crosslock(struct lockdep_map *lock);
 #define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
 	{ .name = (_name), .key = (void *)(_key), .cross = 0, }

-extern void crossrelease_hist_start(enum xhlock_context_t c, bool force);
+extern void crossrelease_hist_start(enum xhlock_context_t c);
 extern void crossrelease_hist_end(enum xhlock_context_t c);
+extern void lockdep_invariant_state(bool force);
 extern void lockdep_init_task(struct task_struct *task);
 extern void lockdep_free_task(struct task_struct *task);
 #else /* !CROSSRELEASE */
@@ -593,8 +593,9 @@ extern void lockdep_free_task(struct task_struct *task);
 #define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
 	{ .name = (_name), .key = (void *)(_key), }

-static inline void crossrelease_hist_start(enum xhlock_context_t c, bool force) {}
+static inline void crossrelease_hist_start(enum xhlock_context_t c) {}
 static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
+static inline void lockdep_invariant_state(bool force) {}
 static inline void lockdep_init_task(struct task_struct *task) {}
 static inline void lockdep_free_task(struct task_struct *task) {}
 #endif /* CROSSRELEASE */
kernel/locking/lockdep.c

@@ -4623,13 +4623,8 @@ asmlinkage __visible void lockdep_sys_exit(void)
 	/*
 	 * The lock history for each syscall should be independent. So wipe the
 	 * slate clean on return to userspace.
-	 *
-	 * crossrelease_hist_end() works well here even when getting here
-	 * without starting (i.e. just after forking), because it rolls back
-	 * the index to point to the last entry, which is already invalid.
 	 */
-	crossrelease_hist_end(XHLOCK_PROC);
-	crossrelease_hist_start(XHLOCK_PROC, false);
+	lockdep_invariant_state(false);
 }

 void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
@@ -4723,19 +4718,47 @@ static inline void invalidate_xhlock(struct hist_lock *xhlock)
 }

 /*
- * Lock history stacks; we have 3 nested lock history stacks:
+ * Lock history stacks; we have 2 nested lock history stacks:
  *
  *   HARD(IRQ)
  *   SOFT(IRQ)
- *   PROC(ess)
  *
  * The thing is that once we complete a HARD/SOFT IRQ the future task locks
  * should not depend on any of the locks observed while running the IRQ. So
  * what we do is rewind the history buffer and erase all our knowledge of that
  * temporal event.
- *
- * The PROCess one is special though; it is used to annotate independence
- * inside a task.
+ */
+
+void crossrelease_hist_start(enum xhlock_context_t c)
+{
+	struct task_struct *cur = current;
+
+	if (!cur->xhlocks)
+		return;
+
+	cur->xhlock_idx_hist[c] = cur->xhlock_idx;
+	cur->hist_id_save[c] = cur->hist_id;
+}
+
+void crossrelease_hist_end(enum xhlock_context_t c)
+{
+	struct task_struct *cur = current;
+
+	if (cur->xhlocks) {
+		unsigned int idx = cur->xhlock_idx_hist[c];
+		struct hist_lock *h = &xhlock(idx);
+
+		cur->xhlock_idx = idx;
+
+		/* Check if the ring was overwritten. */
+		if (h->hist_id != cur->hist_id_save[c])
+			invalidate_xhlock(h);
+	}
+}
+
+/*
+ * lockdep_invariant_state() is used to annotate independence inside a task, to
+ * make one task look like multiple independent 'tasks'.
  *
  * Take for instance workqueues; each work is independent of the last. The
  * completion of a future work does not depend on the completion of a past work

@@ -4758,40 +4781,14 @@ static inline void invalidate_xhlock(struct hist_lock *xhlock)
  * entry. Similarly, independence per-definition means it does not depend on
  * prior state.
  */
-void crossrelease_hist_start(enum xhlock_context_t c, bool force)
+void lockdep_invariant_state(bool force)
 {
-	struct task_struct *cur = current;
-
-	if (!cur->xhlocks)
-		return;
-
 	/*
 	 * We call this at an invariant point, no current state, no history.
 	 * Verify the former, enforce the latter.
 	 */
-	if (c == XHLOCK_PROC) {
-		/* verified the former, ensure the latter */
-		WARN_ON_ONCE(!force && cur->lockdep_depth);
-		invalidate_xhlock(&xhlock(cur->xhlock_idx));
-	}
-
-	cur->xhlock_idx_hist[c] = cur->xhlock_idx;
-	cur->hist_id_save[c] = cur->hist_id;
-}
-
-void crossrelease_hist_end(enum xhlock_context_t c)
-{
-	struct task_struct *cur = current;
-
-	if (cur->xhlocks) {
-		unsigned int idx = cur->xhlock_idx_hist[c];
-		struct hist_lock *h = &xhlock(idx);
-
-		cur->xhlock_idx = idx;
-
-		/* Check if the ring was overwritten. */
-		if (h->hist_id != cur->hist_id_save[c])
-			invalidate_xhlock(h);
-	}
+	WARN_ON_ONCE(!force && current->lockdep_depth);
+	invalidate_xhlock(&xhlock(current->xhlock_idx));
 }

 static int cross_lock(struct lockdep_map *lock)
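For intuition, here is a hypothetical userspace miniature of the save/rewind scheme that crossrelease_hist_start()/crossrelease_hist_end() implement above; ring, push_entry, hist_save and hist_restore are made-up names, and the kernel's per-context bookkeeping (xhlock_idx_hist[], hist_id_save[]) is collapsed to a single slot:

#include <stdbool.h>

#define RING_SIZE 8u

struct entry {
	unsigned int hist_id;	/* monotonic id stamped at write time */
	bool valid;
};

static struct entry ring[RING_SIZE];
static unsigned int idx;	/* index of the last entry written */
static unsigned int next_id;

/* Record one lock-history entry, possibly overwriting an old slot. */
static void push_entry(void)
{
	idx++;
	ring[idx % RING_SIZE].hist_id = next_id++;
	ring[idx % RING_SIZE].valid = true;
}

static unsigned int idx_save, id_save;

/* start(): remember where we were, and which id sat there. */
static void hist_save(void)
{
	idx_save = idx;
	id_save = ring[idx % RING_SIZE].hist_id;
}

/* end(): rewind; if the slot now carries a different id, the ring wrapped
 * around and overwrote history, so the entry can no longer be trusted. */
static void hist_restore(void)
{
	idx = idx_save;
	if (ring[idx % RING_SIZE].hist_id != id_save)
		ring[idx % RING_SIZE].valid = false;
}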
kernel/workqueue.c

@@ -2094,8 +2094,8 @@ __acquires(&pool->lock)
 	lock_map_acquire(&pwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
 	/*
-	 * Strictly speaking we should do start(PROC) without holding any
-	 * locks, that is, before these two lock_map_acquire()'s.
+	 * Strictly speaking we should mark the invariant state without holding
+	 * any locks, that is, before these two lock_map_acquire()'s.
 	 *
 	 * However, that would result in:
 	 *

@@ -2107,14 +2107,14 @@ __acquires(&pool->lock)
 	 * Which would create W1->C->W1 dependencies, even though there is no
 	 * actual deadlock possible. There are two solutions, using a
 	 * read-recursive acquire on the work(queue) 'locks', but this will then
-	 * hit the lockdep limitation on recursive locks, or simly discard
+	 * hit the lockdep limitation on recursive locks, or simply discard
 	 * these locks.
 	 *
 	 * AFAICT there is no possible deadlock scenario between the
 	 * flush_work() and complete() primitives (except for single-threaded
 	 * workqueues), so hiding them isn't a problem.
 	 */
-	crossrelease_hist_start(XHLOCK_PROC, true);
+	lockdep_invariant_state(true);
 	trace_workqueue_execute_start(work);
 	worker->current_func(work);
 	/*

@@ -2122,7 +2122,6 @@ __acquires(&pool->lock)
 	 * point will only record its address.
 	 */
 	trace_workqueue_execute_end(work);
-	crossrelease_hist_end(XHLOCK_PROC);
 	lock_map_release(&lockdep_map);
 	lock_map_release(&pwq->wq->lockdep_map);
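Putting the workqueue hunks together: each iteration of the worker now marks the invariant state before calling into the work function, so no lock history survives from one work item to the next. A rough paraphrase of the relevant slice of process_one_work(); fetch_next_work() and the example_* lockdep maps are hypothetical stand-ins:

static void example_worker_loop(void)
{
	struct work_struct *work;

	while ((work = fetch_next_work()) != NULL) {	/* hypothetical helper */
		/* The 'virtual' locks that model flush_workqueue()/flush_work()
		 * dependencies on the workqueue and the work item. */
		lock_map_acquire(&example_wq_lockdep_map);
		lock_map_acquire(&example_work_lockdep_map);

		/* Each work item must appear to run in its own pristine
		 * 'task': discard all lock history accumulated so far. */
		lockdep_invariant_state(true);

		work->func(work);	/* may take and release locks */

		lock_map_release(&example_work_lockdep_map);
		lock_map_release(&example_wq_lockdep_map);
	}
}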