delayacct: Use raw_spinlocks
try_to_wake_up() might invoke delayacct_blkio_end() while holding the pi_lock (which is a raw_spinlock_t). delayacct_blkio_end() acquires task_delay_info.lock, which is a spinlock_t. This causes a "might sleep" splat on -RT, where non-raw spinlocks are converted to sleeping spinlocks. task_delay_info.lock is only held for a short amount of time, so latency-wise it is not a problem to convert it to a raw spinlock.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Balbir Singh <bsingharora@gmail.com>
Link: https://lkml.kernel.org/r/20180423161024.6710-1-bigeasy@linutronix.de
This commit is contained in:
parent 3bea9adc96
commit 02acc80d19
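The lock nesting that triggers the splat, reduced to a minimal illustrative sketch (the lock and function names below are stand-ins, not identifiers from the patch): holding a raw_spinlock_t, as try_to_wake_up() does with pi_lock, and then taking a spinlock_t is fine on a mainline kernel, but on PREEMPT_RT the inner lock is a sleeping rtmutex and cannot be acquired with preemption disabled.

#include <linux/spinlock.h>

/* Stand-ins for pi_lock and (pre-patch) task_delay_info.lock. */
static DEFINE_RAW_SPINLOCK(outer_raw);
static DEFINE_SPINLOCK(inner);

static void nesting_sketch(void)
{
        unsigned long flags;

        /* A raw spinlock really spins and disables preemption, even on -RT. */
        raw_spin_lock_irqsave(&outer_raw, flags);

        /*
         * On -RT a spinlock_t is a sleeping lock, so taking it here,
         * under the raw lock, produces the "BUG: sleeping function
         * called from invalid context" splat the commit message refers to.
         */
        spin_lock(&inner);
        spin_unlock(&inner);

        raw_spin_unlock_irqrestore(&outer_raw, flags);
}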
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -29,7 +29,7 @@
 #ifdef CONFIG_TASK_DELAY_ACCT
 struct task_delay_info {
-        spinlock_t lock;
+        raw_spinlock_t lock;
         unsigned int flags;     /* Private per-task flags */

         /* For each stat XXX, add following, aligned appropriately
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -44,23 +44,24 @@ void __delayacct_tsk_init(struct task_struct *tsk)
 {
         tsk->delays = kmem_cache_zalloc(delayacct_cache, GFP_KERNEL);
         if (tsk->delays)
-                spin_lock_init(&tsk->delays->lock);
+                raw_spin_lock_init(&tsk->delays->lock);
 }

 /*
  * Finish delay accounting for a statistic using its timestamps (@start),
  * accumalator (@total) and @count
  */
-static void delayacct_end(spinlock_t *lock, u64 *start, u64 *total, u32 *count)
+static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total,
+                          u32 *count)
 {
         s64 ns = ktime_get_ns() - *start;
         unsigned long flags;

         if (ns > 0) {
-                spin_lock_irqsave(lock, flags);
+                raw_spin_lock_irqsave(lock, flags);
                 *total += ns;
                 (*count)++;
-                spin_unlock_irqrestore(lock, flags);
+                raw_spin_unlock_irqrestore(lock, flags);
         }
 }

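For context (not part of this hunk): callers pass the per-task lock by address, so they need no change beyond the field's type. A rough sketch of how __delayacct_blkio_end() invokes delayacct_end() in this kernel version, paraphrased from kernel/delayacct.c of that era; details may differ slightly:

void __delayacct_blkio_end(struct task_struct *p)
{
        struct task_delay_info *delays = p->delays;
        u64 *total;
        u32 *count;

        /* The same helper accumulates either swapin or blkio delay. */
        if (p->delays->flags & DELAYACCT_PF_SWAPIN) {
                total = &delays->swapin_delay;
                count = &delays->swapin_count;
        } else {
                total = &delays->blkio_delay;
                count = &delays->blkio_count;
        }

        /* &delays->lock is now a raw_spinlock_t *, matching the new signature. */
        delayacct_end(&delays->lock, &delays->blkio_start, total, count);
}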
@@ -127,7 +128,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)

         /* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */

-        spin_lock_irqsave(&tsk->delays->lock, flags);
+        raw_spin_lock_irqsave(&tsk->delays->lock, flags);
         tmp = d->blkio_delay_total + tsk->delays->blkio_delay;
         d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp;
         tmp = d->swapin_delay_total + tsk->delays->swapin_delay;
@@ -137,7 +138,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
         d->blkio_count += tsk->delays->blkio_count;
         d->swapin_count += tsk->delays->swapin_count;
         d->freepages_count += tsk->delays->freepages_count;
-        spin_unlock_irqrestore(&tsk->delays->lock, flags);
+        raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);

         return 0;
 }
@@ -147,10 +148,10 @@ __u64 __delayacct_blkio_ticks(struct task_struct *tsk)
         __u64 ret;
         unsigned long flags;

-        spin_lock_irqsave(&tsk->delays->lock, flags);
+        raw_spin_lock_irqsave(&tsk->delays->lock, flags);
         ret = nsec_to_clock_t(tsk->delays->blkio_delay +
                                 tsk->delays->swapin_delay);
-        spin_unlock_irqrestore(&tsk->delays->lock, flags);
+        raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);
         return ret;
 }
