percpu_counter: make APIs irq safe

In my usage, the percpu_counter APIs are sometimes called with irqs
disabled and sometimes not, and lockdep complains about a potential
deadlock. Let's always take the percpu_counter lock in an irq-safe way.
There should be no performance penalty, as these are all slow paths.

Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Shaohua Li <shli@fusionio.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author: Shaohua Li
Date:   2013-10-24 09:06:45 +01:00 (committed by Jens Axboe)
parent 71fe07d040
commit 098faf5805

@@ -60,14 +60,15 @@ static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 {
 	int cpu;
+	unsigned long flags;
 
-	raw_spin_lock(&fbc->lock);
+	raw_spin_lock_irqsave(&fbc->lock, flags);
 	for_each_possible_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		*pcount = 0;
 	}
 	fbc->count = amount;
-	raw_spin_unlock(&fbc->lock);
+	raw_spin_unlock_irqrestore(&fbc->lock, flags);
 }
 EXPORT_SYMBOL(percpu_counter_set);
@@ -78,9 +79,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 	preempt_disable();
 	count = __this_cpu_read(*fbc->counters) + amount;
 	if (count >= batch || count <= -batch) {
-		raw_spin_lock(&fbc->lock);
+		unsigned long flags;
+		raw_spin_lock_irqsave(&fbc->lock, flags);
 		fbc->count += count;
-		raw_spin_unlock(&fbc->lock);
+		raw_spin_unlock_irqrestore(&fbc->lock, flags);
 		__this_cpu_write(*fbc->counters, 0);
 	} else {
 		__this_cpu_write(*fbc->counters, count);
@@ -97,14 +99,15 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
 {
 	s64 ret;
 	int cpu;
+	unsigned long flags;
 
-	raw_spin_lock(&fbc->lock);
+	raw_spin_lock_irqsave(&fbc->lock, flags);
 	ret = fbc->count;
 	for_each_online_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
 	}
-	raw_spin_unlock(&fbc->lock);
+	raw_spin_unlock_irqrestore(&fbc->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(__percpu_counter_sum);
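
For context, here is a minimal sketch (not part of this patch; the
demo_nr_events counter, demo_irq_handler() and demo_report_and_reset()
names are hypothetical) of the call pattern that motivates the change:
a percpu_counter bumped from an interrupt handler while process context
sums and resets it. Without the irqsave variants, the handler can
interrupt a task on the same CPU that already holds fbc->lock and
deadlock, which is exactly what lockdep reports.

#include <linux/interrupt.h>
#include <linux/percpu_counter.h>
#include <linux/printk.h>

/* Assume this counter was set up with percpu_counter_init() at probe time. */
static struct percpu_counter demo_nr_events;

/* irq context: the add may hit the slow path and take fbc->lock */
static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
	percpu_counter_add(&demo_nr_events, 1);
	return IRQ_HANDLED;
}

/* process context: summing and resetting also take fbc->lock */
static void demo_report_and_reset(void)
{
	s64 total = percpu_counter_sum(&demo_nr_events);

	pr_info("events since last report: %lld\n", (long long)total);
	percpu_counter_set(&demo_nr_events, 0);
}

With fbc->lock always taken via raw_spin_lock_irqsave(), this mix of
contexts is safe, and the extra cost is confined to the slow paths that
already take the spinlock.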