sbitmap: re-initialize allocation hints after resize
After a struct sbitmap_queue is resized smaller, the allocation hints may still be set to bits beyond the new depth of the bitmap. This means that, for example, if the number of blk-mq tags is reduced through sysfs, more requests than the nominal queue depth may be in flight.

It's tempting to fix this at resize time by doing a one-time reinitialization of the hints, but this can race with __sbitmap_queue_get() updating the hint. Instead, check the hint before we use it. This caused no measurable performance difference in my synthetic benchmarks.

Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 98d95416db
commit 05fd095d53
@@ -246,10 +246,15 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_resize);
 
 int __sbitmap_queue_get(struct sbitmap_queue *sbq)
 {
-        unsigned int hint;
+        unsigned int hint, depth;
         int nr;
 
         hint = this_cpu_read(*sbq->alloc_hint);
+        depth = READ_ONCE(sbq->sb.depth);
+        if (unlikely(hint >= depth)) {
+                hint = depth ? prandom_u32() % depth : 0;
+                this_cpu_write(*sbq->alloc_hint, hint);
+        }
         nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);
 
         if (nr == -1) {
@@ -258,7 +263,7 @@ int __sbitmap_queue_get(struct sbitmap_queue *sbq)
         } else if (nr == hint || unlikely(sbq->round_robin)) {
                 /* Only update the hint if we used it. */
                 hint = nr + 1;
-                if (hint >= sbq->sb.depth - 1)
+                if (hint >= depth - 1)
                         hint = 0;
                 this_cpu_write(*sbq->alloc_hint, hint);
         }