block: move sysfs_lock into elevator_init
Both callers take the lock just around the function call, so move the locking into elevator_init() itself. Also remove the now pointless blk_mq_sched_init() wrapper.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
Tested-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent ddb7253254
commit acddf3b308
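The diffs below are the actual change. As a rough illustration of the pattern being applied (not kernel code), here is a minimal userspace sketch: pthread_mutex_t stands in for q->sysfs_lock, and struct queue, caller_old(), elevator_init_old() and elevator_init_new() are made-up names for this example.

/*
 * Userspace model only, compile with: cc -pthread sketch.c
 * pthread_mutex_t stands in for q->sysfs_lock; none of these names
 * exist in the kernel.
 */
#include <pthread.h>
#include <stdio.h>

struct queue {
	pthread_mutex_t sysfs_lock;
	void *elevator;
};

/* Old pattern: the callee assumes the lock is already held... */
static int elevator_init_old(struct queue *q)
{
	if (q->elevator)		/* already initialized */
		return 0;
	q->elevator = &q->sysfs_lock;	/* stand-in for allocating a scheduler */
	return 0;
}

/* ...so every caller has to wrap the call in lock/unlock itself. */
static int caller_old(struct queue *q)
{
	int ret;

	pthread_mutex_lock(&q->sysfs_lock);
	ret = elevator_init_old(q);
	pthread_mutex_unlock(&q->sysfs_lock);
	return ret;
}

/*
 * New pattern: the lock is taken inside the init function and every
 * early exit funnels through a single out_unlock label, so callers
 * just call the function.
 */
static int elevator_init_new(struct queue *q)
{
	int err = 0;

	pthread_mutex_lock(&q->sysfs_lock);
	if (q->elevator)		/* already initialized */
		goto out_unlock;
	q->elevator = &q->sysfs_lock;	/* stand-in for allocating a scheduler */
out_unlock:
	pthread_mutex_unlock(&q->sysfs_lock);
	return err;
}

int main(void)
{
	struct queue q = { .sysfs_lock = PTHREAD_MUTEX_INITIALIZER };

	printf("old pattern: %d\n", caller_old(&q));

	q.elevator = NULL;
	printf("new pattern: %d\n", elevator_init_new(&q));
	return 0;
}

Both calls print 0; the only difference is where the lock/unlock pair lives, and that early exits in the new version go through a single out_unlock label, which is exactly the shape of the elevator_init() change in block/elevator.c below.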
block/blk-core.c
@@ -1175,16 +1175,8 @@ int blk_init_allocated_queue(struct request_queue *q)
 
 	q->sg_reserved_size = INT_MAX;
 
-	/* Protect q->elevator from elevator_change */
-	mutex_lock(&q->sysfs_lock);
-
 	/* init elevator */
-	if (elevator_init(q)) {
-		mutex_unlock(&q->sysfs_lock);
+	if (elevator_init(q))
 		goto out_exit_flush_rq;
-	}
-
-	mutex_unlock(&q->sysfs_lock);
 	return 0;
 
 out_exit_flush_rq:
block/blk-mq-sched.c
@@ -647,14 +647,3 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
 		blk_mq_sched_tags_teardown(q);
 	q->elevator = NULL;
 }
-
-int blk_mq_sched_init(struct request_queue *q)
-{
-	int ret;
-
-	mutex_lock(&q->sysfs_lock);
-	ret = elevator_init(q);
-	mutex_unlock(&q->sysfs_lock);
-
-	return ret;
-}
block/blk-mq-sched.h
@@ -33,8 +33,6 @@ int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
 void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
 			    unsigned int hctx_idx);
 
-int blk_mq_sched_init(struct request_queue *q);
-
 static inline bool
 blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 {
block/blk-mq.c
@@ -2573,7 +2573,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
 		int ret;
 
-		ret = blk_mq_sched_init(q);
+		ret = elevator_init(q);
 		if (ret)
 			return ERR_PTR(ret);
 	}
block/elevator.c
@@ -202,16 +202,15 @@ static void elevator_release(struct kobject *kobj)
 int elevator_init(struct request_queue *q)
 {
 	struct elevator_type *e = NULL;
-	int err;
+	int err = 0;
 
 	/*
 	 * q->sysfs_lock must be held to provide mutual exclusion between
 	 * elevator_switch() and here.
 	 */
-	lockdep_assert_held(&q->sysfs_lock);
-
+	mutex_lock(&q->sysfs_lock);
 	if (unlikely(q->elevator))
-		return 0;
+		goto out_unlock;
 
 	/*
 	 * Use the default elevator specified by config boot param for
@@ -237,7 +236,7 @@ int elevator_init(struct request_queue *q)
 		if (q->nr_hw_queues == 1)
 			e = elevator_get(q, "mq-deadline", false);
 		if (!e)
-			return 0;
+			goto out_unlock;
 	} else
 		e = elevator_get(q, CONFIG_DEFAULT_IOSCHED, false);
 
@@ -255,6 +254,8 @@ int elevator_init(struct request_queue *q)
 		err = e->ops.sq.elevator_init_fn(q, e);
 	if (err)
 		elevator_put(e);
+out_unlock:
+	mutex_unlock(&q->sysfs_lock);
 	return err;
 }
 