forked from luck/tmp_suning_uos_patched
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes and updates from Jens Axboe:
 "Some fixes and followup features/changes that should go in, in this
  merge window. This contains:

   - Two fixes for lightnvm from Javier, fixing problems in the new
     code merged previously in this merge window.

   - A fix from Jan for the backing device changes, fixing an issue in
     NFS that causes a failure to mount on certain setups.

   - A change from Christoph, cleaning up the blk-mq init and exit
     request paths.

   - Remove elevator_change(), which is now unused. From Bart.

   - A fix for queue operation invocation on a dead queue, from Bart.

   - A series fixing up mtip32xx for blk-mq scheduling, removing a
     bandaid we previously had in place for this. From me.

   - A regression fix for this series, fixing a case where we wait on
     workqueue flushing from an invalid (non-blocking) context. From me.

   - A fix/optimization from Ming, ensuring that we don't both quiesce
     and freeze a queue at the same time.

   - A fix from Peter on lock ordering for CPU hotplug. Not a real
     problem right now, but will be once the CPU hotplug rework goes in.

   - A series from Omar, cleaning up our blk-mq debugfs support, and
     adding support for exporting info from schedulers in debugfs as
     well. This is really useful in debugging stalls or livelocks."

* 'for-linus' of git://git.kernel.dk/linux-block: (28 commits)
  mq-deadline: add debugfs attributes
  kyber: add debugfs attributes
  blk-mq-debugfs: allow schedulers to register debugfs attributes
  blk-mq: untangle debugfs and sysfs
  blk-mq: move debugfs declarations to a separate header file
  blk-mq: Do not invoke queue operations on a dead queue
  blk-mq-debugfs: get rid of a bunch of boilerplate
  blk-mq-debugfs: rename hw queue directories from <n> to hctx<n>
  blk-mq-debugfs: don't open code strstrip()
  blk-mq-debugfs: error on long write to queue "state" file
  blk-mq-debugfs: clean up flag definitions
  blk-mq-debugfs: separate flags with |
  nfs: Fix bdi handling for cloned superblocks
  block/mq: Cure cpu hotplug lock inversion
  lightnvm: fix bad back free on error path
  lightnvm: create cmd before allocating request
  blk-mq: don't use sync workqueue flushing from drivers
  mtip32xx: convert internal commands to regular block infrastructure
  mtip32xx: cleanup internal tag assumptions
  block: don't call blk_mq_quiesce_queue() after queue is frozen
  ...
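As a quick illustration of Christoph's init/exit request path cleanup (visible in the loop, nbd, rbd, virtio_blk, dm-rq and nvme-fc hunks below), here is a minimal sketch of a driver callback in the new form. The my_* names are illustrative only and are not part of the patch:

  #include <linux/blk-mq.h>

  struct my_cmd {
  	void *queue_data;	/* hypothetical per-request driver state */
  };

  /*
   * New-style .init_request: the tag set replaces the old bare
   * "void *data" argument and the per-queue request_idx parameter is
   * gone; driver private data now comes from set->driver_data.
   */
  static int my_init_request(struct blk_mq_tag_set *set, struct request *rq,
  			   unsigned int hctx_idx, unsigned int numa_node)
  {
  	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

  	cmd->queue_data = set->driver_data;
  	return 0;
  }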
This commit is contained in: commit 044f1daaaa
|
@ -561,13 +561,9 @@ void blk_cleanup_queue(struct request_queue *q)
|
|||
* prevent that q->request_fn() gets invoked after draining finished.
|
||||
*/
|
||||
blk_freeze_queue(q);
|
||||
if (!q->mq_ops) {
|
||||
spin_lock_irq(lock);
|
||||
spin_lock_irq(lock);
|
||||
if (!q->mq_ops)
|
||||
__blk_drain_queue(q, true);
|
||||
} else {
|
||||
blk_mq_debugfs_unregister_mq(q);
|
||||
spin_lock_irq(lock);
|
||||
}
|
||||
queue_flag_set(QUEUE_FLAG_DEAD, q);
|
||||
spin_unlock_irq(lock);
|
||||
|
||||
|
|
File diff suppressed because it is too large
82	block/blk-mq-debugfs.h	Normal file
|
@ -0,0 +1,82 @@
|
|||
#ifndef INT_BLK_MQ_DEBUGFS_H
|
||||
#define INT_BLK_MQ_DEBUGFS_H
|
||||
|
||||
#ifdef CONFIG_BLK_DEBUG_FS
|
||||
|
||||
#include <linux/seq_file.h>
|
||||
|
||||
struct blk_mq_debugfs_attr {
|
||||
const char *name;
|
||||
umode_t mode;
|
||||
int (*show)(void *, struct seq_file *);
|
||||
ssize_t (*write)(void *, const char __user *, size_t, loff_t *);
|
||||
/* Set either .show or .seq_ops. */
|
||||
const struct seq_operations *seq_ops;
|
||||
};
|
||||
|
||||
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq);
|
||||
int blk_mq_debugfs_rq_show(struct seq_file *m, void *v);
|
||||
|
||||
int blk_mq_debugfs_register(struct request_queue *q);
|
||||
void blk_mq_debugfs_unregister(struct request_queue *q);
|
||||
int blk_mq_debugfs_register_hctx(struct request_queue *q,
|
||||
struct blk_mq_hw_ctx *hctx);
|
||||
void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
|
||||
int blk_mq_debugfs_register_hctxs(struct request_queue *q);
|
||||
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);
|
||||
|
||||
int blk_mq_debugfs_register_sched(struct request_queue *q);
|
||||
void blk_mq_debugfs_unregister_sched(struct request_queue *q);
|
||||
int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
|
||||
struct blk_mq_hw_ctx *hctx);
|
||||
void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
|
||||
#else
|
||||
static inline int blk_mq_debugfs_register(struct request_queue *q)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void blk_mq_debugfs_unregister(struct request_queue *q)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int blk_mq_debugfs_register_hctx(struct request_queue *q,
|
||||
struct blk_mq_hw_ctx *hctx)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int blk_mq_debugfs_register_hctxs(struct request_queue *q)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int blk_mq_debugfs_register_sched(struct request_queue *q)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void blk_mq_debugfs_unregister_sched(struct request_queue *q)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
|
||||
struct blk_mq_hw_ctx *hctx)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
|
@ -11,6 +11,7 @@
|
|||
|
||||
#include "blk.h"
|
||||
#include "blk-mq.h"
|
||||
#include "blk-mq-debugfs.h"
|
||||
#include "blk-mq-sched.h"
|
||||
#include "blk-mq-tag.h"
|
||||
#include "blk-wbt.h"
|
||||
|
@ -82,11 +83,7 @@ struct request *blk_mq_sched_get_request(struct request_queue *q,
|
|||
if (likely(!data->hctx))
|
||||
data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
|
||||
|
||||
/*
|
||||
* For a reserved tag, allocate a normal request since we might
|
||||
* have driver dependencies on the value of the internal tag.
|
||||
*/
|
||||
if (e && !(data->flags & BLK_MQ_REQ_RESERVED)) {
|
||||
if (e) {
|
||||
data->flags |= BLK_MQ_REQ_INTERNAL;
|
||||
|
||||
/*
|
||||
|
@ -476,6 +473,8 @@ int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
|
|||
}
|
||||
}
|
||||
|
||||
blk_mq_debugfs_register_sched_hctx(q, hctx);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -487,6 +486,8 @@ void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
|
|||
if (!e)
|
||||
return;
|
||||
|
||||
blk_mq_debugfs_unregister_sched_hctx(hctx);
|
||||
|
||||
if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
|
||||
e->type->ops.mq.exit_hctx(hctx, hctx_idx);
|
||||
hctx->sched_data = NULL;
|
||||
|
@ -523,8 +524,10 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
|
|||
if (ret)
|
||||
goto err;
|
||||
|
||||
if (e->ops.mq.init_hctx) {
|
||||
queue_for_each_hw_ctx(q, hctx, i) {
|
||||
blk_mq_debugfs_register_sched(q);
|
||||
|
||||
queue_for_each_hw_ctx(q, hctx, i) {
|
||||
if (e->ops.mq.init_hctx) {
|
||||
ret = e->ops.mq.init_hctx(hctx, i);
|
||||
if (ret) {
|
||||
eq = q->elevator;
|
||||
|
@ -533,6 +536,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
|
|||
return ret;
|
||||
}
|
||||
}
|
||||
blk_mq_debugfs_register_sched_hctx(q, hctx);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -548,14 +552,14 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
|
|||
struct blk_mq_hw_ctx *hctx;
|
||||
unsigned int i;
|
||||
|
||||
if (e->type->ops.mq.exit_hctx) {
|
||||
queue_for_each_hw_ctx(q, hctx, i) {
|
||||
if (hctx->sched_data) {
|
||||
e->type->ops.mq.exit_hctx(hctx, i);
|
||||
hctx->sched_data = NULL;
|
||||
}
|
||||
queue_for_each_hw_ctx(q, hctx, i) {
|
||||
blk_mq_debugfs_unregister_sched_hctx(hctx);
|
||||
if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
|
||||
e->type->ops.mq.exit_hctx(hctx, i);
|
||||
hctx->sched_data = NULL;
|
||||
}
|
||||
}
|
||||
blk_mq_debugfs_unregister_sched(q);
|
||||
if (e->type->ops.mq.exit_sched)
|
||||
e->type->ops.mq.exit_sched(e);
|
||||
blk_mq_sched_tags_teardown(q);
|
||||
|
|
|
@ -258,8 +258,6 @@ static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
|
|||
queue_for_each_hw_ctx(q, hctx, i)
|
||||
blk_mq_unregister_hctx(hctx);
|
||||
|
||||
blk_mq_debugfs_unregister_mq(q);
|
||||
|
||||
kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
|
||||
kobject_del(&q->mq_kobj);
|
||||
kobject_put(&dev->kobj);
|
||||
|
@ -318,8 +316,6 @@ int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
|
|||
|
||||
kobject_uevent(&q->mq_kobj, KOBJ_ADD);
|
||||
|
||||
blk_mq_debugfs_register(q);
|
||||
|
||||
queue_for_each_hw_ctx(q, hctx, i) {
|
||||
ret = blk_mq_register_hctx(hctx);
|
||||
if (ret)
|
||||
|
@ -335,8 +331,6 @@ int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
|
|||
while (--i >= 0)
|
||||
blk_mq_unregister_hctx(q->queue_hw_ctx[i]);
|
||||
|
||||
blk_mq_debugfs_unregister_mq(q);
|
||||
|
||||
kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
|
||||
kobject_del(&q->mq_kobj);
|
||||
kobject_put(&dev->kobj);
|
||||
|
@ -364,8 +358,6 @@ void blk_mq_sysfs_unregister(struct request_queue *q)
|
|||
if (!q->mq_sysfs_init_done)
|
||||
goto unlock;
|
||||
|
||||
blk_mq_debugfs_unregister_mq(q);
|
||||
|
||||
queue_for_each_hw_ctx(q, hctx, i)
|
||||
blk_mq_unregister_hctx(hctx);
|
||||
|
||||
|
@ -382,8 +374,6 @@ int blk_mq_sysfs_register(struct request_queue *q)
|
|||
if (!q->mq_sysfs_init_done)
|
||||
goto unlock;
|
||||
|
||||
blk_mq_debugfs_register_mq(q);
|
||||
|
||||
queue_for_each_hw_ctx(q, hctx, i) {
|
||||
ret = blk_mq_register_hctx(hctx);
|
||||
if (ret)
|
||||
|
|
|
@ -31,6 +31,7 @@
|
|||
#include <linux/blk-mq.h>
|
||||
#include "blk.h"
|
||||
#include "blk-mq.h"
|
||||
#include "blk-mq-debugfs.h"
|
||||
#include "blk-mq-tag.h"
|
||||
#include "blk-stat.h"
|
||||
#include "blk-wbt.h"
|
||||
|
@ -41,6 +42,7 @@ static LIST_HEAD(all_q_list);
|
|||
|
||||
static void blk_mq_poll_stats_start(struct request_queue *q);
|
||||
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
|
||||
static void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync);
|
||||
|
||||
static int blk_mq_poll_stats_bkt(const struct request *rq)
|
||||
{
|
||||
|
@ -166,7 +168,7 @@ void blk_mq_quiesce_queue(struct request_queue *q)
|
|||
unsigned int i;
|
||||
bool rcu = false;
|
||||
|
||||
blk_mq_stop_hw_queues(q);
|
||||
__blk_mq_stop_hw_queues(q, true);
|
||||
|
||||
queue_for_each_hw_ctx(q, hctx, i) {
|
||||
if (hctx->flags & BLK_MQ_F_BLOCKING)
|
||||
|
@ -1218,20 +1220,34 @@ bool blk_mq_queue_stopped(struct request_queue *q)
|
|||
}
|
||||
EXPORT_SYMBOL(blk_mq_queue_stopped);
|
||||
|
||||
static void __blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx, bool sync)
|
||||
{
|
||||
if (sync)
|
||||
cancel_delayed_work_sync(&hctx->run_work);
|
||||
else
|
||||
cancel_delayed_work(&hctx->run_work);
|
||||
|
||||
set_bit(BLK_MQ_S_STOPPED, &hctx->state);
|
||||
}
|
||||
|
||||
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
|
||||
{
|
||||
cancel_delayed_work_sync(&hctx->run_work);
|
||||
set_bit(BLK_MQ_S_STOPPED, &hctx->state);
|
||||
__blk_mq_stop_hw_queue(hctx, false);
|
||||
}
|
||||
EXPORT_SYMBOL(blk_mq_stop_hw_queue);
|
||||
|
||||
void blk_mq_stop_hw_queues(struct request_queue *q)
|
||||
void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync)
|
||||
{
|
||||
struct blk_mq_hw_ctx *hctx;
|
||||
int i;
|
||||
|
||||
queue_for_each_hw_ctx(q, hctx, i)
|
||||
blk_mq_stop_hw_queue(hctx);
|
||||
__blk_mq_stop_hw_queue(hctx, sync);
|
||||
}
|
||||
|
||||
void blk_mq_stop_hw_queues(struct request_queue *q)
|
||||
{
|
||||
__blk_mq_stop_hw_queues(q, false);
|
||||
}
|
||||
EXPORT_SYMBOL(blk_mq_stop_hw_queues);
|
||||
|
||||
|
@ -1655,8 +1671,7 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
|
|||
|
||||
if (!rq)
|
||||
continue;
|
||||
set->ops->exit_request(set->driver_data, rq,
|
||||
hctx_idx, i);
|
||||
set->ops->exit_request(set, rq, hctx_idx);
|
||||
tags->static_rqs[i] = NULL;
|
||||
}
|
||||
}
|
||||
|
@ -1787,8 +1802,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
|
|||
|
||||
tags->static_rqs[i] = rq;
|
||||
if (set->ops->init_request) {
|
||||
if (set->ops->init_request(set->driver_data,
|
||||
rq, hctx_idx, i,
|
||||
if (set->ops->init_request(set, rq, hctx_idx,
|
||||
node)) {
|
||||
tags->static_rqs[i] = NULL;
|
||||
goto fail;
|
||||
|
@ -1849,14 +1863,12 @@ static void blk_mq_exit_hctx(struct request_queue *q,
|
|||
struct blk_mq_tag_set *set,
|
||||
struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
|
||||
{
|
||||
unsigned flush_start_tag = set->queue_depth;
|
||||
blk_mq_debugfs_unregister_hctx(hctx);
|
||||
|
||||
blk_mq_tag_idle(hctx);
|
||||
|
||||
if (set->ops->exit_request)
|
||||
set->ops->exit_request(set->driver_data,
|
||||
hctx->fq->flush_rq, hctx_idx,
|
||||
flush_start_tag + hctx_idx);
|
||||
set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
|
||||
|
||||
blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
|
||||
|
||||
|
@ -1889,7 +1901,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
|
|||
struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
|
||||
{
|
||||
int node;
|
||||
unsigned flush_start_tag = set->queue_depth;
|
||||
|
||||
node = hctx->numa_node;
|
||||
if (node == NUMA_NO_NODE)
|
||||
|
@ -1933,14 +1944,15 @@ static int blk_mq_init_hctx(struct request_queue *q,
|
|||
goto sched_exit_hctx;
|
||||
|
||||
if (set->ops->init_request &&
|
||||
set->ops->init_request(set->driver_data,
|
||||
hctx->fq->flush_rq, hctx_idx,
|
||||
flush_start_tag + hctx_idx, node))
|
||||
set->ops->init_request(set, hctx->fq->flush_rq, hctx_idx,
|
||||
node))
|
||||
goto free_fq;
|
||||
|
||||
if (hctx->flags & BLK_MQ_F_BLOCKING)
|
||||
init_srcu_struct(&hctx->queue_rq_srcu);
|
||||
|
||||
blk_mq_debugfs_register_hctx(q, hctx);
|
||||
|
||||
return 0;
|
||||
|
||||
free_fq:
|
||||
|
@ -2329,15 +2341,15 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
|
|||
|
||||
blk_mq_init_cpu_queues(q, set->nr_hw_queues);
|
||||
|
||||
get_online_cpus();
|
||||
mutex_lock(&all_q_mutex);
|
||||
get_online_cpus();
|
||||
|
||||
list_add_tail(&q->all_q_node, &all_q_list);
|
||||
blk_mq_add_queue_tag_set(set, q);
|
||||
blk_mq_map_swqueue(q, cpu_online_mask);
|
||||
|
||||
mutex_unlock(&all_q_mutex);
|
||||
put_online_cpus();
|
||||
mutex_unlock(&all_q_mutex);
|
||||
|
||||
if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
|
||||
int ret;
|
||||
|
@ -2378,6 +2390,7 @@ static void blk_mq_queue_reinit(struct request_queue *q,
|
|||
{
|
||||
WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
|
||||
|
||||
blk_mq_debugfs_unregister_hctxs(q);
|
||||
blk_mq_sysfs_unregister(q);
|
||||
|
||||
/*
|
||||
|
@ -2389,6 +2402,7 @@ static void blk_mq_queue_reinit(struct request_queue *q,
|
|||
blk_mq_map_swqueue(q, online_mask);
|
||||
|
||||
blk_mq_sysfs_register(q);
|
||||
blk_mq_debugfs_register_hctxs(q);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -2617,7 +2631,6 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
|
|||
return -EINVAL;
|
||||
|
||||
blk_mq_freeze_queue(q);
|
||||
blk_mq_quiesce_queue(q);
|
||||
|
||||
ret = 0;
|
||||
queue_for_each_hw_ctx(q, hctx, i) {
|
||||
|
@ -2643,7 +2656,6 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
|
|||
q->nr_requests = nr;
|
||||
|
||||
blk_mq_unfreeze_queue(q);
|
||||
blk_mq_start_stopped_hw_queues(q, true);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -83,34 +83,6 @@ extern int blk_mq_sysfs_register(struct request_queue *q);
|
|||
extern void blk_mq_sysfs_unregister(struct request_queue *q);
|
||||
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
|
||||
|
||||
/*
|
||||
* debugfs helpers
|
||||
*/
|
||||
#ifdef CONFIG_BLK_DEBUG_FS
|
||||
int blk_mq_debugfs_register(struct request_queue *q);
|
||||
void blk_mq_debugfs_unregister(struct request_queue *q);
|
||||
int blk_mq_debugfs_register_mq(struct request_queue *q);
|
||||
void blk_mq_debugfs_unregister_mq(struct request_queue *q);
|
||||
#else
|
||||
static inline int blk_mq_debugfs_register(struct request_queue *q)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void blk_mq_debugfs_unregister(struct request_queue *q)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int blk_mq_debugfs_register_mq(struct request_queue *q)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void blk_mq_debugfs_unregister_mq(struct request_queue *q)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
extern void blk_mq_rq_timed_out(struct request *req, bool reserved);
|
||||
|
||||
void blk_mq_release(struct request_queue *q);
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
|
||||
#include "blk.h"
|
||||
#include "blk-mq.h"
|
||||
#include "blk-mq-debugfs.h"
|
||||
#include "blk-wbt.h"
|
||||
|
||||
struct queue_sysfs_entry {
|
||||
|
@ -889,6 +890,8 @@ int blk_register_queue(struct gendisk *disk)
|
|||
if (q->mq_ops)
|
||||
__blk_mq_register_dev(dev, q);
|
||||
|
||||
blk_mq_debugfs_register(q);
|
||||
|
||||
kobject_uevent(&q->kobj, KOBJ_ADD);
|
||||
|
||||
wbt_enable_default(q);
|
||||
|
|
|
@ -950,7 +950,6 @@ static int elevator_switch_mq(struct request_queue *q,
|
|||
int ret;
|
||||
|
||||
blk_mq_freeze_queue(q);
|
||||
blk_mq_quiesce_queue(q);
|
||||
|
||||
if (q->elevator) {
|
||||
if (q->elevator->registered)
|
||||
|
@ -978,9 +977,7 @@ static int elevator_switch_mq(struct request_queue *q,
|
|||
|
||||
out:
|
||||
blk_mq_unfreeze_queue(q);
|
||||
blk_mq_start_stopped_hw_queues(q, true);
|
||||
return ret;
|
||||
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1088,19 +1085,6 @@ static int __elevator_change(struct request_queue *q, const char *name)
|
|||
return elevator_switch(q, e);
|
||||
}
|
||||
|
||||
int elevator_change(struct request_queue *q, const char *name)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/* Protect q->elevator from elevator_init() */
|
||||
mutex_lock(&q->sysfs_lock);
|
||||
ret = __elevator_change(q, name);
|
||||
mutex_unlock(&q->sysfs_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(elevator_change);
|
||||
|
||||
static inline bool elv_support_iosched(struct request_queue *q)
|
||||
{
|
||||
if (q->mq_ops && q->tag_set && (q->tag_set->flags &
|
||||
|
|
|
@ -26,6 +26,7 @@
|
|||
|
||||
#include "blk.h"
|
||||
#include "blk-mq.h"
|
||||
#include "blk-mq-debugfs.h"
|
||||
#include "blk-mq-sched.h"
|
||||
#include "blk-mq-tag.h"
|
||||
#include "blk-stat.h"
|
||||
|
@ -683,6 +684,131 @@ static struct elv_fs_entry kyber_sched_attrs[] = {
|
|||
};
|
||||
#undef KYBER_LAT_ATTR
|
||||
|
||||
#ifdef CONFIG_BLK_DEBUG_FS
|
||||
#define KYBER_DEBUGFS_DOMAIN_ATTRS(domain, name) \
|
||||
static int kyber_##name##_tokens_show(void *data, struct seq_file *m) \
|
||||
{ \
|
||||
struct request_queue *q = data; \
|
||||
struct kyber_queue_data *kqd = q->elevator->elevator_data; \
|
||||
\
|
||||
sbitmap_queue_show(&kqd->domain_tokens[domain], m); \
|
||||
return 0; \
|
||||
} \
|
||||
\
|
||||
static void *kyber_##name##_rqs_start(struct seq_file *m, loff_t *pos) \
|
||||
__acquires(&khd->lock) \
|
||||
{ \
|
||||
struct blk_mq_hw_ctx *hctx = m->private; \
|
||||
struct kyber_hctx_data *khd = hctx->sched_data; \
|
||||
\
|
||||
spin_lock(&khd->lock); \
|
||||
return seq_list_start(&khd->rqs[domain], *pos); \
|
||||
} \
|
||||
\
|
||||
static void *kyber_##name##_rqs_next(struct seq_file *m, void *v, \
|
||||
loff_t *pos) \
|
||||
{ \
|
||||
struct blk_mq_hw_ctx *hctx = m->private; \
|
||||
struct kyber_hctx_data *khd = hctx->sched_data; \
|
||||
\
|
||||
return seq_list_next(v, &khd->rqs[domain], pos); \
|
||||
} \
|
||||
\
|
||||
static void kyber_##name##_rqs_stop(struct seq_file *m, void *v) \
|
||||
__releases(&khd->lock) \
|
||||
{ \
|
||||
struct blk_mq_hw_ctx *hctx = m->private; \
|
||||
struct kyber_hctx_data *khd = hctx->sched_data; \
|
||||
\
|
||||
spin_unlock(&khd->lock); \
|
||||
} \
|
||||
\
|
||||
static const struct seq_operations kyber_##name##_rqs_seq_ops = { \
|
||||
.start = kyber_##name##_rqs_start, \
|
||||
.next = kyber_##name##_rqs_next, \
|
||||
.stop = kyber_##name##_rqs_stop, \
|
||||
.show = blk_mq_debugfs_rq_show, \
|
||||
}; \
|
||||
\
|
||||
static int kyber_##name##_waiting_show(void *data, struct seq_file *m) \
|
||||
{ \
|
||||
struct blk_mq_hw_ctx *hctx = data; \
|
||||
struct kyber_hctx_data *khd = hctx->sched_data; \
|
||||
wait_queue_t *wait = &khd->domain_wait[domain]; \
|
||||
\
|
||||
seq_printf(m, "%d\n", !list_empty_careful(&wait->task_list)); \
|
||||
return 0; \
|
||||
}
|
||||
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
|
||||
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_SYNC_WRITE, sync_write)
|
||||
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
|
||||
#undef KYBER_DEBUGFS_DOMAIN_ATTRS
|
||||
|
||||
static int kyber_async_depth_show(void *data, struct seq_file *m)
|
||||
{
|
||||
struct request_queue *q = data;
|
||||
struct kyber_queue_data *kqd = q->elevator->elevator_data;
|
||||
|
||||
seq_printf(m, "%u\n", kqd->async_depth);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int kyber_cur_domain_show(void *data, struct seq_file *m)
|
||||
{
|
||||
struct blk_mq_hw_ctx *hctx = data;
|
||||
struct kyber_hctx_data *khd = hctx->sched_data;
|
||||
|
||||
switch (khd->cur_domain) {
|
||||
case KYBER_READ:
|
||||
seq_puts(m, "READ\n");
|
||||
break;
|
||||
case KYBER_SYNC_WRITE:
|
||||
seq_puts(m, "SYNC_WRITE\n");
|
||||
break;
|
||||
case KYBER_OTHER:
|
||||
seq_puts(m, "OTHER\n");
|
||||
break;
|
||||
default:
|
||||
seq_printf(m, "%u\n", khd->cur_domain);
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int kyber_batching_show(void *data, struct seq_file *m)
|
||||
{
|
||||
struct blk_mq_hw_ctx *hctx = data;
|
||||
struct kyber_hctx_data *khd = hctx->sched_data;
|
||||
|
||||
seq_printf(m, "%u\n", khd->batching);
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define KYBER_QUEUE_DOMAIN_ATTRS(name) \
|
||||
{#name "_tokens", 0400, kyber_##name##_tokens_show}
|
||||
static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = {
|
||||
KYBER_QUEUE_DOMAIN_ATTRS(read),
|
||||
KYBER_QUEUE_DOMAIN_ATTRS(sync_write),
|
||||
KYBER_QUEUE_DOMAIN_ATTRS(other),
|
||||
{"async_depth", 0400, kyber_async_depth_show},
|
||||
{},
|
||||
};
|
||||
#undef KYBER_QUEUE_DOMAIN_ATTRS
|
||||
|
||||
#define KYBER_HCTX_DOMAIN_ATTRS(name) \
|
||||
{#name "_rqs", 0400, .seq_ops = &kyber_##name##_rqs_seq_ops}, \
|
||||
{#name "_waiting", 0400, kyber_##name##_waiting_show}
|
||||
static const struct blk_mq_debugfs_attr kyber_hctx_debugfs_attrs[] = {
|
||||
KYBER_HCTX_DOMAIN_ATTRS(read),
|
||||
KYBER_HCTX_DOMAIN_ATTRS(sync_write),
|
||||
KYBER_HCTX_DOMAIN_ATTRS(other),
|
||||
{"cur_domain", 0400, kyber_cur_domain_show},
|
||||
{"batching", 0400, kyber_batching_show},
|
||||
{},
|
||||
};
|
||||
#undef KYBER_HCTX_DOMAIN_ATTRS
|
||||
#endif
|
||||
|
||||
static struct elevator_type kyber_sched = {
|
||||
.ops.mq = {
|
||||
.init_sched = kyber_init_sched,
|
||||
|
@ -696,6 +822,10 @@ static struct elevator_type kyber_sched = {
|
|||
.has_work = kyber_has_work,
|
||||
},
|
||||
.uses_mq = true,
|
||||
#ifdef CONFIG_BLK_DEBUG_FS
|
||||
.queue_debugfs_attrs = kyber_queue_debugfs_attrs,
|
||||
.hctx_debugfs_attrs = kyber_hctx_debugfs_attrs,
|
||||
#endif
|
||||
.elevator_attrs = kyber_sched_attrs,
|
||||
.elevator_name = "kyber",
|
||||
.elevator_owner = THIS_MODULE,
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
|
||||
#include "blk.h"
|
||||
#include "blk-mq.h"
|
||||
#include "blk-mq-debugfs.h"
|
||||
#include "blk-mq-tag.h"
|
||||
#include "blk-mq-sched.h"
|
||||
|
||||
|
@ -517,6 +518,125 @@ static struct elv_fs_entry deadline_attrs[] = {
|
|||
__ATTR_NULL
|
||||
};
|
||||
|
||||
#ifdef CONFIG_BLK_DEBUG_FS
|
||||
#define DEADLINE_DEBUGFS_DDIR_ATTRS(ddir, name) \
|
||||
static void *deadline_##name##_fifo_start(struct seq_file *m, \
|
||||
loff_t *pos) \
|
||||
__acquires(&dd->lock) \
|
||||
{ \
|
||||
struct request_queue *q = m->private; \
|
||||
struct deadline_data *dd = q->elevator->elevator_data; \
|
||||
\
|
||||
spin_lock(&dd->lock); \
|
||||
return seq_list_start(&dd->fifo_list[ddir], *pos); \
|
||||
} \
|
||||
\
|
||||
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v, \
|
||||
loff_t *pos) \
|
||||
{ \
|
||||
struct request_queue *q = m->private; \
|
||||
struct deadline_data *dd = q->elevator->elevator_data; \
|
||||
\
|
||||
return seq_list_next(v, &dd->fifo_list[ddir], pos); \
|
||||
} \
|
||||
\
|
||||
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v) \
|
||||
__releases(&dd->lock) \
|
||||
{ \
|
||||
struct request_queue *q = m->private; \
|
||||
struct deadline_data *dd = q->elevator->elevator_data; \
|
||||
\
|
||||
spin_unlock(&dd->lock); \
|
||||
} \
|
||||
\
|
||||
static const struct seq_operations deadline_##name##_fifo_seq_ops = { \
|
||||
.start = deadline_##name##_fifo_start, \
|
||||
.next = deadline_##name##_fifo_next, \
|
||||
.stop = deadline_##name##_fifo_stop, \
|
||||
.show = blk_mq_debugfs_rq_show, \
|
||||
}; \
|
||||
\
|
||||
static int deadline_##name##_next_rq_show(void *data, \
|
||||
struct seq_file *m) \
|
||||
{ \
|
||||
struct request_queue *q = data; \
|
||||
struct deadline_data *dd = q->elevator->elevator_data; \
|
||||
struct request *rq = dd->next_rq[ddir]; \
|
||||
\
|
||||
if (rq) \
|
||||
__blk_mq_debugfs_rq_show(m, rq); \
|
||||
return 0; \
|
||||
}
|
||||
DEADLINE_DEBUGFS_DDIR_ATTRS(READ, read)
|
||||
DEADLINE_DEBUGFS_DDIR_ATTRS(WRITE, write)
|
||||
#undef DEADLINE_DEBUGFS_DDIR_ATTRS
|
||||
|
||||
static int deadline_batching_show(void *data, struct seq_file *m)
|
||||
{
|
||||
struct request_queue *q = data;
|
||||
struct deadline_data *dd = q->elevator->elevator_data;
|
||||
|
||||
seq_printf(m, "%u\n", dd->batching);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int deadline_starved_show(void *data, struct seq_file *m)
|
||||
{
|
||||
struct request_queue *q = data;
|
||||
struct deadline_data *dd = q->elevator->elevator_data;
|
||||
|
||||
seq_printf(m, "%u\n", dd->starved);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void *deadline_dispatch_start(struct seq_file *m, loff_t *pos)
|
||||
__acquires(&dd->lock)
|
||||
{
|
||||
struct request_queue *q = m->private;
|
||||
struct deadline_data *dd = q->elevator->elevator_data;
|
||||
|
||||
spin_lock(&dd->lock);
|
||||
return seq_list_start(&dd->dispatch, *pos);
|
||||
}
|
||||
|
||||
static void *deadline_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
|
||||
{
|
||||
struct request_queue *q = m->private;
|
||||
struct deadline_data *dd = q->elevator->elevator_data;
|
||||
|
||||
return seq_list_next(v, &dd->dispatch, pos);
|
||||
}
|
||||
|
||||
static void deadline_dispatch_stop(struct seq_file *m, void *v)
|
||||
__releases(&dd->lock)
|
||||
{
|
||||
struct request_queue *q = m->private;
|
||||
struct deadline_data *dd = q->elevator->elevator_data;
|
||||
|
||||
spin_unlock(&dd->lock);
|
||||
}
|
||||
|
||||
static const struct seq_operations deadline_dispatch_seq_ops = {
|
||||
.start = deadline_dispatch_start,
|
||||
.next = deadline_dispatch_next,
|
||||
.stop = deadline_dispatch_stop,
|
||||
.show = blk_mq_debugfs_rq_show,
|
||||
};
|
||||
|
||||
#define DEADLINE_QUEUE_DDIR_ATTRS(name) \
|
||||
{#name "_fifo_list", 0400, .seq_ops = &deadline_##name##_fifo_seq_ops}, \
|
||||
{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
|
||||
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
|
||||
DEADLINE_QUEUE_DDIR_ATTRS(read),
|
||||
DEADLINE_QUEUE_DDIR_ATTRS(write),
|
||||
{"batching", 0400, deadline_batching_show},
|
||||
{"starved", 0400, deadline_starved_show},
|
||||
{"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops},
|
||||
{},
|
||||
};
|
||||
#undef DEADLINE_QUEUE_DDIR_ATTRS
|
||||
#endif
|
||||
|
||||
static struct elevator_type mq_deadline = {
|
||||
.ops.mq = {
|
||||
.insert_requests = dd_insert_requests,
|
||||
|
@ -533,6 +653,9 @@ static struct elevator_type mq_deadline = {
|
|||
},
|
||||
|
||||
.uses_mq = true,
|
||||
#ifdef CONFIG_BLK_DEBUG_FS
|
||||
.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
|
||||
#endif
|
||||
.elevator_attrs = deadline_attrs,
|
||||
.elevator_name = "mq-deadline",
|
||||
.elevator_owner = THIS_MODULE,
|
||||
|
|
|
@ -1697,9 +1697,8 @@ static void loop_queue_work(struct kthread_work *work)
|
|||
loop_handle_cmd(cmd);
|
||||
}
|
||||
|
||||
static int loop_init_request(void *data, struct request *rq,
|
||||
unsigned int hctx_idx, unsigned int request_idx,
|
||||
unsigned int numa_node)
|
||||
static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq,
|
||||
unsigned int hctx_idx, unsigned int numa_node)
|
||||
{
|
||||
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
|
||||
|
||||
|
|
|
@ -195,7 +195,7 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
|
|||
if (mtip_check_surprise_removal(dd->pdev))
|
||||
return NULL;
|
||||
|
||||
rq = blk_mq_alloc_request(dd->queue, 0, BLK_MQ_REQ_RESERVED);
|
||||
rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
|
||||
if (IS_ERR(rq))
|
||||
return NULL;
|
||||
|
||||
|
@ -205,66 +205,12 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
|
|||
return blk_mq_rq_to_pdu(rq);
|
||||
}
|
||||
|
||||
static void mtip_put_int_command(struct driver_data *dd, struct mtip_cmd *cmd)
|
||||
{
|
||||
blk_put_request(blk_mq_rq_from_pdu(cmd));
|
||||
}
|
||||
|
||||
/*
|
||||
* Once we add support for one hctx per mtip group, this will change a bit
|
||||
*/
|
||||
static struct request *mtip_rq_from_tag(struct driver_data *dd,
|
||||
unsigned int tag)
|
||||
{
|
||||
struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0];
|
||||
|
||||
return blk_mq_tag_to_rq(hctx->tags, tag);
|
||||
}
|
||||
|
||||
static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd,
|
||||
unsigned int tag)
|
||||
{
|
||||
struct request *rq = mtip_rq_from_tag(dd, tag);
|
||||
struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0];
|
||||
|
||||
return blk_mq_rq_to_pdu(rq);
|
||||
}
|
||||
|
||||
/*
|
||||
* IO completion function.
|
||||
*
|
||||
* This completion function is called by the driver ISR when a
|
||||
* command that was issued by the kernel completes. It first calls the
|
||||
* asynchronous completion function which normally calls back into the block
|
||||
* layer passing the asynchronous callback data, then unmaps the
|
||||
* scatter list associated with the completed command, and finally
|
||||
* clears the allocated bit associated with the completed command.
|
||||
*
|
||||
* @port Pointer to the port data structure.
|
||||
* @tag Tag of the command.
|
||||
* @data Pointer to driver_data.
|
||||
* @status Completion status.
|
||||
*
|
||||
* return value
|
||||
* None
|
||||
*/
|
||||
static void mtip_async_complete(struct mtip_port *port,
|
||||
int tag, struct mtip_cmd *cmd, int status)
|
||||
{
|
||||
struct driver_data *dd = port->dd;
|
||||
struct request *rq;
|
||||
|
||||
if (unlikely(!dd) || unlikely(!port))
|
||||
return;
|
||||
|
||||
if (unlikely(status == PORT_IRQ_TF_ERR)) {
|
||||
dev_warn(&port->dd->pdev->dev,
|
||||
"Command tag %d failed due to TFE\n", tag);
|
||||
}
|
||||
|
||||
rq = mtip_rq_from_tag(dd, tag);
|
||||
|
||||
cmd->status = status;
|
||||
blk_mq_complete_request(rq);
|
||||
return blk_mq_rq_to_pdu(blk_mq_tag_to_rq(hctx->tags, tag));
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -581,43 +527,19 @@ static void print_tags(struct driver_data *dd,
|
|||
"%d command(s) %s: tagmap [%s]", cnt, msg, tagmap);
|
||||
}
|
||||
|
||||
/*
|
||||
* Internal command completion callback function.
|
||||
*
|
||||
* This function is normally called by the driver ISR when an internal
|
||||
* command completed. This function signals the command completion by
|
||||
* calling complete().
|
||||
*
|
||||
* @port Pointer to the port data structure.
|
||||
* @tag Tag of the command that has completed.
|
||||
* @data Pointer to a completion structure.
|
||||
* @status Completion status.
|
||||
*
|
||||
* return value
|
||||
* None
|
||||
*/
|
||||
static void mtip_completion(struct mtip_port *port,
|
||||
int tag, struct mtip_cmd *command, int status)
|
||||
{
|
||||
struct completion *waiting = command->comp_data;
|
||||
if (unlikely(status == PORT_IRQ_TF_ERR))
|
||||
dev_warn(&port->dd->pdev->dev,
|
||||
"Internal command %d completed with TFE\n", tag);
|
||||
|
||||
command->comp_func = NULL;
|
||||
command->comp_data = NULL;
|
||||
complete(waiting);
|
||||
}
|
||||
|
||||
static void mtip_null_completion(struct mtip_port *port,
|
||||
int tag, struct mtip_cmd *command, int status)
|
||||
{
|
||||
}
|
||||
|
||||
static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
|
||||
dma_addr_t buffer_dma, unsigned int sectors);
|
||||
static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
|
||||
struct smart_attr *attrib);
|
||||
|
||||
static void mtip_complete_command(struct mtip_cmd *cmd, int status)
|
||||
{
|
||||
struct request *req = blk_mq_rq_from_pdu(cmd);
|
||||
|
||||
cmd->status = status;
|
||||
blk_mq_complete_request(req);
|
||||
}
|
||||
|
||||
/*
|
||||
* Handle an error.
|
||||
*
|
||||
|
@ -646,11 +568,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
|
|||
if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
|
||||
cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
|
||||
dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
|
||||
|
||||
if (cmd->comp_data && cmd->comp_func) {
|
||||
cmd->comp_func(port, MTIP_TAG_INTERNAL,
|
||||
cmd, PORT_IRQ_TF_ERR);
|
||||
}
|
||||
mtip_complete_command(cmd, -EIO);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -677,19 +595,9 @@ static void mtip_handle_tfe(struct driver_data *dd)
|
|||
continue;
|
||||
|
||||
cmd = mtip_cmd_from_tag(dd, tag);
|
||||
if (likely(cmd->comp_func)) {
|
||||
set_bit(tag, tagaccum);
|
||||
cmd_cnt++;
|
||||
cmd->comp_func(port, tag, cmd, 0);
|
||||
} else {
|
||||
dev_err(&port->dd->pdev->dev,
|
||||
"Missing completion func for tag %d",
|
||||
tag);
|
||||
if (mtip_check_surprise_removal(dd->pdev)) {
|
||||
/* don't proceed further */
|
||||
return;
|
||||
}
|
||||
}
|
||||
mtip_complete_command(cmd, 0);
|
||||
set_bit(tag, tagaccum);
|
||||
cmd_cnt++;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -759,10 +667,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
|
|||
tag,
|
||||
fail_reason != NULL ?
|
||||
fail_reason : "unknown");
|
||||
if (cmd->comp_func) {
|
||||
cmd->comp_func(port, tag,
|
||||
cmd, -ENODATA);
|
||||
}
|
||||
mtip_complete_command(cmd, -ENODATA);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
@ -785,12 +690,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
|
|||
dev_warn(&port->dd->pdev->dev,
|
||||
"retiring tag %d\n", tag);
|
||||
|
||||
if (cmd->comp_func)
|
||||
cmd->comp_func(port, tag, cmd, PORT_IRQ_TF_ERR);
|
||||
else
|
||||
dev_warn(&port->dd->pdev->dev,
|
||||
"Bad completion for tag %d\n",
|
||||
tag);
|
||||
mtip_complete_command(cmd, -EIO);
|
||||
}
|
||||
}
|
||||
print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
|
||||
|
@ -823,18 +723,7 @@ static inline void mtip_workq_sdbfx(struct mtip_port *port, int group,
|
|||
continue;
|
||||
|
||||
command = mtip_cmd_from_tag(dd, tag);
|
||||
if (likely(command->comp_func))
|
||||
command->comp_func(port, tag, command, 0);
|
||||
else {
|
||||
dev_dbg(&dd->pdev->dev,
|
||||
"Null completion for tag %d",
|
||||
tag);
|
||||
|
||||
if (mtip_check_surprise_removal(
|
||||
dd->pdev)) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
mtip_complete_command(command, 0);
|
||||
}
|
||||
completed >>= 1;
|
||||
}
|
||||
|
@ -852,16 +741,13 @@ static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
|
|||
struct mtip_port *port = dd->port;
|
||||
struct mtip_cmd *cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
|
||||
|
||||
if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
|
||||
(cmd != NULL) && !(readl(port->cmd_issue[MTIP_TAG_INTERNAL])
|
||||
& (1 << MTIP_TAG_INTERNAL))) {
|
||||
if (cmd->comp_func) {
|
||||
cmd->comp_func(port, MTIP_TAG_INTERNAL, cmd, 0);
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) && cmd) {
|
||||
int group = MTIP_TAG_INDEX(MTIP_TAG_INTERNAL);
|
||||
int status = readl(port->cmd_issue[group]);
|
||||
|
||||
return;
|
||||
if (!(status & (1 << MTIP_TAG_BIT(MTIP_TAG_INTERNAL))))
|
||||
mtip_complete_command(cmd, 0);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -869,7 +755,6 @@ static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
|
|||
*/
|
||||
static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat)
|
||||
{
|
||||
|
||||
if (unlikely(port_stat & PORT_IRQ_CONNECT)) {
|
||||
dev_warn(&dd->pdev->dev,
|
||||
"Clearing PxSERR.DIAG.x\n");
|
||||
|
@ -996,8 +881,7 @@ static irqreturn_t mtip_irq_handler(int irq, void *instance)
|
|||
|
||||
static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
|
||||
{
|
||||
writel(1 << MTIP_TAG_BIT(tag),
|
||||
port->cmd_issue[MTIP_TAG_INDEX(tag)]);
|
||||
writel(1 << MTIP_TAG_BIT(tag), port->cmd_issue[MTIP_TAG_INDEX(tag)]);
|
||||
}
|
||||
|
||||
static bool mtip_pause_ncq(struct mtip_port *port,
|
||||
|
@ -1035,53 +919,53 @@ static bool mtip_pause_ncq(struct mtip_port *port,
|
|||
return false;
|
||||
}
|
||||
|
||||
static bool mtip_commands_active(struct mtip_port *port)
|
||||
{
|
||||
unsigned int active;
|
||||
unsigned int n;
|
||||
|
||||
/*
|
||||
* Ignore s_active bit 0 of array element 0.
|
||||
* This bit will always be set
|
||||
*/
|
||||
active = readl(port->s_active[0]) & 0xFFFFFFFE;
|
||||
for (n = 1; n < port->dd->slot_groups; n++)
|
||||
active |= readl(port->s_active[n]);
|
||||
|
||||
return active != 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Wait for port to quiesce
|
||||
*
|
||||
* @port Pointer to port data structure
|
||||
* @timeout Max duration to wait (ms)
|
||||
* @atomic gfp_t flag to indicate blockable context or not
|
||||
*
|
||||
* return value
|
||||
* 0 Success
|
||||
* -EBUSY Commands still active
|
||||
*/
|
||||
static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout,
|
||||
gfp_t atomic)
|
||||
static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
|
||||
{
|
||||
unsigned long to;
|
||||
unsigned int n;
|
||||
unsigned int active = 1;
|
||||
bool active = true;
|
||||
|
||||
blk_mq_stop_hw_queues(port->dd->queue);
|
||||
|
||||
to = jiffies + msecs_to_jiffies(timeout);
|
||||
do {
|
||||
if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
|
||||
test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags) &&
|
||||
atomic == GFP_KERNEL) {
|
||||
test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
|
||||
msleep(20);
|
||||
continue; /* svc thd is actively issuing commands */
|
||||
}
|
||||
|
||||
if (atomic == GFP_KERNEL)
|
||||
msleep(100);
|
||||
else {
|
||||
cpu_relax();
|
||||
udelay(100);
|
||||
}
|
||||
msleep(100);
|
||||
|
||||
if (mtip_check_surprise_removal(port->dd->pdev))
|
||||
goto err_fault;
|
||||
|
||||
/*
|
||||
* Ignore s_active bit 0 of array element 0.
|
||||
* This bit will always be set
|
||||
*/
|
||||
active = readl(port->s_active[0]) & 0xFFFFFFFE;
|
||||
for (n = 1; n < port->dd->slot_groups; n++)
|
||||
active |= readl(port->s_active[n]);
|
||||
|
||||
active = mtip_commands_active(port);
|
||||
if (!active)
|
||||
break;
|
||||
} while (time_before(jiffies, to));
|
||||
|
@ -1093,6 +977,13 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout,
|
|||
return -EFAULT;
|
||||
}
|
||||
|
||||
struct mtip_int_cmd {
|
||||
int fis_len;
|
||||
dma_addr_t buffer;
|
||||
int buf_len;
|
||||
u32 opts;
|
||||
};
|
||||
|
||||
/*
|
||||
* Execute an internal command and wait for the completion.
|
||||
*
|
||||
|
@ -1117,13 +1008,17 @@ static int mtip_exec_internal_command(struct mtip_port *port,
|
|||
dma_addr_t buffer,
|
||||
int buf_len,
|
||||
u32 opts,
|
||||
gfp_t atomic,
|
||||
unsigned long timeout)
|
||||
{
|
||||
struct mtip_cmd_sg *command_sg;
|
||||
DECLARE_COMPLETION_ONSTACK(wait);
|
||||
struct mtip_cmd *int_cmd;
|
||||
struct driver_data *dd = port->dd;
|
||||
struct request *rq;
|
||||
struct mtip_int_cmd icmd = {
|
||||
.fis_len = fis_len,
|
||||
.buffer = buffer,
|
||||
.buf_len = buf_len,
|
||||
.opts = opts
|
||||
};
|
||||
int rv = 0;
|
||||
unsigned long start;
|
||||
|
||||
|
@ -1138,6 +1033,8 @@ static int mtip_exec_internal_command(struct mtip_port *port,
|
|||
dbg_printk(MTIP_DRV_NAME "Unable to allocate tag for PIO cmd\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
rq = blk_mq_rq_from_pdu(int_cmd);
|
||||
rq->special = &icmd;
|
||||
|
||||
set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
|
||||
|
||||
|
@ -1146,135 +1043,60 @@ static int mtip_exec_internal_command(struct mtip_port *port,
|
|||
|
||||
clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
|
||||
|
||||
if (atomic == GFP_KERNEL) {
|
||||
if (fis->command != ATA_CMD_STANDBYNOW1) {
|
||||
/* wait for io to complete if non atomic */
|
||||
if (mtip_quiesce_io(port,
|
||||
MTIP_QUIESCE_IO_TIMEOUT_MS, atomic) < 0) {
|
||||
dev_warn(&dd->pdev->dev,
|
||||
"Failed to quiesce IO\n");
|
||||
mtip_put_int_command(dd, int_cmd);
|
||||
clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
|
||||
wake_up_interruptible(&port->svc_wait);
|
||||
return -EBUSY;
|
||||
}
|
||||
if (fis->command != ATA_CMD_STANDBYNOW1) {
|
||||
/* wait for io to complete if non atomic */
|
||||
if (mtip_quiesce_io(port, MTIP_QUIESCE_IO_TIMEOUT_MS) < 0) {
|
||||
dev_warn(&dd->pdev->dev, "Failed to quiesce IO\n");
|
||||
blk_mq_free_request(rq);
|
||||
clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
|
||||
wake_up_interruptible(&port->svc_wait);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
/* Set the completion function and data for the command. */
|
||||
int_cmd->comp_data = &wait;
|
||||
int_cmd->comp_func = mtip_completion;
|
||||
|
||||
} else {
|
||||
/* Clear completion - we're going to poll */
|
||||
int_cmd->comp_data = NULL;
|
||||
int_cmd->comp_func = mtip_null_completion;
|
||||
}
|
||||
|
||||
/* Copy the command to the command table */
|
||||
memcpy(int_cmd->command, fis, fis_len*4);
|
||||
|
||||
/* Populate the SG list */
|
||||
int_cmd->command_header->opts =
|
||||
__force_bit2int cpu_to_le32(opts | fis_len);
|
||||
if (buf_len) {
|
||||
command_sg = int_cmd->command + AHCI_CMD_TBL_HDR_SZ;
|
||||
|
||||
command_sg->info =
|
||||
__force_bit2int cpu_to_le32((buf_len-1) & 0x3FFFFF);
|
||||
command_sg->dba =
|
||||
__force_bit2int cpu_to_le32(buffer & 0xFFFFFFFF);
|
||||
command_sg->dba_upper =
|
||||
__force_bit2int cpu_to_le32((buffer >> 16) >> 16);
|
||||
|
||||
int_cmd->command_header->opts |=
|
||||
__force_bit2int cpu_to_le32((1 << 16));
|
||||
}
|
||||
|
||||
/* Populate the command header */
|
||||
int_cmd->command_header->byte_count = 0;
|
||||
|
||||
start = jiffies;
|
||||
rq->timeout = timeout;
|
||||
|
||||
/* Issue the command to the hardware */
|
||||
mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
|
||||
/* insert request and run queue */
|
||||
blk_execute_rq(rq->q, NULL, rq, true);
|
||||
|
||||
if (atomic == GFP_KERNEL) {
|
||||
/* Wait for the command to complete or timeout. */
|
||||
if ((rv = wait_for_completion_interruptible_timeout(
|
||||
&wait,
|
||||
msecs_to_jiffies(timeout))) <= 0) {
|
||||
rv = int_cmd->status;
|
||||
if (rv < 0) {
|
||||
if (rv == -ERESTARTSYS) { /* interrupted */
|
||||
dev_err(&dd->pdev->dev,
|
||||
"Internal command [%02X] was interrupted after %u ms\n",
|
||||
fis->command,
|
||||
jiffies_to_msecs(jiffies - start));
|
||||
rv = -EINTR;
|
||||
goto exec_ic_exit;
|
||||
} else if (rv == 0) /* timeout */
|
||||
dev_err(&dd->pdev->dev,
|
||||
"Internal command did not complete [%02X] within timeout of %lu ms\n",
|
||||
fis->command, timeout);
|
||||
else
|
||||
dev_err(&dd->pdev->dev,
|
||||
"Internal command [%02X] wait returned code [%d] after %lu ms - unhandled\n",
|
||||
fis->command, rv, timeout);
|
||||
|
||||
if (rv == -ERESTARTSYS) { /* interrupted */
|
||||
dev_err(&dd->pdev->dev,
|
||||
"Internal command [%02X] was interrupted after %u ms\n",
|
||||
fis->command,
|
||||
jiffies_to_msecs(jiffies - start));
|
||||
rv = -EINTR;
|
||||
goto exec_ic_exit;
|
||||
} else if (rv == 0) /* timeout */
|
||||
dev_err(&dd->pdev->dev,
|
||||
"Internal command did not complete [%02X] within timeout of %lu ms\n",
|
||||
fis->command, timeout);
|
||||
else
|
||||
dev_err(&dd->pdev->dev,
|
||||
"Internal command [%02X] wait returned code [%d] after %lu ms - unhandled\n",
|
||||
fis->command, rv, timeout);
|
||||
|
||||
if (mtip_check_surprise_removal(dd->pdev) ||
|
||||
test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
|
||||
&dd->dd_flag)) {
|
||||
dev_err(&dd->pdev->dev,
|
||||
"Internal command [%02X] wait returned due to SR\n",
|
||||
fis->command);
|
||||
rv = -ENXIO;
|
||||
goto exec_ic_exit;
|
||||
}
|
||||
mtip_device_reset(dd); /* recover from timeout issue */
|
||||
rv = -EAGAIN;
|
||||
if (mtip_check_surprise_removal(dd->pdev) ||
|
||||
test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
|
||||
&dd->dd_flag)) {
|
||||
dev_err(&dd->pdev->dev,
|
||||
"Internal command [%02X] wait returned due to SR\n",
|
||||
fis->command);
|
||||
rv = -ENXIO;
|
||||
goto exec_ic_exit;
|
||||
}
|
||||
} else {
|
||||
u32 hba_stat, port_stat;
|
||||
|
||||
/* Spin for <timeout> checking if command still outstanding */
|
||||
timeout = jiffies + msecs_to_jiffies(timeout);
|
||||
while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL])
|
||||
& (1 << MTIP_TAG_INTERNAL))
|
||||
&& time_before(jiffies, timeout)) {
|
||||
if (mtip_check_surprise_removal(dd->pdev)) {
|
||||
rv = -ENXIO;
|
||||
goto exec_ic_exit;
|
||||
}
|
||||
if ((fis->command != ATA_CMD_STANDBYNOW1) &&
|
||||
test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
|
||||
&dd->dd_flag)) {
|
||||
rv = -ENXIO;
|
||||
goto exec_ic_exit;
|
||||
}
|
||||
port_stat = readl(port->mmio + PORT_IRQ_STAT);
|
||||
if (!port_stat)
|
||||
continue;
|
||||
|
||||
if (port_stat & PORT_IRQ_ERR) {
|
||||
dev_err(&dd->pdev->dev,
|
||||
"Internal command [%02X] failed\n",
|
||||
fis->command);
|
||||
mtip_device_reset(dd);
|
||||
rv = -EIO;
|
||||
goto exec_ic_exit;
|
||||
} else {
|
||||
writel(port_stat, port->mmio + PORT_IRQ_STAT);
|
||||
hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
|
||||
if (hba_stat)
|
||||
writel(hba_stat,
|
||||
dd->mmio + HOST_IRQ_STAT);
|
||||
}
|
||||
break;
|
||||
}
|
||||
mtip_device_reset(dd); /* recover from timeout issue */
|
||||
rv = -EAGAIN;
|
||||
goto exec_ic_exit;
|
||||
}
|
||||
|
||||
if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
|
||||
& (1 << MTIP_TAG_INTERNAL)) {
|
||||
if (readl(port->cmd_issue[MTIP_TAG_INDEX(MTIP_TAG_INTERNAL)])
|
||||
& (1 << MTIP_TAG_BIT(MTIP_TAG_INTERNAL))) {
|
||||
rv = -ENXIO;
|
||||
if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
|
||||
mtip_device_reset(dd);
|
||||
|
@ -1283,7 +1105,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
|
|||
}
|
||||
exec_ic_exit:
|
||||
/* Clear the allocated and active bits for the internal command. */
|
||||
mtip_put_int_command(dd, int_cmd);
|
||||
blk_mq_free_request(rq);
|
||||
clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
|
||||
if (rv >= 0 && mtip_pause_ncq(port, fis)) {
|
||||
/* NCQ paused */
|
||||
|
@ -1391,7 +1213,6 @@ static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
|
|||
port->identify_dma,
|
||||
sizeof(u16) * ATA_ID_WORDS,
|
||||
0,
|
||||
GFP_KERNEL,
|
||||
MTIP_INT_CMD_TIMEOUT_MS)
|
||||
< 0) {
|
||||
rv = -1;
|
||||
|
@ -1477,7 +1298,6 @@ static int mtip_standby_immediate(struct mtip_port *port)
|
|||
0,
|
||||
0,
|
||||
0,
|
||||
GFP_ATOMIC,
|
||||
timeout);
|
||||
dbg_printk(MTIP_DRV_NAME "Time taken to complete standby cmd: %d ms\n",
|
||||
jiffies_to_msecs(jiffies - start));
|
||||
|
@ -1523,7 +1343,6 @@ static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
|
|||
buffer_dma,
|
||||
sectors * ATA_SECT_SIZE,
|
||||
0,
|
||||
GFP_ATOMIC,
|
||||
MTIP_INT_CMD_TIMEOUT_MS);
|
||||
}
|
||||
|
||||
|
@ -1558,7 +1377,6 @@ static int mtip_get_smart_data(struct mtip_port *port, u8 *buffer,
|
|||
buffer_dma,
|
||||
ATA_SECT_SIZE,
|
||||
0,
|
||||
GFP_ATOMIC,
|
||||
15000);
|
||||
}
|
||||
|
||||
|
@ -1686,7 +1504,6 @@ static int mtip_send_trim(struct driver_data *dd, unsigned int lba,
|
|||
dma_addr,
|
||||
ATA_SECT_SIZE,
|
||||
0,
|
||||
GFP_KERNEL,
|
||||
MTIP_TRIM_TIMEOUT_MS) < 0)
|
||||
rv = -EIO;
|
||||
|
||||
|
@ -1850,7 +1667,6 @@ static int exec_drive_task(struct mtip_port *port, u8 *command)
|
|||
0,
|
||||
0,
|
||||
0,
|
||||
GFP_KERNEL,
|
||||
to) < 0) {
|
||||
return -1;
|
||||
}
|
||||
|
@ -1946,7 +1762,6 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
|
|||
(xfer_sz ? dma_addr : 0),
|
||||
(xfer_sz ? ATA_SECT_SIZE * xfer_sz : 0),
|
||||
0,
|
||||
GFP_KERNEL,
|
||||
to)
|
||||
< 0) {
|
||||
rv = -EFAULT;
|
||||
|
@ -2189,7 +2004,6 @@ static int exec_drive_taskfile(struct driver_data *dd,
|
|||
dma_buffer,
|
||||
transfer_size,
|
||||
0,
|
||||
GFP_KERNEL,
|
||||
timeout) < 0) {
|
||||
err = -EIO;
|
||||
goto abort;
|
||||
|
@ -2446,12 +2260,6 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
|
|||
(nents << 16) | 5 | AHCI_CMD_PREFETCH);
|
||||
command->command_header->byte_count = 0;
|
||||
|
||||
/*
|
||||
* Set the completion function and data for the command
|
||||
* within this layer.
|
||||
*/
|
||||
command->comp_data = dd;
|
||||
command->comp_func = mtip_async_complete;
|
||||
command->direction = dma_dir;
|
||||
|
||||
/*
|
||||
|
@ -3825,6 +3633,42 @@ static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
|
|||
return false;
|
||||
}
|
||||
|
||||
static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
|
||||
struct request *rq)
|
||||
{
|
||||
struct driver_data *dd = hctx->queue->queuedata;
|
||||
struct mtip_int_cmd *icmd = rq->special;
|
||||
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
|
||||
struct mtip_cmd_sg *command_sg;
|
||||
|
||||
if (mtip_commands_active(dd->port))
|
||||
return BLK_MQ_RQ_QUEUE_BUSY;
|
||||
|
||||
/* Populate the SG list */
|
||||
cmd->command_header->opts =
|
||||
__force_bit2int cpu_to_le32(icmd->opts | icmd->fis_len);
|
||||
if (icmd->buf_len) {
|
||||
command_sg = cmd->command + AHCI_CMD_TBL_HDR_SZ;
|
||||
|
||||
command_sg->info =
|
||||
__force_bit2int cpu_to_le32((icmd->buf_len-1) & 0x3FFFFF);
|
||||
command_sg->dba =
|
||||
__force_bit2int cpu_to_le32(icmd->buffer & 0xFFFFFFFF);
|
||||
command_sg->dba_upper =
|
||||
__force_bit2int cpu_to_le32((icmd->buffer >> 16) >> 16);
|
||||
|
||||
cmd->command_header->opts |=
|
||||
__force_bit2int cpu_to_le32((1 << 16));
|
||||
}
|
||||
|
||||
/* Populate the command header */
|
||||
cmd->command_header->byte_count = 0;
|
||||
|
||||
blk_mq_start_request(rq);
|
||||
mtip_issue_non_ncq_command(dd->port, rq->tag);
|
||||
return BLK_MQ_RQ_QUEUE_OK;
|
||||
}
|
||||
|
||||
static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
|
||||
const struct blk_mq_queue_data *bd)
|
||||
{
|
||||
|
@ -3833,6 +3677,9 @@ static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
|
|||
|
||||
mtip_init_cmd_header(rq);
|
||||
|
||||
if (blk_rq_is_passthrough(rq))
|
||||
return mtip_issue_reserved_cmd(hctx, rq);
|
||||
|
||||
if (unlikely(mtip_check_unal_depth(hctx, rq)))
|
||||
return BLK_MQ_RQ_QUEUE_BUSY;
|
||||
|
||||
|
@ -3845,10 +3692,10 @@ static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
|
|||
return BLK_MQ_RQ_QUEUE_ERROR;
|
||||
}
|
||||
|
||||
static void mtip_free_cmd(void *data, struct request *rq,
|
||||
unsigned int hctx_idx, unsigned int request_idx)
|
||||
static void mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq,
|
||||
unsigned int hctx_idx)
|
||||
{
|
||||
struct driver_data *dd = data;
|
||||
struct driver_data *dd = set->driver_data;
|
||||
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
|
||||
|
||||
if (!cmd->command)
|
||||
|
@ -3858,20 +3705,12 @@ static void mtip_free_cmd(void *data, struct request *rq,
|
|||
cmd->command, cmd->command_dma);
|
||||
}
|
||||
|
||||
static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx,
|
||||
unsigned int request_idx, unsigned int numa_node)
|
||||
static int mtip_init_cmd(struct blk_mq_tag_set *set, struct request *rq,
|
||||
unsigned int hctx_idx, unsigned int numa_node)
|
||||
{
|
||||
struct driver_data *dd = data;
|
||||
struct driver_data *dd = set->driver_data;
|
||||
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
|
||||
|
||||
/*
|
||||
* For flush requests, request_idx starts at the end of the
|
||||
* tag space. Since we don't support FLUSH/FUA, simply return
|
||||
* 0 as there's nothing to be done.
|
||||
*/
|
||||
if (request_idx >= MTIP_MAX_COMMAND_SLOTS)
|
||||
return 0;
|
||||
|
||||
cmd->command = dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
|
||||
&cmd->command_dma, GFP_KERNEL);
|
||||
if (!cmd->command)
|
||||
|
@ -3888,8 +3727,12 @@ static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
|
|||
{
|
||||
struct driver_data *dd = req->q->queuedata;
|
||||
|
||||
if (reserved)
|
||||
goto exit_handler;
|
||||
if (reserved) {
|
||||
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
|
||||
|
||||
cmd->status = -ETIME;
|
||||
return BLK_EH_HANDLED;
|
||||
}
|
||||
|
||||
if (test_bit(req->tag, dd->port->cmds_to_issue))
|
||||
goto exit_handler;
|
||||
|
@ -3982,7 +3825,7 @@ static int mtip_block_initialize(struct driver_data *dd)
|
|||
dd->tags.reserved_tags = 1;
|
||||
dd->tags.cmd_size = sizeof(struct mtip_cmd);
|
||||
dd->tags.numa_node = dd->numa_node;
|
||||
dd->tags.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_NO_SCHED;
|
||||
dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
|
||||
dd->tags.driver_data = dd;
|
||||
dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
|
||||
|
||||
|
@ -4116,20 +3959,10 @@ static int mtip_block_initialize(struct driver_data *dd)
|
|||
|
||||
static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
|
||||
{
|
||||
struct driver_data *dd = (struct driver_data *)data;
|
||||
struct mtip_cmd *cmd;
|
||||
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
|
||||
|
||||
if (likely(!reserv)) {
|
||||
cmd = blk_mq_rq_to_pdu(rq);
|
||||
cmd->status = -ENODEV;
|
||||
blk_mq_complete_request(rq);
|
||||
} else if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &dd->port->flags)) {
|
||||
|
||||
cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
|
||||
if (cmd->comp_func)
|
||||
cmd->comp_func(dd->port, MTIP_TAG_INTERNAL,
|
||||
cmd, -ENODEV);
|
||||
}
|
||||
cmd->status = -ENODEV;
|
||||
blk_mq_complete_request(rq);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -4168,8 +4001,7 @@ static int mtip_block_remove(struct driver_data *dd)
|
|||
* Explicitly wait here for IOs to quiesce,
|
||||
* as mtip_standby_drive usually won't wait for IOs.
|
||||
*/
|
||||
if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS,
|
||||
GFP_KERNEL))
|
||||
if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS))
|
||||
mtip_standby_drive(dd);
|
||||
}
|
||||
else
|
||||
|
|
|
@ -333,16 +333,6 @@ struct mtip_cmd {
|
|||
|
||||
dma_addr_t command_dma; /* corresponding physical address */
|
||||
|
||||
void *comp_data; /* data passed to completion function comp_func() */
|
||||
/*
|
||||
* Completion function called by the ISR upon completion of
|
||||
* a command.
|
||||
*/
|
||||
void (*comp_func)(struct mtip_port *port,
|
||||
int tag,
|
||||
struct mtip_cmd *cmd,
|
||||
int status);
|
||||
|
||||
int scatter_ents; /* Number of scatter list entries used */
|
||||
|
||||
int unaligned; /* command is unaligned on 4k boundary */
|
||||
|
|
|
@ -1396,12 +1396,11 @@ static void nbd_dbg_close(void)
|
|||
|
||||
#endif
|
||||
|
||||
static int nbd_init_request(void *data, struct request *rq,
|
||||
unsigned int hctx_idx, unsigned int request_idx,
|
||||
unsigned int numa_node)
|
||||
static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
|
||||
unsigned int hctx_idx, unsigned int numa_node)
|
||||
{
|
||||
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
|
||||
cmd->nbd = data;
|
||||
cmd->nbd = set->driver_data;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -4307,9 +4307,8 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int rbd_init_request(void *data, struct request *rq,
|
||||
unsigned int hctx_idx, unsigned int request_idx,
|
||||
unsigned int numa_node)
|
||||
static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
|
||||
unsigned int hctx_idx, unsigned int numa_node)
|
||||
{
|
||||
struct work_struct *work = blk_mq_rq_to_pdu(rq);
|
||||
|
||||
|
|
|
@@ -573,11 +573,10 @@ static const struct device_attribute dev_attr_cache_type_rw =
 	__ATTR(cache_type, S_IRUGO|S_IWUSR,
 	       virtblk_cache_type_show, virtblk_cache_type_store);
 
-static int virtblk_init_request(void *data, struct request *rq,
-		unsigned int hctx_idx, unsigned int request_idx,
-		unsigned int numa_node)
+static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
+		unsigned int hctx_idx, unsigned int numa_node)
 {
-	struct virtio_blk *vblk = data;
+	struct virtio_blk *vblk = set->driver_data;
 	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);
 
 #ifdef CONFIG_VIRTIO_BLK_SCSI
@@ -74,7 +74,7 @@ static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
 
 	return 0;
 err:
-	while (--i > lun_begin)
+	while (--i >= lun_begin)
 		clear_bit(i, dev->lun_map);
 
 	return -EBUSY;
@@ -211,7 +211,7 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
 
 	return tgt_dev;
 err_ch:
-	while (--i > 0)
+	while (--i >= 0)
 		kfree(dev_map->chnls[i].lun_offs);
 	kfree(luns);
 err_luns:
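Both lightnvm changes above are the same error-unwind fix: when claiming element i fails, every element claimed so far, including the very first one, must be released, so the unwind loop bound has to be >= rather than >. A self-contained illustration of the pattern (hypothetical claim()/release() helpers standing in for the bitmap operations, not the kernel code):

#include <stdbool.h>
#include <stdio.h>

#define NR_SLOTS 8
static bool claimed[NR_SLOTS];

/* Hypothetical stand-ins for set_bit()/clear_bit() on the LUN bitmap. */
static bool claim(int i)   { return i < 6 ? (claimed[i] = true) : false; }
static void release(int i) { claimed[i] = false; }

static int reserve_range(int begin, int end)
{
	int i;

	for (i = begin; i <= end; i++)
		if (!claim(i))
			goto err;
	return 0;
err:
	/*
	 * Unwind everything claimed so far.  "--i > begin" stops one
	 * element short and leaks slot "begin"; ">=" releases it too.
	 */
	while (--i >= begin)
		release(i);
	return -1;
}

int main(void)
{
	printf("reserve 2..7 -> %d, slot 2 still claimed? %d\n",
	       reserve_range(2, 7), claimed[2]);
	return 0;
}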
@@ -720,11 +720,10 @@ int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
 	return 0;
 }
 
-static int dm_mq_init_request(void *data, struct request *rq,
-		unsigned int hctx_idx, unsigned int request_idx,
-		unsigned int numa_node)
+static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
+		unsigned int hctx_idx, unsigned int numa_node)
 {
-	return __dm_rq_init_rq(data, rq);
+	return __dm_rq_init_rq(set->driver_data, rq);
 }
 
 static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -334,10 +334,9 @@ static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 }
 
-static int ubiblock_init_request(void *data, struct request *req,
-			     unsigned int hctx_idx,
-			     unsigned int request_idx,
-			     unsigned int numa_node)
+static int ubiblock_init_request(struct blk_mq_tag_set *set,
+		struct request *req, unsigned int hctx_idx,
+		unsigned int numa_node)
 {
 	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
 
@@ -1172,12 +1172,12 @@ __nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
 }
 
 static void
-nvme_fc_exit_request(void *data, struct request *rq,
-		unsigned int hctx_idx, unsigned int rq_idx)
+nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
+		unsigned int hctx_idx)
 {
 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
 
-	return __nvme_fc_exit_request(data, op);
+	return __nvme_fc_exit_request(set->driver_data, op);
 }
 
 static int
@@ -1434,11 +1434,10 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
 }
 
 static int
-nvme_fc_init_request(void *data, struct request *rq,
-		unsigned int hctx_idx, unsigned int rq_idx,
-		unsigned int numa_node)
+nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
+		unsigned int hctx_idx, unsigned int numa_node)
 {
-	struct nvme_fc_ctrl *ctrl = data;
+	struct nvme_fc_ctrl *ctrl = set->driver_data;
 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
 	struct nvme_fc_queue *queue = &ctrl->queues[hctx_idx+1];
@@ -1446,11 +1445,10 @@ nvme_fc_init_request(void *data, struct request *rq,
 }
 
 static int
-nvme_fc_init_admin_request(void *data, struct request *rq,
-		unsigned int hctx_idx, unsigned int rq_idx,
-		unsigned int numa_node)
+nvme_fc_init_admin_request(struct blk_mq_tag_set *set, struct request *rq,
+		unsigned int hctx_idx, unsigned int numa_node)
 {
-	struct nvme_fc_ctrl *ctrl = data;
+	struct nvme_fc_ctrl *ctrl = set->driver_data;
 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
 	struct nvme_fc_queue *queue = &ctrl->queues[0];
@@ -503,6 +503,8 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 	if (!cmd)
 		return -ENOMEM;
 
+	nvme_nvm_rqtocmd(rq, rqd, ns, cmd);
+
 	rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
 	if (IS_ERR(rq)) {
 		kfree(cmd);
@@ -517,8 +519,6 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 		rq->__data_len = 0;
 	}
 
-	nvme_nvm_rqtocmd(rq, rqd, ns, cmd);
-
 	rq->end_io_data = rqd;
 
 	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);
@@ -356,11 +356,11 @@ static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_i
 	nvmeq->tags = NULL;
 }
 
-static int nvme_admin_init_request(void *data, struct request *req,
-		unsigned int hctx_idx, unsigned int rq_idx,
-		unsigned int numa_node)
+static int nvme_admin_init_request(struct blk_mq_tag_set *set,
+		struct request *req, unsigned int hctx_idx,
+		unsigned int numa_node)
 {
-	struct nvme_dev *dev = data;
+	struct nvme_dev *dev = set->driver_data;
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct nvme_queue *nvmeq = dev->queues[0];
@@ -383,11 +383,10 @@ static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 	return 0;
 }
 
-static int nvme_init_request(void *data, struct request *req,
-		unsigned int hctx_idx, unsigned int rq_idx,
-		unsigned int numa_node)
+static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
+		unsigned int hctx_idx, unsigned int numa_node)
 {
-	struct nvme_dev *dev = data;
+	struct nvme_dev *dev = set->driver_data;
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];
@@ -315,16 +315,16 @@ static void __nvme_rdma_exit_request(struct nvme_rdma_ctrl *ctrl,
 			DMA_TO_DEVICE);
 }
 
-static void nvme_rdma_exit_request(void *data, struct request *rq,
-		unsigned int hctx_idx, unsigned int rq_idx)
+static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
+		struct request *rq, unsigned int hctx_idx)
 {
-	return __nvme_rdma_exit_request(data, rq, hctx_idx + 1);
+	return __nvme_rdma_exit_request(set->driver_data, rq, hctx_idx + 1);
 }
 
-static void nvme_rdma_exit_admin_request(void *data, struct request *rq,
-		unsigned int hctx_idx, unsigned int rq_idx)
+static void nvme_rdma_exit_admin_request(struct blk_mq_tag_set *set,
+		struct request *rq, unsigned int hctx_idx)
 {
-	return __nvme_rdma_exit_request(data, rq, 0);
+	return __nvme_rdma_exit_request(set->driver_data, rq, 0);
 }
 
 static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl,
@@ -358,18 +358,18 @@ static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl,
 	return -ENOMEM;
 }
 
-static int nvme_rdma_init_request(void *data, struct request *rq,
-		unsigned int hctx_idx, unsigned int rq_idx,
-		unsigned int numa_node)
+static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
+		struct request *rq, unsigned int hctx_idx,
+		unsigned int numa_node)
 {
-	return __nvme_rdma_init_request(data, rq, hctx_idx + 1);
+	return __nvme_rdma_init_request(set->driver_data, rq, hctx_idx + 1);
 }
 
-static int nvme_rdma_init_admin_request(void *data, struct request *rq,
-		unsigned int hctx_idx, unsigned int rq_idx,
-		unsigned int numa_node)
+static int nvme_rdma_init_admin_request(struct blk_mq_tag_set *set,
+		struct request *rq, unsigned int hctx_idx,
+		unsigned int numa_node)
 {
-	return __nvme_rdma_init_request(data, rq, 0);
+	return __nvme_rdma_init_request(set->driver_data, rq, 0);
 }
 
 static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
@@ -230,18 +230,19 @@ static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
 	return 0;
 }
 
-static int nvme_loop_init_request(void *data, struct request *req,
-		unsigned int hctx_idx, unsigned int rq_idx,
-		unsigned int numa_node)
+static int nvme_loop_init_request(struct blk_mq_tag_set *set,
+		struct request *req, unsigned int hctx_idx,
+		unsigned int numa_node)
 {
-	return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), hctx_idx + 1);
+	return nvme_loop_init_iod(set->driver_data, blk_mq_rq_to_pdu(req),
+			hctx_idx + 1);
 }
 
-static int nvme_loop_init_admin_request(void *data, struct request *req,
-		unsigned int hctx_idx, unsigned int rq_idx,
-		unsigned int numa_node)
+static int nvme_loop_init_admin_request(struct blk_mq_tag_set *set,
+		struct request *req, unsigned int hctx_idx,
+		unsigned int numa_node)
 {
-	return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), 0);
+	return nvme_loop_init_iod(set->driver_data, blk_mq_rq_to_pdu(req), 0);
 }
 
 static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
@@ -1999,11 +1999,10 @@ static enum blk_eh_timer_return scsi_timeout(struct request *req,
 	return scsi_times_out(req);
 }
 
-static int scsi_init_request(void *data, struct request *rq,
-		unsigned int hctx_idx, unsigned int request_idx,
-		unsigned int numa_node)
+static int scsi_init_request(struct blk_mq_tag_set *set, struct request *rq,
+		unsigned int hctx_idx, unsigned int numa_node)
 {
-	struct Scsi_Host *shost = data;
+	struct Scsi_Host *shost = set->driver_data;
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
 
 	cmd->sense_buffer =
@@ -2014,10 +2013,10 @@ static int scsi_init_request(void *data, struct request *rq,
 	return 0;
 }
 
-static void scsi_exit_request(void *data, struct request *rq,
-		unsigned int hctx_idx, unsigned int request_idx)
+static void scsi_exit_request(struct blk_mq_tag_set *set, struct request *rq,
+		unsigned int hctx_idx)
 {
-	struct Scsi_Host *shost = data;
+	struct Scsi_Host *shost = set->driver_data;
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
 
 	scsi_free_sense_buffer(shost, cmd->sense_buffer);
@@ -139,7 +139,7 @@ struct nfs_mount_request {
 };
 
 struct nfs_mount_info {
-	int (*fill_super)(struct super_block *, struct nfs_mount_info *);
+	void (*fill_super)(struct super_block *, struct nfs_mount_info *);
 	int (*set_security)(struct super_block *, struct dentry *, struct nfs_mount_info *);
 	struct nfs_parsed_mount_data *parsed;
 	struct nfs_clone_mount *cloned;
@@ -407,7 +407,7 @@ struct dentry *nfs_fs_mount(struct file_system_type *, int, const char *, void *
 struct dentry * nfs_xdev_mount_common(struct file_system_type *, int,
 		const char *, struct nfs_mount_info *);
 void nfs_kill_super(struct super_block *);
-int nfs_fill_super(struct super_block *, struct nfs_mount_info *);
+void nfs_fill_super(struct super_block *, struct nfs_mount_info *);
 
 extern struct rpc_stat nfs_rpcstat;
@@ -458,7 +458,7 @@ extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
 extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
 
 /* super.c */
-int nfs_clone_super(struct super_block *, struct nfs_mount_info *);
+void nfs_clone_super(struct super_block *, struct nfs_mount_info *);
 void nfs_umount_begin(struct super_block *);
 int nfs_statfs(struct dentry *, struct kstatfs *);
 int nfs_show_options(struct seq_file *, struct dentry *);
@@ -2321,11 +2321,10 @@ inline void nfs_initialise_sb(struct super_block *sb)
 /*
  * Finish setting up an NFS2/3 superblock
  */
-int nfs_fill_super(struct super_block *sb, struct nfs_mount_info *mount_info)
+void nfs_fill_super(struct super_block *sb, struct nfs_mount_info *mount_info)
 {
 	struct nfs_parsed_mount_data *data = mount_info->parsed;
 	struct nfs_server *server = NFS_SB(sb);
-	int ret;
 
 	sb->s_blocksize_bits = 0;
 	sb->s_blocksize = 0;
@@ -2343,21 +2342,13 @@ int nfs_fill_super(struct super_block *sb, struct nfs_mount_info *mount_info)
 	}
 
 	nfs_initialise_sb(sb);
-
-	ret = super_setup_bdi_name(sb, "%u:%u", MAJOR(server->s_dev),
-				   MINOR(server->s_dev));
-	if (ret)
-		return ret;
-	sb->s_bdi->ra_pages = server->rpages * NFS_MAX_READAHEAD;
-	return 0;
-
 }
 EXPORT_SYMBOL_GPL(nfs_fill_super);
 
 /*
  * Finish setting up a cloned NFS2/3/4 superblock
  */
-int nfs_clone_super(struct super_block *sb, struct nfs_mount_info *mount_info)
+void nfs_clone_super(struct super_block *sb, struct nfs_mount_info *mount_info)
 {
 	const struct super_block *old_sb = mount_info->cloned->sb;
 	struct nfs_server *server = NFS_SB(sb);
@@ -2377,10 +2368,6 @@ int nfs_clone_super(struct super_block *sb, struct nfs_mount_info *mount_info)
 	}
 
 	nfs_initialise_sb(sb);
-
-	sb->s_bdi = bdi_get(old_sb->s_bdi);
-
-	return 0;
 }
 
 static int nfs_compare_mount_options(const struct super_block *s, const struct nfs_server *b, int flags)
@@ -2600,14 +2587,19 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
 		nfs_free_server(server);
 		server = NULL;
 	} else {
+		error = super_setup_bdi_name(s, "%u:%u", MAJOR(server->s_dev),
+					     MINOR(server->s_dev));
+		if (error) {
+			mntroot = ERR_PTR(error);
+			goto error_splat_super;
+		}
+		s->s_bdi->ra_pages = server->rpages * NFS_MAX_READAHEAD;
 		server->super = s;
 	}
 
 	if (!s->s_root) {
 		/* initial superblock/root creation */
-		error = mount_info->fill_super(s, mount_info);
-		if (error)
-			goto error_splat_super;
+		mount_info->fill_super(s, mount_info);
 		nfs_get_cache_cookie(s, mount_info->parsed, mount_info->cloned);
 	}
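The NFS hunks above move bdi setup out of the fill_super helpers (which consequently become void) and into nfs_fs_mount_common(), where a failure of super_setup_bdi_name() can still abort the mount and where a cloned superblock is handled separately. A rough sketch of the pattern being adopted, written as a hypothetical helper (myfs_setup_bdi is illustrative, not from this series):

#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/kdev_t.h>

/*
 * Hypothetical helper mirroring what the hunk above adds to
 * nfs_fs_mount_common(): attach a per-superblock bdi exactly once,
 * while the error can still fail the mount, then tune readahead.
 */
static int myfs_setup_bdi(struct super_block *s, unsigned long ra_pages)
{
	int error;

	error = super_setup_bdi_name(s, "%u:%u", MAJOR(s->s_dev),
				     MINOR(s->s_dev));
	if (error)
		return error;

	s->s_bdi->ra_pages = ra_pages;
	return 0;
}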
@@ -57,6 +57,11 @@ struct blk_mq_hw_ctx {
 	unsigned long		poll_considered;
 	unsigned long		poll_invoked;
 	unsigned long		poll_success;
+
+#ifdef CONFIG_BLK_DEBUG_FS
+	struct dentry		*debugfs_dir;
+	struct dentry		*sched_debugfs_dir;
+#endif
 };
 
 struct blk_mq_tag_set {
@@ -86,9 +91,9 @@ typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data
 typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
 typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
 typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
-typedef int (init_request_fn)(void *, struct request *, unsigned int,
+typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *,
 		unsigned int, unsigned int);
-typedef void (exit_request_fn)(void *, struct request *, unsigned int,
+typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *,
 		unsigned int);
 typedef int (reinit_request_fn)(void *, struct request *);
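Most of the driver hunks above are mechanical conversions to these new prototypes: the opaque void * first argument and the request_idx parameter are gone, and drivers fetch their private context from set->driver_data instead. A hedged sketch of a converted callback pair for a made-up mydrv driver (the names and the PAGE_SIZE scratch buffer are illustrative, not from this series):

#include <linux/blk-mq.h>
#include <linux/slab.h>

/* Hypothetical driver context and per-request PDU for a made-up "mydrv". */
struct mydrv_ctrl;

struct mydrv_cmd {
	struct mydrv_ctrl *ctrl;
	void *buf;
};

static int mydrv_init_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx, unsigned int numa_node)
{
	struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);

	/* The driver context now comes from the tag set, not a void *. */
	cmd->ctrl = set->driver_data;
	cmd->buf = kzalloc_node(PAGE_SIZE, GFP_KERNEL, numa_node);
	return cmd->buf ? 0 : -ENOMEM;
}

static void mydrv_exit_request(struct blk_mq_tag_set *set, struct request *rq,
			       unsigned int hctx_idx)
{
	struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);

	kfree(cmd->buf);
}

The old request_idx argument simply disappears; none of the converted callbacks above used it for anything, and the hooks are still wired up through the driver's blk_mq_ops as .init_request and .exit_request.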
@@ -579,7 +579,7 @@ struct request_queue {
 
 #ifdef CONFIG_BLK_DEBUG_FS
 	struct dentry		*debugfs_dir;
-	struct dentry		*mq_debugfs_dir;
+	struct dentry		*sched_debugfs_dir;
 #endif
 
 	bool			mq_sysfs_init_done;
@@ -8,6 +8,9 @@
 
 struct io_cq;
 struct elevator_type;
+#ifdef CONFIG_BLK_DEBUG_FS
+struct blk_mq_debugfs_attr;
+#endif
 
 /*
  * Return values from elevator merger
@@ -144,6 +147,10 @@ struct elevator_type
 	char elevator_name[ELV_NAME_MAX];
 	struct module *elevator_owner;
 	bool uses_mq;
+#ifdef CONFIG_BLK_DEBUG_FS
+	const struct blk_mq_debugfs_attr *queue_debugfs_attrs;
+	const struct blk_mq_debugfs_attr *hctx_debugfs_attrs;
+#endif
 
 	/* managed by elevator core */
 	char icq_cache_name[ELV_NAME_MAX + 5];	/* elvname + "_io_cq" */
@@ -214,7 +221,6 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
 
 extern int elevator_init(struct request_queue *, char *);
 extern void elevator_exit(struct request_queue *, struct elevator_queue *);
-extern int elevator_change(struct request_queue *, const char *);
 extern bool elv_bio_merge_ok(struct request *, struct bio *);
 extern struct elevator_queue *elevator_alloc(struct request_queue *,
 	struct elevator_type *);
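With the new elevator_type fields above, a blk-mq scheduler can export internal state by pointing queue_debugfs_attrs and hctx_debugfs_attrs at attribute tables; the block core then creates the corresponding files under the queue's and each hctx's debugfs directory. A sketch of the wiring for a hypothetical scheduler (the mysched_* names and the placeholder show helper are illustrative, not the real mq-deadline or kyber code):

#include <linux/elevator.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "blk-mq-debugfs.h"

#ifdef CONFIG_BLK_DEBUG_FS
/* Placeholder attribute; the real mq-deadline/kyber tables differ. */
static int mysched_batching_show(void *data, struct seq_file *m)
{
	seq_puts(m, "0\n");		/* would print scheduler state */
	return 0;
}

static const struct blk_mq_debugfs_attr mysched_queue_debugfs_attrs[] = {
	{"batching", 0400, mysched_batching_show},
	{},
};
#endif

static struct elevator_type mysched_elv = {
	/* .ops.mq = { ... scheduler callbacks ... }, */
	.uses_mq	= true,
#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs	= mysched_queue_debugfs_attrs,
#endif
	.elevator_name	= "mysched",
	.elevator_owner	= THIS_MODULE,
};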