Revert "nvmet-rdma: use a private workqueue for delete"

This reverts commit 2acf70ade7 ("nvmet-rdma: use a private workqueue for delete").

The commit never really fixed the intended issue and caused all
kinds of other problems, including a use-before-initialization bug.

Suggested-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit d39aa49792
parent 8f676b8508
Author:    Christoph Hellwig <hch@lst.de>  2018-11-07 09:20:25 +01:00
Committer: Jens Axboe <axboe@kernel.dk>
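One way to read the use-before-initialization mentioned above, based on the init-path hunk below: nvmet_rdma_init() registered the transport before allocating the private workqueue, so a connect arriving in that window could reach code paths that touch the workqueue pointer while it was still NULL. A minimal sketch of the problematic ordering, reconstructed from the diff rather than quoted verbatim:

/*
 * Sketch only; assumes the ordering shown in the init-path hunk below.
 * Identifiers match the driver, unrelated setup is elided.
 */
static int __init nvmet_rdma_init(void)
{
	int ret;

	ret = ib_register_client(&nvmet_rdma_ib_client);
	if (ret)
		return ret;

	/* from here on the transport is live and connects can arrive ... */
	ret = nvmet_register_transport(&nvmet_rdma_ops);
	if (ret)
		goto err_ib_client;

	/* ... but the delete workqueue they may flush is created only now */
	nvmet_rdma_delete_wq = alloc_workqueue("nvmet-rdma-delete-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvmet_rdma_delete_wq) {
		ret = -ENOMEM;
		goto err_unreg_transport;
	}

	return 0;

err_unreg_transport:
	nvmet_unregister_transport(&nvmet_rdma_ops);
err_ib_client:
	ib_unregister_client(&nvmet_rdma_ib_client);
	return ret;
}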

--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -122,7 +122,6 @@ struct nvmet_rdma_device {
 	int			inline_page_count;
 };
 
-static struct workqueue_struct *nvmet_rdma_delete_wq;
 static bool nvmet_rdma_use_srq;
 module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
 MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
@@ -1274,12 +1273,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
 
 	if (queue->host_qid == 0) {
 		/* Let inflight controller teardown complete */
-		flush_workqueue(nvmet_rdma_delete_wq);
+		flush_scheduled_work();
 	}
 
 	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
 	if (ret) {
-		queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+		schedule_work(&queue->release_work);
 		/* Destroying rdma_cm id is not needed here */
 		return 0;
 	}
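The restored schedule_work()/flush_scheduled_work() calls fall back to the kernel's shared system workqueue. In kernels of this vintage both helpers are thin inline wrappers, roughly as follows (paraphrased from include/linux/workqueue.h):

/* Paraphrased from include/linux/workqueue.h: both helpers operate on
 * the shared system_wq rather than a driver-private workqueue. */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

static inline void flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}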
@@ -1344,7 +1343,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
 
 	if (disconnect) {
 		rdma_disconnect(queue->cm_id);
-		queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+		schedule_work(&queue->release_work);
 	}
 }
 
@@ -1374,7 +1373,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
 	mutex_unlock(&nvmet_rdma_queue_mutex);
 
 	pr_err("failed to connect queue %d\n", queue->idx);
-	queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+	schedule_work(&queue->release_work);
 }
 
 /**
@@ -1656,17 +1655,8 @@ static int __init nvmet_rdma_init(void)
 	if (ret)
 		goto err_ib_client;
 
-	nvmet_rdma_delete_wq = alloc_workqueue("nvmet-rdma-delete-wq",
-			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
-	if (!nvmet_rdma_delete_wq) {
-		ret = -ENOMEM;
-		goto err_unreg_transport;
-	}
-
 	return 0;
 
-err_unreg_transport:
-	nvmet_unregister_transport(&nvmet_rdma_ops);
 err_ib_client:
 	ib_unregister_client(&nvmet_rdma_ib_client);
 	return ret;
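Net effect on the init path: with no workqueue to allocate, the extra unwind label disappears and the error handling collapses back to a single label. Read together with the hunk above, the function after the revert comes out roughly as below (reconstructed, not quoted verbatim; unrelated setup at the top of the function is elided):

/* Post-revert shape of nvmet_rdma_init(), reconstructed from this
 * commit's hunks; unrelated setup is elided. */
static int __init nvmet_rdma_init(void)
{
	int ret;

	ret = ib_register_client(&nvmet_rdma_ib_client);
	if (ret)
		return ret;

	ret = nvmet_register_transport(&nvmet_rdma_ops);
	if (ret)
		goto err_ib_client;

	return 0;

err_ib_client:
	ib_unregister_client(&nvmet_rdma_ib_client);
	return ret;
}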
@@ -1674,7 +1664,6 @@ static int __init nvmet_rdma_init(void)
 
 static void __exit nvmet_rdma_exit(void)
 {
-	destroy_workqueue(nvmet_rdma_delete_wq);
 	nvmet_unregister_transport(&nvmet_rdma_ops);
 	ib_unregister_client(&nvmet_rdma_ib_client);
 	WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));