nvmet: release the sq ref on rdma read errors
On rdma read errors, release the sq ref that was taken when the req was initialized. This avoids a hang in nvmet_sq_destroy() when the queue is being freed.

Signed-off-by: Vijay Immanuel <vijayi@attalasystems.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 4b8ba5fa52
commit 549f01ae7b
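Why the missing put causes a hang: nvmet_req_init() takes a reference on the submission queue, and nvmet_sq_destroy() waits for all such references to drop before freeing the queue. The following is a minimal userspace model of that pattern (plain C atomics standing in for the kernel's percpu_ref; all names are illustrative, not the kernel's), showing the failure mode this patch fixes:

	/* Userspace model (not kernel code) of the nvmet sq refcount pattern. */
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int sq_ref = 1;           /* initial ref: "queue is alive" */

	static void req_init(void)              /* models nvmet_req_init() */
	{
		atomic_fetch_add(&sq_ref, 1);   /* one ref per in-flight request */
	}

	static void req_put(void)               /* models nvmet_req_complete()
						 * or the new nvmet_req_uninit() */
	{
		atomic_fetch_sub(&sq_ref, 1);
	}

	static void sq_destroy(void)            /* models nvmet_sq_destroy() */
	{
		atomic_fetch_sub(&sq_ref, 1);   /* drop the initial "alive" ref */
		while (atomic_load(&sq_ref) != 0)
			;                       /* kernel: wait_for_completion();
						 * spins forever if a ref leaked */
		puts("queue freed");
	}

	int main(void)
	{
		req_init();
		/* Error path: if this req_put() is skipped -- as it was when an
		 * RDMA READ failed before the patch -- sq_destroy() never returns. */
		req_put();
		sq_destroy();
		return 0;
	}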
drivers/nvme/target/core.c:
@@ -529,6 +529,12 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 }
 EXPORT_SYMBOL_GPL(nvmet_req_init);
 
+void nvmet_req_uninit(struct nvmet_req *req)
+{
+	percpu_ref_put(&req->sq->ref);
+}
+EXPORT_SYMBOL_GPL(nvmet_req_uninit);
+
 static inline bool nvmet_cc_en(u32 cc)
 {
 	return cc & 0x1;
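Exporting nvmet_req_uninit() as a helper, rather than having transports touch req->sq->ref directly, keeps the submission-queue reference counting an implementation detail of the core and gives nvmet_req_init() a symmetric teardown counterpart for error paths that never reach nvmet_req_complete().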
drivers/nvme/target/nvmet.h:
@@ -261,6 +261,7 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);
 
 bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 		struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops);
+void nvmet_req_uninit(struct nvmet_req *req);
 void nvmet_req_complete(struct nvmet_req *req, u16 status);
 
 void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
drivers/nvme/target/rdma.c:
@@ -567,6 +567,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
 	rsp->n_rdma = 0;
 
 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+		nvmet_req_uninit(&rsp->req);
 		nvmet_rdma_release_rsp(rsp);
 		if (wc->status != IB_WC_WR_FLUSH_ERR) {
 			pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
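With this change, a failed RDMA READ completion drops the sq reference taken in nvmet_req_init() before the response is released, so nvmet_sq_destroy() can observe the reference count reach zero and free the queue instead of hanging.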