nvme: move common call to nvme_cleanup_cmd to core layer
author	Max Gurtovoy <maxg@mellanox.com>
Sun, 13 Oct 2019 16:57:36 +0000 (19:57 +0300)
committer	Jens Axboe <axboe@kernel.dk>
Mon, 4 Nov 2019 17:56:41 +0000 (10:56 -0700)
nvme_cleanup_cmd should be called for each call to nvme_setup_cmd (the
functions are symmetrical). Move the call to nvme_cleanup_cmd into the
common core layer and invoke it from nvme_complete_rq for the good flow.
In the error flow, each transport calls nvme_cleanup_cmd independently.
Also take care of the special case of path failure, where nvme_complete_rq
is called without a prior nvme_setup_cmd.
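For reference, and not part of this patch: nvme_cleanup_cmd() in this era
of the driver looks roughly like the sketch below. Its body is guarded by
RQF_SPECIAL_PAYLOAD, which only nvme_setup_discard() sets, so calling it on
a request that never went through nvme_setup_cmd() degenerates to a no-op;
that is what makes the path-failure case above safe.

	/* Simplified sketch of the contemporaneous helper, for context only. */
	void nvme_cleanup_cmd(struct request *req)
	{
		if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
			struct nvme_ns *ns = req->rq_disk->private_data;
			struct page *page = req->special_vec.bv_page;

			/* Discard payloads come from a preallocated page or kmalloc. */
			if (page == ns->ctrl->discard_page)
				clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
			else
				kfree(page_address(page) + req->special_vec.bv_offset);
		}
	}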

Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/loop.c

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index dfa122b..9c74361 100644
@@ -268,6 +268,8 @@ void nvme_complete_rq(struct request *req)
 
        trace_nvme_complete_rq(req);
 
+       nvme_cleanup_cmd(req);
+
        if (nvme_req(req)->ctrl->kas)
                nvme_req(req)->ctrl->comp_seen = true;
 
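With this hunk applied, the completion path reads approximately as follows
(a simplified sketch; the retry/failover logic is paraphrased from core.c
of the same era and shown only to illustrate the ordering). Note that
nvme_cleanup_cmd() now runs before the retry and multipath-failover checks,
so a request that is requeued to another path is set up from scratch by the
next nvme_setup_cmd():

	void nvme_complete_rq(struct request *req)
	{
		blk_status_t status = nvme_error_status(nvme_req(req)->status);

		trace_nvme_complete_rq(req);

		nvme_cleanup_cmd(req);	/* new: free any discard payload first */

		if (nvme_req(req)->ctrl->kas)
			nvme_req(req)->ctrl->comp_seen = true;

		if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
			if ((req->cmd_flags & REQ_NVME_MPATH) && nvme_failover_req(req))
				return;

			if (!blk_queue_dying(req->q)) {
				nvme_retry_req(req);
				return;
			}
		}

		blk_mq_end_request(req, status);
	}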
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 714a1c3..679a721 100644
@@ -2173,8 +2173,6 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
        fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
                        rq_dma_dir(rq));
 
-       nvme_cleanup_cmd(rq);
-
        sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
 
        freq->sg_cnt = 0;
@@ -2305,6 +2303,7 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
                if (!(op->flags & FCOP_FLAGS_AEN))
                        nvme_fc_unmap_data(ctrl, op->rq, op);
 
+               nvme_cleanup_cmd(op->rq);
                nvme_fc_ctrl_put(ctrl);
 
                if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 7082116..612f922 100644
@@ -924,7 +924,6 @@ static void nvme_pci_complete_rq(struct request *req)
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct nvme_dev *dev = iod->nvmeq->dev;
 
-       nvme_cleanup_cmd(req);
        if (blk_integrity_rq(req))
                dma_unmap_page(dev->dev, iod->meta_dma,
                               rq_integrity_vec(req)->bv_len, rq_data_dir(req));
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 154fa4e..05f2dfa 100644
@@ -1160,8 +1160,6 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
        }
 
        ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
-
-       nvme_cleanup_cmd(rq);
        sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
 }
 
@@ -1760,7 +1758,6 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (unlikely(err < 0)) {
                dev_err(queue->ctrl->ctrl.device,
                             "Failed to map data (%d)\n", err);
-               nvme_cleanup_cmd(rq);
                goto err;
        }
 
@@ -1771,18 +1768,19 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
                        req->mr ? &req->reg_wr.wr : NULL);
-       if (unlikely(err)) {
-               nvme_rdma_unmap_data(queue, rq);
-               goto err;
-       }
+       if (unlikely(err))
+               goto err_unmap;
 
        return BLK_STS_OK;
 
+err_unmap:
+       nvme_rdma_unmap_data(queue, rq);
 err:
        if (err == -ENOMEM || err == -EAGAIN)
                ret = BLK_STS_RESOURCE;
        else
                ret = BLK_STS_IOERR;
+       nvme_cleanup_cmd(rq);
 unmap_qe:
        ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
                            DMA_TO_DEVICE);
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index bd1f81f..5b7b197 100644
@@ -76,7 +76,6 @@ static void nvme_loop_complete_rq(struct request *req)
 {
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
 
-       nvme_cleanup_cmd(req);
        sg_free_table_chained(&iod->sg_table, SG_CHUNK_SIZE);
        nvme_complete_rq(req);
 }