nvme-pci: use mapped entries for sgl decision
author    Keith Busch <kbusch@kernel.org>
          Thu, 5 Jan 2023 20:28:31 +0000 (12:28 -0800)
committer Christoph Hellwig <hch@lst.de>
          Wed, 1 Feb 2023 13:21:59 +0000 (14:21 +0100)
The driver uses the DMA-mapped entries when setting up its command's
SGL/PRP lists. The DMA mapping may have fewer entries than there are
physical segments (for example when an IOMMU coalesces adjacent
segments), so check the DMA-mapped count to decide which NVMe data
layout method is the better fit.

Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
drivers/nvme/host/pci.c

index 9d6d171..5951a51 100644
@@ -515,10 +515,10 @@ static void **nvme_pci_iod_list(struct request *req)
        return (void **)(iod->sgt.sgl + blk_rq_nr_phys_segments(req));
 }
 
-static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
+static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req,
+                                    int nseg)
 {
        struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
-       int nseg = blk_rq_nr_phys_segments(req);
        unsigned int avg_seg_size;
 
        avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
@@ -818,7 +818,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
                goto out_free_sg;
        }
 
-       iod->use_sgl = nvme_pci_use_sgls(dev, req);
+       iod->use_sgl = nvme_pci_use_sgls(dev, req, iod->sgt.nents);
        if (iod->use_sgl)
                ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
        else
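
For illustration only, a minimal user-space sketch of the heuristic this
patch now feeds with the mapped entry count: the average segment size is
the payload divided (rounding up) by the number of entries, and SGLs are
preferred once that average reaches the sgl_threshold module parameter.
The standalone names (prefer_sgl) and the sample payload/threshold values
are assumptions for this example; the real nvme_pci_use_sgls() also
requires controller SGL support and an I/O queue.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative user-space sketch, not driver code: mirrors the average
 * segment size computation used by nvme_pci_use_sgls(). */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static bool prefer_sgl(unsigned int payload_bytes, int mapped_nents,
		       unsigned int sgl_threshold)
{
	unsigned int avg_seg_size = DIV_ROUND_UP(payload_bytes, mapped_nents);

	/* Small average segments favor PRPs; large ones favor SGLs. */
	if (!sgl_threshold || avg_seg_size < sgl_threshold)
		return false;
	return true;
}

int main(void)
{
	/* A 256 KiB request against a 32 KiB threshold: 32 physical
	 * segments average 8 KiB (PRP), but if the IOMMU maps them as
	 * 8 DMA entries the average is 32 KiB and SGLs win. */
	printf("nseg=32  -> %s\n", prefer_sgl(262144, 32, 32768) ? "SGL" : "PRP");
	printf("nents=8  -> %s\n", prefer_sgl(262144, 8, 32768) ? "SGL" : "PRP");
	return 0;
}

The same payload can flip from PRP to SGL once coalescing reduces the
entry count, which is why the decision is now based on iod->sgt.nents
rather than blk_rq_nr_phys_segments().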