nvme-fc: Avoid preallocating big SGL for data
author		Israel Rukshin <israelr@mellanox.com>	Sun, 24 Nov 2019 16:38:31 +0000 (18:38 +0200)
committer	Keith Busch <kbusch@kernel.org>	Tue, 26 Nov 2019 17:14:01 +0000 (02:14 +0900)
nvme_fc_create_io_queues() preallocates a big buffer for the IO SGL based
on SG_CHUNK_SIZE.

Modern DMA engines are often capable of dealing with very big segments, so
SG_CHUNK_SIZE is often too big. It results in a static 4KB SGL allocation
per command.

If a controller has many deep queues, preallocation for the sg list can
consume substantial amounts of memory. For nvme-fc, nr_hw_queues can be
128 and each queue's depth 128. This means the resulting preallocation
for the data SGL is 128*128*4K = 64MB per controller.
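
As a sanity check on those figures, a small userspace sketch of the
arithmetic. SG_CHUNK_SIZE is 128 (include/linux/scatterlist.h); the
32-byte sizeof(struct scatterlist) is an assumption typical of 64-bit
builds:

	#include <stdio.h>

	#define SG_CHUNK_SIZE	128	/* entries, per include/linux/scatterlist.h */
	#define SG_ENTRY_SIZE	32	/* assumed sizeof(struct scatterlist), 64-bit */
	#define NR_HW_QUEUES	128
	#define QUEUE_DEPTH	128

	int main(void)
	{
		unsigned long per_cmd = SG_CHUNK_SIZE * SG_ENTRY_SIZE;
		unsigned long per_ctrl = (unsigned long)NR_HW_QUEUES *
					 QUEUE_DEPTH * per_cmd;

		printf("per command:    %lu bytes\n", per_cmd);		/* 4096 */
		printf("per controller: %lu MB\n", per_ctrl >> 20);	/* 64 */
		return 0;
	}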

Switch to runtime SGL allocation for lists longer than 2 entries. This
is the approach used by NVMe PCI, so it should be reasonable for NVMeOF as
well. Runtime SGL allocation has always been the case for the legacy I/O
path, so this is nothing new.
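
The saving comes from sg_alloc_table_chained()'s first-chunk support,
visible in the hunks below: a request with at most NVME_INLINE_SG_CNT
segments (2, per the above) uses the embedded array with no allocation
at all, while larger requests get chained SGL chunks allocated at
runtime. A minimal sketch of the pattern, with illustrative variable
names (nr_segs, tbl) rather than the exact driver code:

	struct scatterlist inline_sg[NVME_INLINE_SG_CNT]; /* embedded mini-SGL */
	struct sg_table tbl;
	int ret;

	tbl.sgl = inline_sg;
	/*
	 * <= NVME_INLINE_SG_CNT segments: inline_sg is used directly.
	 * >  NVME_INLINE_SG_CNT segments: extra chunks are chained in now.
	 */
	ret = sg_alloc_table_chained(&tbl, nr_segs, inline_sg,
			NVME_INLINE_SG_CNT);
	if (ret)
		return -ENOMEM;

	/* ... map and issue the I/O ... */

	/* Frees only what was chained beyond the inline array. */
	sg_free_table_chained(&tbl, NVME_INLINE_SG_CNT);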

Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
Reviewed-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Israel Rukshin <israelr@mellanox.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 679a721..13cb00e 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -95,7 +95,7 @@ struct nvme_fc_fcp_op {
 
 struct nvme_fcp_op_w_sgl {
        struct nvme_fc_fcp_op   op;
-       struct scatterlist      sgl[SG_CHUNK_SIZE];
+       struct scatterlist      sgl[NVME_INLINE_SG_CNT];
        uint8_t                 priv[0];
 };
 
@@ -2141,7 +2141,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
        freq->sg_table.sgl = freq->first_sgl;
        ret = sg_alloc_table_chained(&freq->sg_table,
                        blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
-                       SG_CHUNK_SIZE);
+                       NVME_INLINE_SG_CNT);
        if (ret)
                return -ENOMEM;
 
@@ -2150,7 +2150,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
        freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
                                op->nents, rq_dma_dir(rq));
        if (unlikely(freq->sg_cnt <= 0)) {
-               sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
+               sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
                freq->sg_cnt = 0;
                return -EFAULT;
        }
@@ -2173,7 +2173,7 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
        fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
                        rq_dma_dir(rq));
 
-       sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
+       sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
 
        freq->sg_cnt = 0;
 }