nvme-fabrics: allow nvmf_connect_io_queue to poll
author	Sagi Grimberg <sagi@grimberg.me>
Fri, 14 Dec 2018 19:06:08 +0000 (11:06 -0800)
committer	Christoph Hellwig <hch@lst.de>
Tue, 18 Dec 2018 16:50:48 +0000 (17:50 +0100)
Preparation for polling support for fabrics. With polling support,
our completion queues do not generate any interrupts, so we need to
be able to poll for the nvmf I/O queue connect command as well.

Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
drivers/nvme/host/fabrics.c
drivers/nvme/host/fabrics.h
drivers/nvme/host/fc.c
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/target/loop.c
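
[Editorial note, not part of this patch: every caller below passes false for the
new 'poll' argument; a polling-capable transport would pass true when connecting
an I/O queue whose completion queue raises no interrupts, so that
__nvme_submit_sync_cmd() polls for the connect completion. The sketch below is a
hypothetical illustration of such a caller; nvme_foo_queue_polled() and the
nvme_foo_* names are made-up placeholders for whatever per-queue state a real
transport keeps.]

/*
 * Hypothetical sketch only: connect an admin or I/O queue, polling for
 * the connect completion when the queue's CQ is interrupt-less.
 */
static int nvme_foo_start_queue(struct nvme_ctrl *nctrl, int idx)
{
	struct nvme_foo_ctrl *ctrl = to_foo_ctrl(nctrl);
	int ret;

	if (idx)
		/* I/O queue: poll for the connect if this queue does not interrupt */
		ret = nvmf_connect_io_queue(nctrl, idx,
				nvme_foo_queue_polled(&ctrl->queues[idx]));
	else
		/* admin queue connect is unchanged by this patch */
		ret = nvmf_connect_admin_queue(nctrl);

	if (ret)
		dev_err(nctrl->device,
			"failed to connect queue %d, ret=%d\n", idx, ret);
	return ret;
}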

index d93a169..5ff14f4 100644 (file)
@@ -441,7 +441,7 @@ EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);
  *     > 0: NVMe error status code
  *     < 0: Linux errno error code
  */
-int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
+int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll)
 {
        struct nvme_command cmd;
        struct nvmf_connect_data *data;
@@ -468,7 +468,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
 
        ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
                        data, sizeof(*data), 0, qid, 1,
-                       BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, false);
+                       BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, poll);
        if (ret) {
                nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
                                       &cmd, data);
index 81b8fd1..cad70a9 100644 (file)
@@ -168,7 +168,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
 int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
 int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
 int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl);
-int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid);
+int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll);
 int nvmf_register_transport(struct nvmf_transport_ops *ops);
 void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
 void nvmf_free_options(struct nvmf_ctrl_options *opts);
index b79e419..89accc7 100644 (file)
@@ -1975,7 +1975,7 @@ nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
                                        (qsize / 5));
                if (ret)
                        break;
-               ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
+               ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
                if (ret)
                        break;
 
index ed726da..b907ed4 100644 (file)
@@ -598,7 +598,7 @@ static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
        int ret;
 
        if (idx)
-               ret = nvmf_connect_io_queue(&ctrl->ctrl, idx);
+               ret = nvmf_connect_io_queue(&ctrl->ctrl, idx, false);
        else
                ret = nvmf_connect_admin_queue(&ctrl->ctrl);
 
index f49cbe9..de17491 100644 (file)
@@ -1394,7 +1394,7 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
        int ret;
 
        if (idx)
-               ret = nvmf_connect_io_queue(nctrl, idx);
+               ret = nvmf_connect_io_queue(nctrl, idx, false);
        else
                ret = nvmf_connect_admin_queue(nctrl);
 
index 9908082..4aac1b4 100644 (file)
@@ -345,7 +345,7 @@ static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
        int i, ret;
 
        for (i = 1; i < ctrl->ctrl.queue_count; i++) {
-               ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
+               ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
                if (ret)
                        return ret;
                set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);