diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index b95ee85..e82dcfc 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -209,9 +209,11 @@ static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
        return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
 }
 
-static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
+static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
 {
-       return queue->cmnd_capsule_len - sizeof(struct nvme_command);
+       if (nvme_is_fabrics(req->req.cmd))
+               return NVME_TCP_ADMIN_CCSZ;
+       return req->queue->cmnd_capsule_len - sizeof(struct nvme_command);
 }
 
 static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
@@ -229,7 +231,7 @@ static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
        rq = blk_mq_rq_from_pdu(req);
 
        return rq_data_dir(rq) == WRITE && req->data_len &&
-               req->data_len <= nvme_tcp_inline_data_size(req->queue);
+               req->data_len <= nvme_tcp_inline_data_size(req);
 }
 
 static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
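The change above sizes in-capsule data per request instead of per queue: fabrics commands (notably the I/O-queue Connect) are now checked against a fixed capsule size rather than the queue's negotiated cmnd_capsule_len, so their data can travel in-capsule before the queue's capsule size has been negotiated. A minimal sketch of the two definitions this relies on, assuming the upstream nvme_is_fabrics() helper from include/linux/nvme.h and that NVME_TCP_ADMIN_CCSZ is defined elsewhere in this patch (upstream uses SZ_8K):

/* A command is a fabrics command when it carries the dedicated
 * fabrics opcode; Connect and Property Get/Set all do.
 */
static inline bool nvme_is_fabrics(struct nvme_command *cmd)
{
	return cmd->common.opcode == nvme_fabrics_command;
}

/* Assumed definition, added elsewhere in this patch: fixed capsule
 * budget applied to fabrics commands on any queue.
 */
#define NVME_TCP_ADMIN_CCSZ	SZ_8K
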
@@ -1685,45 +1687,49 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
        return ret;
 }
 
-static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
-               bool admin)
+static int nvme_tcp_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
 {
        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
-       struct blk_mq_tag_set *set;
+       struct blk_mq_tag_set *set = &ctrl->admin_tag_set;
        int ret;
 
-       if (admin) {
-               set = &ctrl->admin_tag_set;
-               memset(set, 0, sizeof(*set));
-               set->ops = &nvme_tcp_admin_mq_ops;
-               set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
-               set->reserved_tags = NVMF_RESERVED_TAGS;
-               set->numa_node = nctrl->numa_node;
-               set->flags = BLK_MQ_F_BLOCKING;
-               set->cmd_size = sizeof(struct nvme_tcp_request);
-               set->driver_data = ctrl;
-               set->nr_hw_queues = 1;
-               set->timeout = NVME_ADMIN_TIMEOUT;
-       } else {
-               set = &ctrl->tag_set;
-               memset(set, 0, sizeof(*set));
-               set->ops = &nvme_tcp_mq_ops;
-               set->queue_depth = nctrl->sqsize + 1;
-               set->reserved_tags = NVMF_RESERVED_TAGS;
-               set->numa_node = nctrl->numa_node;
-               set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
-               set->cmd_size = sizeof(struct nvme_tcp_request);
-               set->driver_data = ctrl;
-               set->nr_hw_queues = nctrl->queue_count - 1;
-               set->timeout = NVME_IO_TIMEOUT;
-               set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
-       }
-
+       memset(set, 0, sizeof(*set));
+       set->ops = &nvme_tcp_admin_mq_ops;
+       set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+       set->reserved_tags = NVMF_RESERVED_TAGS;
+       set->numa_node = nctrl->numa_node;
+       set->flags = BLK_MQ_F_BLOCKING;
+       set->cmd_size = sizeof(struct nvme_tcp_request);
+       set->driver_data = ctrl;
+       set->nr_hw_queues = 1;
+       set->timeout = NVME_ADMIN_TIMEOUT;
        ret = blk_mq_alloc_tag_set(set);
-       if (ret)
-               return ERR_PTR(ret);
+       if (!ret)
+               nctrl->admin_tagset = set;
+       return ret;
+}
 
-       return set;
+static int nvme_tcp_alloc_tag_set(struct nvme_ctrl *nctrl)
+{
+       struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+       struct blk_mq_tag_set *set = &ctrl->tag_set;
+       int ret;
+
+       memset(set, 0, sizeof(*set));
+       set->ops = &nvme_tcp_mq_ops;
+       set->queue_depth = nctrl->sqsize + 1;
+       set->reserved_tags = NVMF_RESERVED_TAGS;
+       set->numa_node = nctrl->numa_node;
+       set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
+       set->cmd_size = sizeof(struct nvme_tcp_request);
+       set->driver_data = ctrl;
+       set->nr_hw_queues = nctrl->queue_count - 1;
+       set->timeout = NVME_IO_TIMEOUT;
+       set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
+       ret = blk_mq_alloc_tag_set(set);
+       if (!ret)
+               nctrl->tagset = set;
+       return ret;
 }
 
 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
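The tag-set helper is split so that each variant returns a plain errno and publishes the set through nctrl itself, rather than handing back an ERR_PTR-encoded pointer for the caller to unwrap. Distilled from the call-site hunks below, the before/after pattern looks like this (sketch only; ret is the caller's local errno variable):

/* Before: caller stores and unwraps an ERR_PTR-encoded pointer. */
ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
if (IS_ERR(ctrl->tagset)) {
	ret = PTR_ERR(ctrl->tagset);
	goto out_free_io_queues;
}

/* After: the helper assigns nctrl->tagset on success and returns
 * an errno, so the caller only tests an int.
 */
ret = nvme_tcp_alloc_tag_set(ctrl);
if (ret)
	goto out_free_io_queues;
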
@@ -1899,11 +1905,9 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
                return ret;
 
        if (new) {
-               ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
-               if (IS_ERR(ctrl->tagset)) {
-                       ret = PTR_ERR(ctrl->tagset);
+               ret = nvme_tcp_alloc_tag_set(ctrl);
+               if (ret)
                        goto out_free_io_queues;
-               }
 
                ret = nvme_ctrl_init_connect_q(ctrl);
                if (ret)
@@ -1968,11 +1972,9 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
                return error;
 
        if (new) {
-               ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
-               if (IS_ERR(ctrl->admin_tagset)) {
-                       error = PTR_ERR(ctrl->admin_tagset);
+               error = nvme_tcp_alloc_admin_tag_set(ctrl);
+               if (error)
                        goto out_free_queue;
-               }
 
                ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
                if (IS_ERR(ctrl->fabrics_q)) {
@@ -2173,6 +2175,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
                                struct nvme_tcp_ctrl, err_work);
        struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
 
+       nvme_auth_stop(ctrl);
        nvme_stop_keep_alive(ctrl);
        flush_work(&ctrl->async_event_work);
        nvme_tcp_teardown_io_queues(ctrl, false);
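The added nvme_auth_stop() call makes error recovery cancel any in-flight DH-HMAC-CHAP authentication work before keep-alive is stopped and the queues are torn down, since that work issues commands on the very queues being freed. A hedged sketch of the interface, assuming nvme.h follows its usual pattern of providing an empty stub when CONFIG_NVME_AUTH is disabled:

#ifdef CONFIG_NVME_AUTH
void nvme_auth_stop(struct nvme_ctrl *ctrl);	/* cancel queued auth work */
#else
static inline void nvme_auth_stop(struct nvme_ctrl *ctrl) {}
#endif
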
@@ -2371,7 +2374,7 @@ static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
        if (!blk_rq_nr_phys_segments(rq))
                nvme_tcp_set_sg_null(c);
        else if (rq_data_dir(rq) == WRITE &&
-           req->data_len <= nvme_tcp_inline_data_size(queue))
+           req->data_len <= nvme_tcp_inline_data_size(req))
                nvme_tcp_set_sg_inline(queue, c, req->data_len);
        else
                nvme_tcp_set_sg_host_data(c, req->data_len);
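Only the helper's argument changes here; the three SGL cases themselves are untouched: no payload maps to a NULL descriptor, a write small enough to fit in-capsule maps to an inline descriptor, and everything else maps to a host-data descriptor transferred via separate data PDUs. For reference, the inline setter in this file looks like the following (quoted from memory of the upstream source, not part of this diff):

static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
		struct nvme_command *c, u32 data_len)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
	sg->length = cpu_to_le32(data_len);
	sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
}
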
@@ -2406,7 +2409,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
                nvme_tcp_init_iter(req, rq_data_dir(rq));
 
        if (rq_data_dir(rq) == WRITE &&
-           req->data_len <= nvme_tcp_inline_data_size(queue))
+           req->data_len <= nvme_tcp_inline_data_size(req))
                req->pdu_len = req->data_len;
 
        pdu->hdr.type = nvme_tcp_cmd;
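Here pdu_len, the in-capsule byte count advertised in the command PDU header, is only set when the write qualifies as inline, and the request-based size check keeps this site consistent with nvme_tcp_map_data() above. A worked example of why the per-request helper matters, assuming the upstream capsule setup where an I/O queue's cmnd_capsule_len is ioccsz * 16:

/* Hypothetical helper for illustration: inline budget of an I/O
 * queue given the controller's reported ioccsz (capsule size in
 * 16-byte units). With the minimum ioccsz of 4, the capsule is
 * 4 * 16 = 64 bytes and sizeof(struct nvme_command) is also 64,
 * leaving a budget of 0. Sized against the queue, an I/O-queue
 * Connect could therefore never carry its data in-capsule; sized
 * against the fixed NVME_TCP_ADMIN_CCSZ it always can.
 */
static inline size_t io_queue_inline_budget(u32 ioccsz)
{
	return ioccsz * 16 - sizeof(struct nvme_command);
}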