Merge tag 'gpio-fixes-for-v5.13-rc3' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f2d1f2d..762125f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -589,9 +589,6 @@ EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);
 
 static inline void nvme_clear_nvme_request(struct request *req)
 {
-       struct nvme_command *cmd = nvme_req(req)->cmd;
-
-       memset(cmd, 0, sizeof(*cmd));
        nvme_req(req)->retries = 0;
        nvme_req(req)->flags = 0;
        req->rq_flags |= RQF_DONTPREP;
@@ -642,6 +639,66 @@ static struct request *nvme_alloc_request_qid(struct request_queue *q,
        return req;
 }
 
+/*
+ * If we are not in a state where the command can be sent to the device, the
+ * default action is to busy it and retry it after the controller state has
+ * recovered.  However, if the controller is deleting, or if the request is
+ * marked failfast or is an nvme multipath request, it is failed immediately.
+ *
+ * Note: commands used to initialize the controller will be marked for failfast.
+ * Note: nvme cli/ioctl commands are marked for failfast.
+ */
+blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
+               struct request *rq)
+{
+       if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
+           ctrl->state != NVME_CTRL_DEAD &&
+           !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
+           !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
+               return BLK_STS_RESOURCE;
+       return nvme_host_path_error(rq);
+}
+EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);
+
+bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
+               bool queue_live)
+{
+       struct nvme_request *req = nvme_req(rq);
+
+       /*
+        * Currently we have a problem sending passthru commands on the
+        * admin_q if the controller is not LIVE, because we can't make sure
+        * that they are issued after the admin connect, controller enable
+        * and/or other commands in the initialization sequence.  Until the
+        * controller is LIVE, fail with BLK_STS_RESOURCE so that they are
+        * rescheduled.
+        */
+       if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
+               return false;
+
+       if (ctrl->ops->flags & NVME_F_FABRICS) {
+               /*
+                * Only allow commands on a live queue, except for the connect
+                * command, which is required to set the queue live in the
+                * appropriate states.
+                */
+               switch (ctrl->state) {
+               case NVME_CTRL_CONNECTING:
+                       if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
+                           req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
+                               return true;
+                       break;
+               case NVME_CTRL_DEAD:
+                       return false;
+               default:
+                       break;
+               }
+       }
+
+       return queue_live;
+}
+EXPORT_SYMBOL_GPL(__nvme_check_ready);
+
 static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
 {
        struct nvme_command c;
@@ -903,8 +960,10 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
        struct nvme_command *cmd = nvme_req(req)->cmd;
        blk_status_t ret = BLK_STS_OK;
 
-       if (!(req->rq_flags & RQF_DONTPREP))
+       if (!(req->rq_flags & RQF_DONTPREP)) {
                nvme_clear_nvme_request(req);
+               memset(cmd, 0, sizeof(*cmd));
+       }
 
        switch (req_op(req)) {
        case REQ_OP_DRV_IN:
@@ -2842,7 +2901,7 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
                ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
        }
 
-       ret = nvme_mpath_init(ctrl, id);
+       ret = nvme_mpath_init_identify(ctrl, id);
        if (ret < 0)
                goto out_free;
 
@@ -4305,6 +4364,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
                min(default_ps_max_latency_us, (unsigned long)S32_MAX));
 
        nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
+       nvme_mpath_init_ctrl(ctrl);
 
        return 0;
 out_free_name:
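
Usage note (not part of the diff above): the two helpers exported here,
__nvme_check_ready() and nvme_fail_nonready_command(), are meant to be called
from a fabrics transport's ->queue_rq() handler before a command is issued.
The sketch below is illustrative only; the "foo" transport, its queue
structure and its NVME_FOO_Q_LIVE flag are hypothetical placeholders modeled
on how the in-tree fabrics drivers gate command submission.

/* Hypothetical "foo" fabrics transport -- illustration only, not in this diff. */
static blk_status_t nvme_foo_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_foo_queue *queue = hctx->driver_data;
	struct nvme_ctrl *ctrl = queue->ctrl;	/* hypothetical field layout */
	struct request *rq = bd->rq;
	bool queue_ready = test_bit(NVME_FOO_Q_LIVE, &queue->flags);

	/*
	 * Command cannot be issued in the current controller/queue state:
	 * let the core decide between BLK_STS_RESOURCE (requeue and retry
	 * once the controller recovers) and an immediate host path error.
	 */
	if (!__nvme_check_ready(ctrl, rq, queue_ready))
		return nvme_fail_nonready_command(ctrl, rq);

	/* ... normal command setup and submission would follow here ... */
	return BLK_STS_OK;
}

The in-tree drivers typically reach __nvme_check_ready() through an inline
wrapper that short-circuits the common live-controller case; the sketch calls
the exported helper directly for brevity.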