nvme: fix handling of metadata_len for NVME_IOCTL_IO_CMD
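
The ioctl handler was passing cmd.metadata (the user-space pointer value) to
nvme_submit_user_cmd() in the argument slot that expects the metadata buffer
length; the change below passes cmd.metadata_len instead, so the metadata
buffer is mapped with its real size. As a rough illustration of the
user-visible interface (not part of this patch), here is a minimal sketch of
issuing a passthrough read with separate data and metadata buffers; the device
path, namespace id, 4096-byte data buffer and 8-byte metadata buffer are
assumptions for the example only.

/*
 * Hedged illustration only: exercise NVME_IOCTL_IO_CMD with a separate
 * metadata buffer.  Sizes and device path are assumptions for the example.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	unsigned char data[4096], meta[8];
	struct nvme_passthru_cmd cmd;
	int fd, ret;

	fd = open("/dev/nvme0n1", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode       = 0x02;		/* NVMe I/O read */
	cmd.nsid         = 1;
	cmd.addr         = (uintptr_t)data;	/* data buffer */
	cmd.data_len     = sizeof(data);	/* data length in bytes */
	cmd.metadata     = (uintptr_t)meta;	/* metadata buffer */
	cmd.metadata_len = sizeof(meta);	/* length the driver must honour */
	cmd.cdw12        = 0;			/* NLB = 0 -> one logical block */

	ret = ioctl(fd, NVME_IOCTL_IO_CMD, &cmd);
	if (ret < 0)
		perror("NVME_IOCTL_IO_CMD");
	else if (ret != 0)
		fprintf(stderr, "NVMe status: 0x%x\n", ret);

	close(fd);
	return ret != 0;
}

With the old code the length argument was whatever the pointer value happened
to be, so the metadata mapping could be mis-sized; after the fix, tools that
fill in metadata_len get the buffer mapped as requested.
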
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index effb130..bf65501 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -100,6 +100,22 @@ static struct class *nvme_subsys_class;
 static void nvme_ns_remove(struct nvme_ns *ns);
 static int nvme_revalidate_disk(struct gendisk *disk);
 static void nvme_put_subsystem(struct nvme_subsystem *subsys);
+static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
+                                          unsigned nsid);
+
+static void nvme_set_queue_dying(struct nvme_ns *ns)
+{
+       /*
+        * Revalidating a dead namespace sets capacity to 0. This will end
+        * buffered writers dirtying pages that can't be synced.
+        */
+       if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+               return;
+       revalidate_disk(ns->disk);
+       blk_set_queue_dying(ns->queue);
+       /* Forcibly unquiesce queues to avoid blocking dispatch */
+       blk_mq_unquiesce_queue(ns->queue);
+}
 
 static void nvme_queue_scan(struct nvme_ctrl *ctrl)
 {
@@ -1044,14 +1060,17 @@ EXPORT_SYMBOL_GPL(nvme_set_queue_count);
 
 static void nvme_enable_aen(struct nvme_ctrl *ctrl)
 {
-       u32 result;
+       u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
        int status;
 
-       status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT,
-                       ctrl->oaes & NVME_AEN_SUPPORTED, NULL, 0, &result);
+       if (!supported_aens)
+               return;
+
+       status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
+                       NULL, 0, &result);
        if (status)
                dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
-                        ctrl->oaes & NVME_AEN_SUPPORTED);
+                        supported_aens);
 }
 
 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
@@ -1151,19 +1170,15 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 
 static void nvme_update_formats(struct nvme_ctrl *ctrl)
 {
-       struct nvme_ns *ns, *next;
-       LIST_HEAD(rm_list);
+       struct nvme_ns *ns;
 
-       down_write(&ctrl->namespaces_rwsem);
-       list_for_each_entry(ns, &ctrl->namespaces, list) {
-               if (ns->disk && nvme_revalidate_disk(ns->disk)) {
-                       list_move_tail(&ns->list, &rm_list);
-               }
-       }
-       up_write(&ctrl->namespaces_rwsem);
+       down_read(&ctrl->namespaces_rwsem);
+       list_for_each_entry(ns, &ctrl->namespaces, list)
+               if (ns->disk && nvme_revalidate_disk(ns->disk))
+                       nvme_set_queue_dying(ns);
+       up_read(&ctrl->namespaces_rwsem);
 
-       list_for_each_entry_safe(ns, next, &rm_list, list)
-               nvme_ns_remove(ns);
+       nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
 }
 
 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
@@ -1218,7 +1233,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
        effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
        status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
                        (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
-                       (void __user *)(uintptr_t)cmd.metadata, cmd.metadata,
+                       (void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
                        0, &cmd.result, timeout);
        nvme_passthru_end(ctrl, effects);
 
@@ -1808,6 +1823,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
                u32 max_segments =
                        (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
 
+               max_segments = min_not_zero(max_segments, ctrl->max_segments);
                blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
                blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
        }
@@ -2208,7 +2224,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
                 * Verify that the subsystem actually supports multiple
                 * controllers, else bail out.
                 */
-               if (!ctrl->opts->discovery_nqn &&
+               if (!(ctrl->opts && ctrl->opts->discovery_nqn) &&
                    nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) {
                        dev_err(ctrl->device,
                                "ignoring ctrl due to duplicate subnqn (%s).\n",
@@ -3137,7 +3153,7 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
 
        down_write(&ctrl->namespaces_rwsem);
        list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
-               if (ns->head->ns_id > nsid)
+               if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
                        list_move_tail(&ns->list, &rm_list);
        }
        up_write(&ctrl->namespaces_rwsem);
@@ -3197,40 +3213,28 @@ static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
        nvme_remove_invalid_namespaces(ctrl, nn);
 }
 
-static bool nvme_scan_changed_ns_log(struct nvme_ctrl *ctrl)
+static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
 {
        size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
        __le32 *log;
-       int error, i;
-       bool ret = false;
+       int error;
 
        log = kzalloc(log_size, GFP_KERNEL);
        if (!log)
-               return false;
+               return;
 
+       /*
+        * We need to read the log to clear the AEN, but we don't want to rely
+        * on it for the changed namespace information as userspace could have
+        * raced with us in reading the log page, which could cause us to miss
+        * updates.
+        */
        error = nvme_get_log(ctrl, NVME_LOG_CHANGED_NS, log, log_size);
-       if (error) {
+       if (error)
                dev_warn(ctrl->device,
                        "reading changed ns log failed: %d\n", error);
-               goto out_free_log;
-       }
-
-       if (log[0] == cpu_to_le32(0xffffffff))
-               goto out_free_log;
-
-       for (i = 0; i < NVME_MAX_CHANGED_NAMESPACES; i++) {
-               u32 nsid = le32_to_cpu(log[i]);
-
-               if (nsid == 0)
-                       break;
-               dev_info(ctrl->device, "rescanning namespace %d.\n", nsid);
-               nvme_validate_ns(ctrl, nsid);
-       }
-       ret = true;
 
-out_free_log:
        kfree(log);
-       return ret;
 }
 
 static void nvme_scan_work(struct work_struct *work)
@@ -3246,9 +3250,8 @@ static void nvme_scan_work(struct work_struct *work)
        WARN_ON_ONCE(!ctrl->tagset);
 
        if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
-               if (nvme_scan_changed_ns_log(ctrl))
-                       goto out_sort_namespaces;
                dev_info(ctrl->device, "rescanning namespaces.\n");
+               nvme_clear_changed_ns_log(ctrl);
        }
 
        if (nvme_identify_ctrl(ctrl, &id))
@@ -3263,7 +3266,6 @@ static void nvme_scan_work(struct work_struct *work)
        nvme_scan_ns_sequential(ctrl, nn);
 out_free_id:
        kfree(id);
-out_sort_namespaces:
        down_write(&ctrl->namespaces_rwsem);
        list_sort(NULL, &ctrl->namespaces, ns_cmp);
        up_write(&ctrl->namespaces_rwsem);
@@ -3555,19 +3557,9 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
        if (ctrl->admin_q)
                blk_mq_unquiesce_queue(ctrl->admin_q);
 
-       list_for_each_entry(ns, &ctrl->namespaces, list) {
-               /*
-                * Revalidating a dead namespace sets capacity to 0. This will
-                * end buffered writers dirtying pages that can't be synced.
-                */
-               if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
-                       continue;
-               revalidate_disk(ns->disk);
-               blk_set_queue_dying(ns->queue);
+       list_for_each_entry(ns, &ctrl->namespaces, list)
+               nvme_set_queue_dying(ns);
 
-               /* Forcibly unquiesce queues to avoid blocking dispatch */
-               blk_mq_unquiesce_queue(ns->queue);
-       }
        up_read(&ctrl->namespaces_rwsem);
 }
 EXPORT_SYMBOL_GPL(nvme_kill_queues);
@@ -3641,16 +3633,6 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
 }
 EXPORT_SYMBOL_GPL(nvme_start_queues);
 
-int nvme_reinit_tagset(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set)
-{
-       if (!ctrl->ops->reinit_request)
-               return 0;
-
-       return blk_mq_tagset_iter(set, set->driver_data,
-                       ctrl->ops->reinit_request);
-}
-EXPORT_SYMBOL_GPL(nvme_reinit_tagset);
-
 int __init nvme_core_init(void)
 {
        int result = -ENOMEM;