// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017-2018 Christoph Hellwig.
 */

#include <linux/backing-dev.h>
#include <linux/moduleparam.h>
#include <trace/events/block.h>
#include "nvme.h"
static bool multipath = true;
module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
	"turn on native support for multiple controllers per subsystem");
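/*
 * Note: with 0444 permissions the parameter is read-only at runtime; native
 * multipath can only be disabled at load/boot time, e.g. with
 * "nvme_core.multipath=N" on the kernel command line (multipath.c is built
 * into the nvme-core module).
 */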
void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_mq_unfreeze_queue(h->disk->queue);
}
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_mq_freeze_queue_wait(h->disk->queue);
}
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_freeze_queue_start(h->disk->queue);
}
/*
 * If multipathing is enabled we always need to use the subsystem instance
 * number for numbering our devices to avoid conflicts between subsystems that
 * have multiple controllers and thus use the multipath-aware subsystem node
 * and those that have a single controller and use the controller node
 * directly.
 */
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
			struct nvme_ctrl *ctrl, int *flags)
{
	if (!multipath) {
		sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
	} else if (ns->head->disk) {
		sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
				ctrl->instance, ns->head->instance);
		*flags = GENHD_FL_HIDDEN;
	} else {
		sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
				ns->head->instance);
	}
}
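/*
 * Example (illustrative instance numbers): a shared namespace with head
 * instance 1 in subsystem 0, reachable via controllers 0 and 2, shows up as
 * the multipath node "nvme0n1" plus the hidden per-path nodes "nvme0c0n1"
 * and "nvme0c2n1".  With multipath disabled the paths are simply named after
 * their controllers, e.g. "nvme0n1" and "nvme2n1".
 */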
bool nvme_failover_req(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	u16 status = nvme_req(req)->status;
	unsigned long flags;

	switch (status & 0x7ff) {
	case NVME_SC_ANA_TRANSITION:
	case NVME_SC_ANA_INACCESSIBLE:
	case NVME_SC_ANA_PERSISTENT_LOSS:
		/*
		 * If we got back an ANA error we know the controller is alive,
		 * but not ready to serve this namespace.  The spec suggests
		 * we should update our general state here, but due to the fact
		 * that the admin and I/O queues are not serialized that is
		 * fundamentally racy.  So instead just clear the current path,
		 * mark the path as pending and kick off a re-read of the ANA
		 * log page ASAP.
		 */
		nvme_mpath_clear_current_path(ns);
		if (ns->ctrl->ana_log_buf) {
			set_bit(NVME_NS_ANA_PENDING, &ns->flags);
			queue_work(nvme_wq, &ns->ctrl->ana_work);
		}
		break;
	case NVME_SC_HOST_PATH_ERROR:
	case NVME_SC_HOST_ABORTED_CMD:
		/*
		 * Temporary transport disruption in talking to the controller.
		 * Try to send on a new path.
		 */
		nvme_mpath_clear_current_path(ns);
		break;
	default:
		/* This was a non-ANA error so follow the normal error path. */
		return false;
	}

	spin_lock_irqsave(&ns->head->requeue_lock, flags);
	blk_steal_bios(&ns->head->requeue_list, req);
	spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
	blk_mq_end_request(req, 0);

	kblockd_schedule_work(&ns->head->requeue_work);
	return true;
}
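/*
 * Note: bios stolen onto head->requeue_list in nvme_failover_req() are
 * resubmitted later by nvme_requeue_work() through the multipath node, so a
 * fresh path is selected for them in nvme_ns_head_submit_bio().
 */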
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->head->disk)
			kblockd_schedule_work(&ns->head->requeue_work);
	}
	up_read(&ctrl->namespaces_rwsem);
}
static const char *nvme_ana_state_names[] = {
	[0]				= "invalid state",
	[NVME_ANA_OPTIMIZED]		= "optimized",
	[NVME_ANA_NONOPTIMIZED]		= "non-optimized",
	[NVME_ANA_INACCESSIBLE]		= "inaccessible",
	[NVME_ANA_PERSISTENT_LOSS]	= "persistent-loss",
	[NVME_ANA_CHANGE]		= "change",
};
bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;
	bool changed = false;
	int node;

	if (!head)
		goto out;

	for_each_node(node) {
		if (ns == rcu_access_pointer(head->current_path[node])) {
			rcu_assign_pointer(head->current_path[node], NULL);
			changed = true;
		}
	}
out:
	return changed;
}
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->scan_lock);
	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		if (nvme_mpath_clear_current_path(ns))
			kblockd_schedule_work(&ns->head->requeue_work);
	up_read(&ctrl->namespaces_rwsem);
	mutex_unlock(&ctrl->scan_lock);
}
static bool nvme_path_is_disabled(struct nvme_ns *ns)
{
	return ns->ctrl->state != NVME_CTRL_LIVE ||
		test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
		test_bit(NVME_NS_REMOVING, &ns->flags);
}
static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
{
	int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
	struct nvme_ns *found = NULL, *fallback = NULL, *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (nvme_path_is_disabled(ns))
			continue;

		if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
			distance = node_distance(node, ns->ctrl->numa_node);
		else
			distance = LOCAL_DISTANCE;

		switch (ns->ana_state) {
		case NVME_ANA_OPTIMIZED:
			if (distance < found_distance) {
				found_distance = distance;
				found = ns;
			}
			break;
		case NVME_ANA_NONOPTIMIZED:
			if (distance < fallback_distance) {
				fallback_distance = distance;
				fallback = ns;
			}
			break;
		default:
			break;
		}
	}

	if (!found)
		found = fallback;
	if (found)
		rcu_assign_pointer(head->current_path[node], found);
	return found;
}
static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head,
		struct nvme_ns *ns)
{
	ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns,
			siblings);
	if (ns)
		return ns;
	return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings);
}
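/*
 * Round-robin selection (iopolicy "round-robin"): starting from the last
 * used path, pick the next enabled ANA-optimized path in the sibling list,
 * remembering an enabled non-optimized path as a fallback.
 */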
static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
		int node, struct nvme_ns *old)
{
	struct nvme_ns *ns, *found, *fallback = NULL;

	if (list_is_singular(&head->list)) {
		if (nvme_path_is_disabled(old))
			return NULL;
		return old;
	}

	for (ns = nvme_next_ns(head, old);
	     ns && ns != old;
	     ns = nvme_next_ns(head, ns)) {
		if (nvme_path_is_disabled(ns))
			continue;

		if (ns->ana_state == NVME_ANA_OPTIMIZED) {
			found = ns;
			goto out;
		}
		if (ns->ana_state == NVME_ANA_NONOPTIMIZED)
			fallback = ns;
	}

	if (!fallback)
		return NULL;
	found = fallback;
out:
	rcu_assign_pointer(head->current_path[node], found);
	return found;
}
static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
{
	return ns->ctrl->state == NVME_CTRL_LIVE &&
		ns->ana_state == NVME_ANA_OPTIMIZED;
}
inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
{
	int node = numa_node_id();
	struct nvme_ns *ns;

	ns = srcu_dereference(head->current_path[node], &head->srcu);
	if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR && ns)
		ns = nvme_round_robin_path(head, node, ns);
	if (unlikely(!ns || !nvme_path_is_optimized(ns)))
		ns = __nvme_find_path(head, node);
	return ns;
}
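/*
 * Callers must hold head->srcu (srcu_read_lock()) across nvme_find_path()
 * and any use of the returned namespace; see nvme_ns_head_submit_bio().
 */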
static bool nvme_available_path(struct nvme_ns_head *head)
{
	struct nvme_ns *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		switch (ns->ctrl->state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			/* fallthru */
			return true;
		default:
			break;
		}
	}
	return false;
}
blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
{
	struct nvme_ns_head *head = bio->bi_disk->private_data;
	struct device *dev = disk_to_dev(head->disk);
	struct nvme_ns *ns;
	blk_qc_t ret = BLK_QC_T_NONE;
	int srcu_idx;

	/*
	 * The namespace might be going away and the bio might be moved to a
	 * different queue via blk_steal_bios(), so we need to use the bio_split
	 * pool from the original queue to allocate the bvecs from.
	 */
	blk_queue_split(&bio);

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (likely(ns)) {
		bio->bi_disk = ns->disk;
		bio->bi_opf |= REQ_NVME_MPATH;
		trace_block_bio_remap(bio->bi_disk->queue, bio,
				      disk_devt(ns->head->disk),
				      bio->bi_iter.bi_sector);
		ret = submit_bio_noacct(bio);
	} else if (nvme_available_path(head)) {
		dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");

		spin_lock_irq(&head->requeue_lock);
		bio_list_add(&head->requeue_list, bio);
		spin_unlock_irq(&head->requeue_lock);
	} else {
		dev_warn_ratelimited(dev, "no available path - failing I/O\n");

		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
	}

	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}
static void nvme_requeue_work(struct work_struct *work)
{
	struct nvme_ns_head *head =
		container_of(work, struct nvme_ns_head, requeue_work);
	struct bio *bio, *next;

	spin_lock_irq(&head->requeue_lock);
	next = bio_list_get(&head->requeue_list);
	spin_unlock_irq(&head->requeue_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_next;
		bio->bi_next = NULL;
		/*
		 * Reset disk to the mpath node and resubmit to select a new
		 * path.
		 */
		bio->bi_disk = head->disk;
		submit_bio_noacct(bio);
	}
}
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
	struct request_queue *q;
	bool vwc = false;

	mutex_init(&head->lock);
	bio_list_init(&head->requeue_list);
	spin_lock_init(&head->requeue_lock);
	INIT_WORK(&head->requeue_work, nvme_requeue_work);

	/*
	 * Add a multipath node if the subsystem supports multiple controllers.
	 * We also do this for private namespaces as the namespace sharing data
	 * could change after a rescan.
	 */
	if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || !multipath)
		return 0;

	q = blk_alloc_queue(ctrl->numa_node);
	if (!q)
		goto out;
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	/* set to a default value of 512 until the disk is validated */
	blk_queue_logical_block_size(q, 512);
	blk_set_stacking_limits(&q->limits);

	/* we need to propagate up the VWC settings */
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);

	head->disk = alloc_disk(0);
	if (!head->disk)
		goto out_cleanup_queue;
	head->disk->fops = &nvme_ns_head_ops;
	head->disk->private_data = head;
	head->disk->queue = q;
	head->disk->flags = GENHD_FL_EXT_DEVT;
	sprintf(head->disk->disk_name, "nvme%dn%d",
			ctrl->subsys->instance, head->instance);
	return 0;

out_cleanup_queue:
	blk_cleanup_queue(q);
out:
	return -ENOMEM;
}
static void nvme_mpath_set_live(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	if (!head->disk)
		return;

	if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
		device_add_disk(&head->subsys->dev, head->disk,
				nvme_ns_id_attr_groups);

	mutex_lock(&head->lock);
	if (nvme_path_is_optimized(ns)) {
		int node, srcu_idx;

		srcu_idx = srcu_read_lock(&head->srcu);
		for_each_node(node)
			__nvme_find_path(head, node);
		srcu_read_unlock(&head->srcu, srcu_idx);
	}
	mutex_unlock(&head->lock);

	synchronize_srcu(&head->srcu);
	kblockd_schedule_work(&head->requeue_work);
}
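/*
 * Layout of the ANA log page parsed below: a fixed struct nvme_ana_rsp_hdr
 * (which carries the group count in ->ngrps) followed by one variable-length
 * entry per group, each consisting of a struct nvme_ana_group_desc
 * immediately followed by desc->nnsids namespace IDs as __le32 values.
 */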
static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
		int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *,
				void *))
{
	void *base = ctrl->ana_log_buf;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr);
	int error, i;

	lockdep_assert_held(&ctrl->ana_lock);

	for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
		struct nvme_ana_group_desc *desc = base + offset;
		u32 nr_nsids;
		size_t nsid_buf_size;

		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
			return -EINVAL;

		nr_nsids = le32_to_cpu(desc->nnsids);
		nsid_buf_size = nr_nsids * sizeof(__le32);

		if (WARN_ON_ONCE(desc->grpid == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state > NVME_ANA_CHANGE))
			return -EINVAL;

		offset += sizeof(*desc);
		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size))
			return -EINVAL;

		error = cb(ctrl, desc, data);
		if (error)
			return error;

		offset += nsid_buf_size;
	}

	return 0;
}
static inline bool nvme_state_is_live(enum nvme_ana_state state)
{
	return state == NVME_ANA_OPTIMIZED || state == NVME_ANA_NONOPTIMIZED;
}
static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
		struct nvme_ns *ns)
{
	ns->ana_grpid = le32_to_cpu(desc->grpid);
	ns->ana_state = desc->state;
	clear_bit(NVME_NS_ANA_PENDING, &ns->flags);

	if (nvme_state_is_live(ns->ana_state))
		nvme_mpath_set_live(ns);
}
static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
	unsigned *nr_change_groups = data;
	struct nvme_ns *ns;

	dev_dbg(ctrl->device, "ANA group %d: %s.\n",
			le32_to_cpu(desc->grpid),
			nvme_ana_state_names[desc->state]);

	if (desc->state == NVME_ANA_CHANGE)
		(*nr_change_groups)++;

	if (!nr_nsids)
		return 0;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		unsigned nsid = le32_to_cpu(desc->nsids[n]);

		if (ns->head->ns_id < nsid)
			continue;
		if (ns->head->ns_id == nsid)
			nvme_update_ns_ana_state(desc, ns);
		if (++n == nr_nsids)
			break;
	}
	up_read(&ctrl->namespaces_rwsem);
	return 0;
}
static int nvme_read_ana_log(struct nvme_ctrl *ctrl)
{
	u32 nr_change_groups = 0;
	int error;

	mutex_lock(&ctrl->ana_lock);
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0,
			ctrl->ana_log_buf, ctrl->ana_log_size, 0);
	if (error) {
		dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
		goto out_unlock;
	}

	error = nvme_parse_ana_log(ctrl, &nr_change_groups,
			nvme_update_ana_state);
	if (error)
		goto out_unlock;

	/*
	 * In theory we should have an ANATT timer per group as they might enter
	 * the change state at different times.  But that is a lot of overhead
	 * just to protect against a target that keeps entering new change
	 * states while never finishing previous ones.  As we'll still
	 * eventually time out once all groups are in change state, this isn't
	 * a big deal.
	 *
	 * We also double the ANATT value to provide some slack for transports
	 * or AEN processing overhead.
	 */
	if (nr_change_groups)
		mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies);
	else
		del_timer_sync(&ctrl->anatt_timer);
out_unlock:
	mutex_unlock(&ctrl->ana_lock);
	return error;
}
static void nvme_ana_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);

	nvme_read_ana_log(ctrl);
}
static void nvme_anatt_timeout(struct timer_list *t)
{
	struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);

	dev_info(ctrl->device, "ANATT timeout, resetting controller.\n");
	nvme_reset_ctrl(ctrl);
}
void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
	if (!nvme_ctrl_use_ana(ctrl))
		return;
	del_timer_sync(&ctrl->anatt_timer);
	cancel_work_sync(&ctrl->ana_work);
}
#define SUBSYS_ATTR_RW(_name, _mode, _show, _store)	\
	struct device_attribute subsys_attr_##_name =	\
		__ATTR(_name, _mode, _show, _store)
static const char *nvme_iopolicy_names[] = {
	[NVME_IOPOLICY_NUMA]	= "numa",
	[NVME_IOPOLICY_RR]	= "round-robin",
};
static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	return sprintf(buf, "%s\n",
			nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
}
static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
		if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
			WRITE_ONCE(subsys->iopolicy, i);
			return count;
		}
	}

	return -EINVAL;
}
SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
		      nvme_subsys_iopolicy_show, nvme_subsys_iopolicy_store);
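/*
 * Illustrative sysfs usage (the subsystem instance number is an example):
 *   cat /sys/class/nvme-subsystem/nvme-subsys0/iopolicy
 *   echo round-robin > /sys/class/nvme-subsystem/nvme-subsys0/iopolicy
 */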
static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
}
DEVICE_ATTR_RO(ana_grpid);
static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

	return sprintf(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
}
DEVICE_ATTR_RO(ana_state);
static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	struct nvme_ana_group_desc *dst = data;

	if (desc->grpid != dst->grpid)
		return 0;

	*dst = *desc;
	return -ENXIO; /* just break out of the loop */
}
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	if (nvme_ctrl_use_ana(ns->ctrl)) {
		struct nvme_ana_group_desc desc = {
			.grpid = id->anagrpid,
			.state = 0,
		};

		mutex_lock(&ns->ctrl->ana_lock);
		ns->ana_grpid = le32_to_cpu(id->anagrpid);
		nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
		mutex_unlock(&ns->ctrl->ana_lock);
		if (desc.state) {
			/* found the group desc: update */
			nvme_update_ns_ana_state(&desc, ns);
		}
	} else {
		ns->ana_state = NVME_ANA_OPTIMIZED;
		nvme_mpath_set_live(ns);
	}

	if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) {
		struct gendisk *disk = ns->head->disk;

		if (disk)
			disk->queue->backing_dev_info->capabilities |=
					BDI_CAP_STABLE_WRITES;
	}
}
void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
	if (!head->disk)
		return;
	if (head->disk->flags & GENHD_FL_UP)
		del_gendisk(head->disk);
	blk_set_queue_dying(head->disk->queue);
	/* make sure all pending bios are cleaned up */
	kblockd_schedule_work(&head->requeue_work);
	flush_work(&head->requeue_work);
	blk_cleanup_queue(head->disk->queue);
	if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
		/*
		 * If device_add_disk() wasn't called, prevent the disk
		 * release path from putting a bogus reference on the
		 * request queue.
		 */
		head->disk->queue = NULL;
	}
	put_disk(head->disk);
}
int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	int error;

	/* check if multipath is enabled and we have the capability */
	if (!multipath || !ctrl->subsys ||
	    !(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA))
		return 0;

	ctrl->anacap = id->anacap;
	ctrl->anatt = id->anatt;
	ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
	ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);

	mutex_init(&ctrl->ana_lock);
	timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
	ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
	ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
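	/*
	 * Worked example (assuming the 16-byte log header and 32-byte group
	 * descriptors of the ANA log page format): a controller reporting 32
	 * ANA groups and 1024 namespaces needs
	 * 16 + 32 * 32 + 1024 * 4 = 5136 bytes of log buffer.
	 */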
	if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
		dev_err(ctrl->device,
			"ANA log page size (%zd) larger than MDTS (%d).\n",
			ctrl->ana_log_size,
			ctrl->max_hw_sectors << SECTOR_SHIFT);
		dev_err(ctrl->device, "disabling ANA support.\n");
		return 0;
	}

	INIT_WORK(&ctrl->ana_work, nvme_ana_work);
	kfree(ctrl->ana_log_buf);
	ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
	if (!ctrl->ana_log_buf) {
		error = -ENOMEM;
		goto out;
	}

	error = nvme_read_ana_log(ctrl);
	if (error)
		goto out_free_ana_log_buf;
	return 0;

out_free_ana_log_buf:
	kfree(ctrl->ana_log_buf);
	ctrl->ana_log_buf = NULL;
out:
	return error;
}
void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
	kfree(ctrl->ana_log_buf);
	ctrl->ana_log_buf = NULL;
}