1 // SPDX-License-Identifier: GPL-2.0
3 * NVM Express device driver
4 * Copyright (c) 2011-2014, Intel Corporation.
7 #include <linux/blkdev.h>
8 #include <linux/blk-mq.h>
9 #include <linux/compat.h>
10 #include <linux/delay.h>
11 #include <linux/errno.h>
12 #include <linux/hdreg.h>
13 #include <linux/kernel.h>
14 #include <linux/module.h>
15 #include <linux/backing-dev.h>
16 #include <linux/list_sort.h>
17 #include <linux/slab.h>
18 #include <linux/types.h>
20 #include <linux/ptrace.h>
21 #include <linux/nvme_ioctl.h>
22 #include <linux/t10-pi.h>
23 #include <linux/pm_qos.h>
24 #include <asm/unaligned.h>
29 #define CREATE_TRACE_POINTS
32 #define NVME_MINORS (1U << MINORBITS)
34 unsigned int admin_timeout = 60;
35 module_param(admin_timeout, uint, 0644);
36 MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
37 EXPORT_SYMBOL_GPL(admin_timeout);
39 unsigned int nvme_io_timeout = 30;
40 module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
41 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
42 EXPORT_SYMBOL_GPL(nvme_io_timeout);
44 static unsigned char shutdown_timeout = 5;
45 module_param(shutdown_timeout, byte, 0644);
46 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
48 static u8 nvme_max_retries = 5;
49 module_param_named(max_retries, nvme_max_retries, byte, 0644);
50 MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
52 static unsigned long default_ps_max_latency_us = 100000;
53 module_param(default_ps_max_latency_us, ulong, 0644);
54 MODULE_PARM_DESC(default_ps_max_latency_us,
55 "max power saving latency for new devices; use PM QOS to change per device");
57 static bool force_apst;
58 module_param(force_apst, bool, 0644);
59 MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");
62 module_param(streams, bool, 0644);
63 MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
66 * nvme_wq - hosts nvme related works that are not reset or delete
67 * nvme_reset_wq - hosts nvme reset works
68 * nvme_delete_wq - hosts nvme delete works
70 * nvme_wq will host works such as scan, aen handling, fw activation,
71 * keep-alive, periodic reconnects etc. nvme_reset_wq
72 * runs reset works which also flush works hosted on nvme_wq for
73 * serialization purposes. nvme_delete_wq hosts controller deletion
74 * works which flush reset works for serialization.
76 struct workqueue_struct *nvme_wq;
77 EXPORT_SYMBOL_GPL(nvme_wq);
79 struct workqueue_struct *nvme_reset_wq;
80 EXPORT_SYMBOL_GPL(nvme_reset_wq);
82 struct workqueue_struct *nvme_delete_wq;
83 EXPORT_SYMBOL_GPL(nvme_delete_wq);
85 static LIST_HEAD(nvme_subsystems);
86 static DEFINE_MUTEX(nvme_subsystems_lock);
88 static DEFINE_IDA(nvme_instance_ida);
89 static dev_t nvme_chr_devt;
90 static struct class *nvme_class;
91 static struct class *nvme_subsys_class;
93 static int nvme_revalidate_disk(struct gendisk *disk);
94 static void nvme_put_subsystem(struct nvme_subsystem *subsys);
95 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
98 static void nvme_set_queue_dying(struct nvme_ns *ns)
101 * Revalidating a dead namespace sets capacity to 0. This will stop
102 * buffered writers from dirtying pages that can't be synced.
104 if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
106 blk_set_queue_dying(ns->queue);
107 /* Forcibly unquiesce queues to avoid blocking dispatch */
108 blk_mq_unquiesce_queue(ns->queue);
110 * Revalidate after unblocking dispatchers that may be holding bd_mutex
112 revalidate_disk(ns->disk);
115 static void nvme_queue_scan(struct nvme_ctrl *ctrl)
118 * Only queue new scan work when admin and IO queues are both alive
120 if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
121 queue_work(nvme_wq, &ctrl->scan_work);
125 * Use this function to proceed with scheduling reset_work for a controller
126 * that had previously been set to the resetting state. This is intended for
127 * code paths that can't be interrupted by other reset attempts. A hot removal
128 * may prevent this from succeeding.
130 int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
132 if (ctrl->state != NVME_CTRL_RESETTING)
134 if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
138 EXPORT_SYMBOL_GPL(nvme_try_sched_reset);
140 int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
142 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
144 if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
148 EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
150 int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
154 ret = nvme_reset_ctrl(ctrl);
156 flush_work(&ctrl->reset_work);
157 if (ctrl->state != NVME_CTRL_LIVE)
163 EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
165 static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
167 dev_info(ctrl->device,
168 "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);
170 flush_work(&ctrl->reset_work);
171 nvme_stop_ctrl(ctrl);
172 nvme_remove_namespaces(ctrl);
173 ctrl->ops->delete_ctrl(ctrl);
174 nvme_uninit_ctrl(ctrl);
177 static void nvme_delete_ctrl_work(struct work_struct *work)
179 struct nvme_ctrl *ctrl =
180 container_of(work, struct nvme_ctrl, delete_work);
182 nvme_do_delete_ctrl(ctrl);
185 int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
187 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
189 if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
193 EXPORT_SYMBOL_GPL(nvme_delete_ctrl);
195 static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
198 * Keep a reference until nvme_do_delete_ctrl() completes,
199 * since ->delete_ctrl can free the controller.
202 if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
203 nvme_do_delete_ctrl(ctrl);
207 static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
209 return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
212 static blk_status_t nvme_error_status(u16 status)
214 switch (status & 0x7ff) {
215 case NVME_SC_SUCCESS:
217 case NVME_SC_CAP_EXCEEDED:
218 return BLK_STS_NOSPC;
219 case NVME_SC_LBA_RANGE:
220 case NVME_SC_CMD_INTERRUPTED:
221 case NVME_SC_NS_NOT_READY:
222 return BLK_STS_TARGET;
223 case NVME_SC_BAD_ATTRIBUTES:
224 case NVME_SC_ONCS_NOT_SUPPORTED:
225 case NVME_SC_INVALID_OPCODE:
226 case NVME_SC_INVALID_FIELD:
227 case NVME_SC_INVALID_NS:
228 return BLK_STS_NOTSUPP;
229 case NVME_SC_WRITE_FAULT:
230 case NVME_SC_READ_ERROR:
231 case NVME_SC_UNWRITTEN_BLOCK:
232 case NVME_SC_ACCESS_DENIED:
233 case NVME_SC_READ_ONLY:
234 case NVME_SC_COMPARE_FAILED:
235 return BLK_STS_MEDIUM;
236 case NVME_SC_GUARD_CHECK:
237 case NVME_SC_APPTAG_CHECK:
238 case NVME_SC_REFTAG_CHECK:
239 case NVME_SC_INVALID_PI:
240 return BLK_STS_PROTECTION;
241 case NVME_SC_RESERVATION_CONFLICT:
242 return BLK_STS_NEXUS;
243 case NVME_SC_HOST_PATH_ERROR:
244 return BLK_STS_TRANSPORT;
246 return BLK_STS_IOERR;
250 static inline bool nvme_req_needs_retry(struct request *req)
252 if (blk_noretry_request(req))
254 if (nvme_req(req)->status & NVME_SC_DNR)
256 if (nvme_req(req)->retries >= nvme_max_retries)
261 static void nvme_retry_req(struct request *req)
263 struct nvme_ns *ns = req->q->queuedata;
264 unsigned long delay = 0;
267 /* The mask and shift result must be <= 3 */
268 crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
270 delay = ns->ctrl->crdt[crd - 1] * 100;
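/*
 * For illustration, with a hypothetical controller reporting crdt[1] = 10:
 * a status with CRD = 2 selects crdt[2 - 1], and since CRDT values are in
 * units of 100 msec the requeue is delayed by 1000 msec.
 */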
272 nvme_req(req)->retries++;
273 blk_mq_requeue_request(req, false);
274 blk_mq_delay_kick_requeue_list(req->q, delay);
277 void nvme_complete_rq(struct request *req)
279 blk_status_t status = nvme_error_status(nvme_req(req)->status);
281 trace_nvme_complete_rq(req);
283 nvme_cleanup_cmd(req);
285 if (nvme_req(req)->ctrl->kas)
286 nvme_req(req)->ctrl->comp_seen = true;
288 if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
289 if ((req->cmd_flags & REQ_NVME_MPATH) && nvme_failover_req(req))
292 if (!blk_queue_dying(req->q)) {
298 nvme_trace_bio_complete(req, status);
299 blk_mq_end_request(req, status);
301 EXPORT_SYMBOL_GPL(nvme_complete_rq);
303 bool nvme_cancel_request(struct request *req, void *data, bool reserved)
305 dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
306 "Cancelling I/O %d", req->tag);
308 /* don't abort one completed request */
309 if (blk_mq_request_completed(req))
312 nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
313 blk_mq_complete_request(req);
316 EXPORT_SYMBOL_GPL(nvme_cancel_request);
318 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
319 enum nvme_ctrl_state new_state)
321 enum nvme_ctrl_state old_state;
323 bool changed = false;
325 spin_lock_irqsave(&ctrl->lock, flags);
327 old_state = ctrl->state;
332 case NVME_CTRL_RESETTING:
333 case NVME_CTRL_CONNECTING:
340 case NVME_CTRL_RESETTING:
350 case NVME_CTRL_CONNECTING:
353 case NVME_CTRL_RESETTING:
360 case NVME_CTRL_DELETING:
363 case NVME_CTRL_RESETTING:
364 case NVME_CTRL_CONNECTING:
373 case NVME_CTRL_DELETING:
385 ctrl->state = new_state;
386 wake_up_all(&ctrl->state_wq);
389 spin_unlock_irqrestore(&ctrl->lock, flags);
390 if (changed && ctrl->state == NVME_CTRL_LIVE)
391 nvme_kick_requeue_lists(ctrl);
394 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
397 * Returns true for sink states that can't ever transition back to live.
399 static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
401 switch (ctrl->state) {
404 case NVME_CTRL_RESETTING:
405 case NVME_CTRL_CONNECTING:
407 case NVME_CTRL_DELETING:
411 WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
417 * Waits for the controller state to be resetting, or returns false if it is
418 * not possible to ever transition to that state.
420 bool nvme_wait_reset(struct nvme_ctrl *ctrl)
422 wait_event(ctrl->state_wq,
423 nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
424 nvme_state_terminal(ctrl));
425 return ctrl->state == NVME_CTRL_RESETTING;
427 EXPORT_SYMBOL_GPL(nvme_wait_reset);
429 static void nvme_free_ns_head(struct kref *ref)
431 struct nvme_ns_head *head =
432 container_of(ref, struct nvme_ns_head, ref);
434 nvme_mpath_remove_disk(head);
435 ida_simple_remove(&head->subsys->ns_ida, head->instance);
436 list_del_init(&head->entry);
437 cleanup_srcu_struct(&head->srcu);
438 nvme_put_subsystem(head->subsys);
442 static void nvme_put_ns_head(struct nvme_ns_head *head)
444 kref_put(&head->ref, nvme_free_ns_head);
447 static void nvme_free_ns(struct kref *kref)
449 struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
452 nvme_nvm_unregister(ns);
455 nvme_put_ns_head(ns->head);
456 nvme_put_ctrl(ns->ctrl);
460 static void nvme_put_ns(struct nvme_ns *ns)
462 kref_put(&ns->kref, nvme_free_ns);
465 static inline void nvme_clear_nvme_request(struct request *req)
467 if (!(req->rq_flags & RQF_DONTPREP)) {
468 nvme_req(req)->retries = 0;
469 nvme_req(req)->flags = 0;
470 req->rq_flags |= RQF_DONTPREP;
474 struct request *nvme_alloc_request(struct request_queue *q,
475 struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
477 unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
480 if (qid == NVME_QID_ANY) {
481 req = blk_mq_alloc_request(q, op, flags);
483 req = blk_mq_alloc_request_hctx(q, op, flags,
489 req->cmd_flags |= REQ_FAILFAST_DRIVER;
490 nvme_clear_nvme_request(req);
491 nvme_req(req)->cmd = cmd;
495 EXPORT_SYMBOL_GPL(nvme_alloc_request);
497 static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
499 struct nvme_command c;
501 memset(&c, 0, sizeof(c));
503 c.directive.opcode = nvme_admin_directive_send;
504 c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
505 c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
506 c.directive.dtype = NVME_DIR_IDENTIFY;
507 c.directive.tdtype = NVME_DIR_STREAMS;
508 c.directive.endir = enable ? NVME_DIR_ENDIR : 0;
510 return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
513 static int nvme_disable_streams(struct nvme_ctrl *ctrl)
515 return nvme_toggle_streams(ctrl, false);
518 static int nvme_enable_streams(struct nvme_ctrl *ctrl)
520 return nvme_toggle_streams(ctrl, true);
523 static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
524 struct streams_directive_params *s, u32 nsid)
526 struct nvme_command c;
528 memset(&c, 0, sizeof(c));
529 memset(s, 0, sizeof(*s));
531 c.directive.opcode = nvme_admin_directive_recv;
532 c.directive.nsid = cpu_to_le32(nsid);
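/*
 * NUMD is a 0's based dword count, so convert the parameter structure
 * size from bytes to dwords and subtract one.
 */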
533 c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1);
534 c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
535 c.directive.dtype = NVME_DIR_STREAMS;
537 return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
540 static int nvme_configure_directives(struct nvme_ctrl *ctrl)
542 struct streams_directive_params s;
545 if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
550 ret = nvme_enable_streams(ctrl);
554 ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
558 ctrl->nssa = le16_to_cpu(s.nssa);
559 if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
560 dev_info(ctrl->device, "too few streams (%u) available\n",
562 nvme_disable_streams(ctrl);
566 ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
567 dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
572 * Check if 'req' has a write hint associated with it. If it does, assign
573 * a valid namespace stream to the write.
575 static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
576 struct request *req, u16 *control,
579 enum rw_hint streamid = req->write_hint;
581 if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
585 if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
588 *control |= NVME_RW_DTYPE_STREAMS;
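/*
 * With the directive type set to Streams, the stream identifier is
 * carried in the Directive Specific field, the upper 16 bits of
 * command dword 13, hence the shift by 16 below.
 */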
589 *dsmgmt |= streamid << 16;
592 if (streamid < ARRAY_SIZE(req->q->write_hints))
593 req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
596 static inline void nvme_setup_flush(struct nvme_ns *ns,
597 struct nvme_command *cmnd)
599 cmnd->common.opcode = nvme_cmd_flush;
600 cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
603 static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
604 struct nvme_command *cmnd)
606 unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
607 struct nvme_dsm_range *range;
611 * Some devices do not consider the DSM 'Number of Ranges' field when
612 * determining how much data to DMA. Always allocate memory for the
613 * maximum number of segments to prevent the device from reading past the end of the buffer.
615 static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;
617 range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
620 * If we fail to allocate our range, fall back to the controller
621 * discard page. If that's also busy, it's safe to return
622 * busy, as we know we can make progress once that's freed.
624 if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
625 return BLK_STS_RESOURCE;
627 range = page_address(ns->ctrl->discard_page);
630 __rq_for_each_bio(bio, req) {
631 u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
632 u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
635 range[n].cattr = cpu_to_le32(0);
636 range[n].nlb = cpu_to_le32(nlb);
637 range[n].slba = cpu_to_le64(slba);
642 if (WARN_ON_ONCE(n != segments)) {
643 if (virt_to_page(range) == ns->ctrl->discard_page)
644 clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
647 return BLK_STS_IOERR;
650 cmnd->dsm.opcode = nvme_cmd_dsm;
651 cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
652 cmnd->dsm.nr = cpu_to_le32(segments - 1);
653 cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
655 req->special_vec.bv_page = virt_to_page(range);
656 req->special_vec.bv_offset = offset_in_page(range);
657 req->special_vec.bv_len = alloc_size;
658 req->rq_flags |= RQF_SPECIAL_PAYLOAD;
663 static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
664 struct request *req, struct nvme_command *cmnd)
666 if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
667 return nvme_setup_discard(ns, req, cmnd);
669 cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
670 cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
671 cmnd->write_zeroes.slba =
672 cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
673 cmnd->write_zeroes.length =
674 cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
675 cmnd->write_zeroes.control = 0;
679 static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
680 struct request *req, struct nvme_command *cmnd)
682 struct nvme_ctrl *ctrl = ns->ctrl;
686 if (req->cmd_flags & REQ_FUA)
687 control |= NVME_RW_FUA;
688 if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
689 control |= NVME_RW_LR;
691 if (req->cmd_flags & REQ_RAHEAD)
692 dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
694 cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
695 cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
696 cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
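/* NLB is a 0's based block count, hence the "- 1" below */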
697 cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
699 if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
700 nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);
704 * If formatted with metadata, the block layer always provides a
705 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else
706 * we enable the PRACT bit for protection information or set the
707 * namespace capacity to zero to prevent any I/O.
709 if (!blk_integrity_rq(req)) {
710 if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
711 return BLK_STS_NOTSUPP;
712 control |= NVME_RW_PRINFO_PRACT;
715 switch (ns->pi_type) {
716 case NVME_NS_DPS_PI_TYPE3:
717 control |= NVME_RW_PRINFO_PRCHK_GUARD;
719 case NVME_NS_DPS_PI_TYPE1:
720 case NVME_NS_DPS_PI_TYPE2:
721 control |= NVME_RW_PRINFO_PRCHK_GUARD |
722 NVME_RW_PRINFO_PRCHK_REF;
723 cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
728 cmnd->rw.control = cpu_to_le16(control);
729 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
733 void nvme_cleanup_cmd(struct request *req)
735 if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
736 struct nvme_ns *ns = req->rq_disk->private_data;
737 struct page *page = req->special_vec.bv_page;
739 if (page == ns->ctrl->discard_page)
740 clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
742 kfree(page_address(page) + req->special_vec.bv_offset);
745 EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
747 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
748 struct nvme_command *cmd)
750 blk_status_t ret = BLK_STS_OK;
752 nvme_clear_nvme_request(req);
754 memset(cmd, 0, sizeof(*cmd));
755 switch (req_op(req)) {
758 memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
761 nvme_setup_flush(ns, cmd);
763 case REQ_OP_WRITE_ZEROES:
764 ret = nvme_setup_write_zeroes(ns, req, cmd);
767 ret = nvme_setup_discard(ns, req, cmd);
771 ret = nvme_setup_rw(ns, req, cmd);
775 return BLK_STS_IOERR;
778 cmd->common.command_id = req->tag;
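/*
 * The blk-mq tag doubles as the NVMe command identifier, which lets the
 * completion path map a completion queue entry back to its request.
 */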
779 trace_nvme_setup_cmd(req, cmd);
782 EXPORT_SYMBOL_GPL(nvme_setup_cmd);
784 static void nvme_end_sync_rq(struct request *rq, blk_status_t error)
786 struct completion *waiting = rq->end_io_data;
788 rq->end_io_data = NULL;
792 static void nvme_execute_rq_polled(struct request_queue *q,
793 struct gendisk *bd_disk, struct request *rq, int at_head)
795 DECLARE_COMPLETION_ONSTACK(wait);
797 WARN_ON_ONCE(!test_bit(QUEUE_FLAG_POLL, &q->queue_flags));
799 rq->cmd_flags |= REQ_HIPRI;
800 rq->end_io_data = &wait;
801 blk_execute_rq_nowait(q, bd_disk, rq, at_head, nvme_end_sync_rq);
803 while (!completion_done(&wait)) {
804 blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true);
810 * Returns 0 on success. If the result is negative, it's a Linux error code;
811 * if the result is positive, it's an NVM Express status code
813 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
814 union nvme_result *result, void *buffer, unsigned bufflen,
815 unsigned timeout, int qid, int at_head,
816 blk_mq_req_flags_t flags, bool poll)
821 req = nvme_alloc_request(q, cmd, flags, qid);
825 req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
827 if (buffer && bufflen) {
828 ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
834 nvme_execute_rq_polled(req->q, NULL, req, at_head);
836 blk_execute_rq(req->q, NULL, req, at_head);
838 *result = nvme_req(req)->result;
839 if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
842 ret = nvme_req(req)->status;
844 blk_mq_free_request(req);
847 EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
849 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
850 void *buffer, unsigned bufflen)
852 return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
853 NVME_QID_ANY, 0, 0, false);
855 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
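/*
 * A minimal caller sketch, for illustration only: 0 means success, a
 * negative value is a Linux errno, and a positive value is the NVMe
 * status field (e.g. NVME_SC_INVALID_FIELD), so callers typically do
 * something like:
 *
 *	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, buf, len);
 *	if (error)
 *		return error;
 */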
857 static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
858 unsigned len, u32 seed, bool write)
860 struct bio_integrity_payload *bip;
864 buf = kmalloc(len, GFP_KERNEL);
869 if (write && copy_from_user(buf, ubuf, len))
872 bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
878 bip->bip_iter.bi_size = len;
879 bip->bip_iter.bi_sector = seed;
880 ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
881 offset_in_page(buf));
891 static int nvme_submit_user_cmd(struct request_queue *q,
892 struct nvme_command *cmd, void __user *ubuffer,
893 unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
894 u32 meta_seed, u64 *result, unsigned timeout)
896 bool write = nvme_is_write(cmd);
897 struct nvme_ns *ns = q->queuedata;
898 struct gendisk *disk = ns ? ns->disk : NULL;
900 struct bio *bio = NULL;
904 req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
908 req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
909 nvme_req(req)->flags |= NVME_REQ_USERCMD;
911 if (ubuffer && bufflen) {
912 ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
918 if (disk && meta_buffer && meta_len) {
919 meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
925 req->cmd_flags |= REQ_INTEGRITY;
929 blk_execute_rq(req->q, disk, req, 0);
930 if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
933 ret = nvme_req(req)->status;
935 *result = le64_to_cpu(nvme_req(req)->result.u64);
936 if (meta && !ret && !write) {
937 if (copy_to_user(meta_buffer, meta, meta_len))
943 blk_rq_unmap_user(bio);
945 blk_mq_free_request(req);
949 static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
951 struct nvme_ctrl *ctrl = rq->end_io_data;
953 bool startka = false;
955 blk_mq_free_request(rq);
958 dev_err(ctrl->device,
959 "failed nvme_keep_alive_end_io error=%d\n",
964 ctrl->comp_seen = false;
965 spin_lock_irqsave(&ctrl->lock, flags);
966 if (ctrl->state == NVME_CTRL_LIVE ||
967 ctrl->state == NVME_CTRL_CONNECTING)
969 spin_unlock_irqrestore(&ctrl->lock, flags);
971 queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
974 static int nvme_keep_alive(struct nvme_ctrl *ctrl)
978 rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
983 rq->timeout = ctrl->kato * HZ;
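/* ctrl->kato is in seconds; kato * HZ converts it to jiffies */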
984 rq->end_io_data = ctrl;
986 blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);
991 static void nvme_keep_alive_work(struct work_struct *work)
993 struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
994 struct nvme_ctrl, ka_work);
995 bool comp_seen = ctrl->comp_seen;
997 if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
998 dev_dbg(ctrl->device,
999 "reschedule traffic based keep-alive timer\n");
1000 ctrl->comp_seen = false;
1001 queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
1005 if (nvme_keep_alive(ctrl)) {
1006 /* allocation failure, reset the controller */
1007 dev_err(ctrl->device, "keep-alive failed\n");
1008 nvme_reset_ctrl(ctrl);
1013 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
1015 if (unlikely(ctrl->kato == 0))
1018 queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
1021 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
1023 if (unlikely(ctrl->kato == 0))
1026 cancel_delayed_work_sync(&ctrl->ka_work);
1028 EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
1030 static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
1032 struct nvme_command c = { };
1035 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1036 c.identify.opcode = nvme_admin_identify;
1037 c.identify.cns = NVME_ID_CNS_CTRL;
1039 *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
1043 error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
1044 sizeof(struct nvme_id_ctrl));
1050 static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
1051 struct nvme_ns_id_desc *cur)
1053 const char *warn_str = "ctrl returned bogus length:";
1056 switch (cur->nidt) {
1057 case NVME_NIDT_EUI64:
1058 if (cur->nidl != NVME_NIDT_EUI64_LEN) {
1059 dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
1060 warn_str, cur->nidl);
1063 memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
1064 return NVME_NIDT_EUI64_LEN;
1065 case NVME_NIDT_NGUID:
1066 if (cur->nidl != NVME_NIDT_NGUID_LEN) {
1067 dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
1068 warn_str, cur->nidl);
1071 memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
1072 return NVME_NIDT_NGUID_LEN;
1073 case NVME_NIDT_UUID:
1074 if (cur->nidl != NVME_NIDT_UUID_LEN) {
1075 dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
1076 warn_str, cur->nidl);
1079 uuid_copy(&ids->uuid, data + sizeof(*cur));
1080 return NVME_NIDT_UUID_LEN;
1082 /* Skip unknown types */
1087 static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
1088 struct nvme_ns_ids *ids)
1090 struct nvme_command c = { };
1096 c.identify.opcode = nvme_admin_identify;
1097 c.identify.nsid = cpu_to_le32(nsid);
1098 c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
1100 data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
1104 status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
1105 NVME_IDENTIFY_DATA_SIZE);
1107 dev_warn(ctrl->device,
1108 "Identify Descriptors failed (%d)\n", status);
1110 * Don't treat an error as fatal, as we potentially already
1111 * have an NGUID or EUI-64.
1113 if (status > 0 && !(status & NVME_SC_DNR))
1118 for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
1119 struct nvme_ns_id_desc *cur = data + pos;
1124 len = nvme_process_ns_desc(ctrl, ids, cur);
1128 len += sizeof(*cur);
1135 static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
1137 struct nvme_command c = { };
1139 c.identify.opcode = nvme_admin_identify;
1140 c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
1141 c.identify.nsid = cpu_to_le32(nsid);
1142 return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list,
1143 NVME_IDENTIFY_DATA_SIZE);
1146 static int nvme_identify_ns(struct nvme_ctrl *ctrl,
1147 unsigned nsid, struct nvme_id_ns **id)
1149 struct nvme_command c = { };
1152 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1153 c.identify.opcode = nvme_admin_identify;
1154 c.identify.nsid = cpu_to_le32(nsid);
1155 c.identify.cns = NVME_ID_CNS_NS;
1157 *id = kmalloc(sizeof(**id), GFP_KERNEL);
1161 error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
1163 dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
1170 static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
1171 unsigned int dword11, void *buffer, size_t buflen, u32 *result)
1173 union nvme_result res = { 0 };
1174 struct nvme_command c;
1177 memset(&c, 0, sizeof(c));
1178 c.features.opcode = op;
1179 c.features.fid = cpu_to_le32(fid);
1180 c.features.dword11 = cpu_to_le32(dword11);
1182 ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
1183 buffer, buflen, 0, NVME_QID_ANY, 0, 0, false);
1184 if (ret >= 0 && result)
1185 *result = le32_to_cpu(res.u32);
1189 int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
1190 unsigned int dword11, void *buffer, size_t buflen,
1193 return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
1196 EXPORT_SYMBOL_GPL(nvme_set_features);
1198 int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
1199 unsigned int dword11, void *buffer, size_t buflen,
1202 return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
1205 EXPORT_SYMBOL_GPL(nvme_get_features);
1207 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
1209 u32 q_count = (*count - 1) | ((*count - 1) << 16);
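/*
 * The Number of Queues feature takes 0's based counts: the requested
 * submission queue count in bits 15:0 and the completion queue count
 * in bits 31:16 of the value built above.
 */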
1211 int status, nr_io_queues;
1213 status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
1219 * Degraded controllers might return an error when setting the queue
1220 * count. We still want to be able to bring them online and offer
1221 * access to the admin queue, as that might be the only way to fix them up.
1224 dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
1227 nr_io_queues = min(result & 0xffff, result >> 16) + 1;
1228 *count = min(*count, nr_io_queues);
1233 EXPORT_SYMBOL_GPL(nvme_set_queue_count);
1235 #define NVME_AEN_SUPPORTED \
1236 (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
1237 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)
1239 static void nvme_enable_aen(struct nvme_ctrl *ctrl)
1241 u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
1244 if (!supported_aens)
1247 status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
1250 dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
1253 queue_work(nvme_wq, &ctrl->async_event_work);
1257 * Convert integer values from ioctl structures to user pointers, silently
1258 * ignoring the upper bits in the compat case to match the behaviour of 32-bit ioctls.
1261 static void __user *nvme_to_user_ptr(uintptr_t ptrval)
1263 if (in_compat_syscall())
1264 ptrval = (compat_uptr_t)ptrval;
1265 return (void __user *)ptrval;
1268 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1270 struct nvme_user_io io;
1271 struct nvme_command c;
1272 unsigned length, meta_len;
1273 void __user *metadata;
1275 if (copy_from_user(&io, uio, sizeof(io)))
1280 switch (io.opcode) {
1281 case nvme_cmd_write:
1283 case nvme_cmd_compare:
1289 length = (io.nblocks + 1) << ns->lba_shift;
1290 meta_len = (io.nblocks + 1) * ns->ms;
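/*
 * io.nblocks is a 0's based count, so the data and metadata sizes are
 * derived from nblocks + 1 blocks.
 */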
1291 metadata = nvme_to_user_ptr(io.metadata);
1296 } else if (meta_len) {
1297 if ((io.metadata & 3) || !io.metadata)
1301 memset(&c, 0, sizeof(c));
1302 c.rw.opcode = io.opcode;
1303 c.rw.flags = io.flags;
1304 c.rw.nsid = cpu_to_le32(ns->head->ns_id);
1305 c.rw.slba = cpu_to_le64(io.slba);
1306 c.rw.length = cpu_to_le16(io.nblocks);
1307 c.rw.control = cpu_to_le16(io.control);
1308 c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
1309 c.rw.reftag = cpu_to_le32(io.reftag);
1310 c.rw.apptag = cpu_to_le16(io.apptag);
1311 c.rw.appmask = cpu_to_le16(io.appmask);
1313 return nvme_submit_user_cmd(ns->queue, &c,
1314 nvme_to_user_ptr(io.addr), length,
1315 metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
1318 static u32 nvme_known_admin_effects(u8 opcode)
1321 case nvme_admin_format_nvm:
1322 return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
1323 NVME_CMD_EFFECTS_CSE_MASK;
1324 case nvme_admin_sanitize_nvm:
1325 return NVME_CMD_EFFECTS_CSE_MASK;
1332 static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1339 effects = le32_to_cpu(ctrl->effects->iocs[opcode]);
1340 if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
1341 dev_warn(ctrl->device,
1342 "IO command:%02x has unhandled effects:%08x\n",
1348 effects = le32_to_cpu(ctrl->effects->acs[opcode]);
1349 effects |= nvme_known_admin_effects(opcode);
1352 * For simplicity, IO to all namespaces is quiesced even if the command
1353 * effects say only one namespace is affected.
1355 if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
1356 mutex_lock(&ctrl->scan_lock);
1357 mutex_lock(&ctrl->subsys->lock);
1358 nvme_mpath_start_freeze(ctrl->subsys);
1359 nvme_mpath_wait_freeze(ctrl->subsys);
1360 nvme_start_freeze(ctrl);
1361 nvme_wait_freeze(ctrl);
1366 static void nvme_update_formats(struct nvme_ctrl *ctrl)
1370 down_read(&ctrl->namespaces_rwsem);
1371 list_for_each_entry(ns, &ctrl->namespaces, list)
1372 if (ns->disk && nvme_revalidate_disk(ns->disk))
1373 nvme_set_queue_dying(ns);
1374 up_read(&ctrl->namespaces_rwsem);
1377 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
1380 * Revalidate LBA changes prior to unfreezing. This is necessary to
1381 * prevent memory corruption if a logical block size was changed by
1384 if (effects & NVME_CMD_EFFECTS_LBCC)
1385 nvme_update_formats(ctrl);
1386 if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
1387 nvme_unfreeze(ctrl);
1388 nvme_mpath_unfreeze(ctrl->subsys);
1389 mutex_unlock(&ctrl->subsys->lock);
1390 nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
1391 mutex_unlock(&ctrl->scan_lock);
1393 if (effects & NVME_CMD_EFFECTS_CCC)
1394 nvme_init_identify(ctrl);
1395 if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
1396 nvme_queue_scan(ctrl);
1399 static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1400 struct nvme_passthru_cmd __user *ucmd)
1402 struct nvme_passthru_cmd cmd;
1403 struct nvme_command c;
1404 unsigned timeout = 0;
1409 if (!capable(CAP_SYS_ADMIN))
1411 if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
1416 memset(&c, 0, sizeof(c));
1417 c.common.opcode = cmd.opcode;
1418 c.common.flags = cmd.flags;
1419 c.common.nsid = cpu_to_le32(cmd.nsid);
1420 c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
1421 c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
1422 c.common.cdw10 = cpu_to_le32(cmd.cdw10);
1423 c.common.cdw11 = cpu_to_le32(cmd.cdw11);
1424 c.common.cdw12 = cpu_to_le32(cmd.cdw12);
1425 c.common.cdw13 = cpu_to_le32(cmd.cdw13);
1426 c.common.cdw14 = cpu_to_le32(cmd.cdw14);
1427 c.common.cdw15 = cpu_to_le32(cmd.cdw15);
1430 timeout = msecs_to_jiffies(cmd.timeout_ms);
1432 effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
1433 status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
1434 nvme_to_user_ptr(cmd.addr), cmd.data_len,
1435 nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
1436 0, &result, timeout);
1437 nvme_passthru_end(ctrl, effects);
1440 if (put_user(result, &ucmd->result))
1447 static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1448 struct nvme_passthru_cmd64 __user *ucmd)
1450 struct nvme_passthru_cmd64 cmd;
1451 struct nvme_command c;
1452 unsigned timeout = 0;
1456 if (!capable(CAP_SYS_ADMIN))
1458 if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
1463 memset(&c, 0, sizeof(c));
1464 c.common.opcode = cmd.opcode;
1465 c.common.flags = cmd.flags;
1466 c.common.nsid = cpu_to_le32(cmd.nsid);
1467 c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
1468 c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
1469 c.common.cdw10 = cpu_to_le32(cmd.cdw10);
1470 c.common.cdw11 = cpu_to_le32(cmd.cdw11);
1471 c.common.cdw12 = cpu_to_le32(cmd.cdw12);
1472 c.common.cdw13 = cpu_to_le32(cmd.cdw13);
1473 c.common.cdw14 = cpu_to_le32(cmd.cdw14);
1474 c.common.cdw15 = cpu_to_le32(cmd.cdw15);
1477 timeout = msecs_to_jiffies(cmd.timeout_ms);
1479 effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
1480 status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
1481 nvme_to_user_ptr(cmd.addr), cmd.data_len,
1482 nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
1483 0, &cmd.result, timeout);
1484 nvme_passthru_end(ctrl, effects);
1487 if (put_user(cmd.result, &ucmd->result))
1495 * Issue ioctl requests on the first available path. Note that unlike normal
1496 * block layer requests we will not retry failed requests on another controller.
1498 static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
1499 struct nvme_ns_head **head, int *srcu_idx)
1501 #ifdef CONFIG_NVME_MULTIPATH
1502 if (disk->fops == &nvme_ns_head_ops) {
1505 *head = disk->private_data;
1506 *srcu_idx = srcu_read_lock(&(*head)->srcu);
1507 ns = nvme_find_path(*head);
1509 srcu_read_unlock(&(*head)->srcu, *srcu_idx);
1515 return disk->private_data;
1518 static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
1521 srcu_read_unlock(&head->srcu, idx);
1524 static bool is_ctrl_ioctl(unsigned int cmd)
1526 if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
1528 if (is_sed_ioctl(cmd))
1533 static int nvme_handle_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
1535 struct nvme_ns_head *head,
1538 struct nvme_ctrl *ctrl = ns->ctrl;
1541 nvme_get_ctrl(ns->ctrl);
1542 nvme_put_ns_from_disk(head, srcu_idx);
1545 case NVME_IOCTL_ADMIN_CMD:
1546 ret = nvme_user_cmd(ctrl, NULL, argp);
1548 case NVME_IOCTL_ADMIN64_CMD:
1549 ret = nvme_user_cmd64(ctrl, NULL, argp);
1552 ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
1555 nvme_put_ctrl(ctrl);
1559 static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
1560 unsigned int cmd, unsigned long arg)
1562 struct nvme_ns_head *head = NULL;
1563 void __user *argp = (void __user *)arg;
1567 ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
1569 return -EWOULDBLOCK;
1572 * Handle ioctls that apply to the controller instead of the namespace
1573 * separately and drop the ns SRCU reference early. This avoids a
1574 * deadlock when deleting namespaces using the passthrough interface.
1576 if (is_ctrl_ioctl(cmd))
1577 return nvme_handle_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
1581 force_successful_syscall_return();
1582 ret = ns->head->ns_id;
1584 case NVME_IOCTL_IO_CMD:
1585 ret = nvme_user_cmd(ns->ctrl, ns, argp);
1587 case NVME_IOCTL_SUBMIT_IO:
1588 ret = nvme_submit_io(ns, argp);
1590 case NVME_IOCTL_IO64_CMD:
1591 ret = nvme_user_cmd64(ns->ctrl, ns, argp);
1595 ret = nvme_nvm_ioctl(ns, cmd, arg);
1600 nvme_put_ns_from_disk(head, srcu_idx);
1604 #ifdef CONFIG_COMPAT
1605 struct nvme_user_io32 {
1618 } __attribute__((__packed__));
1620 #define NVME_IOCTL_SUBMIT_IO32 _IOW('N', 0x42, struct nvme_user_io32)
1622 static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
1623 unsigned int cmd, unsigned long arg)
1626 * NVME_IOCTL_SUBMIT_IO has a different value for 32-bit programs
1627 * and the 64-bit kernel.
1628 * The cause is that sizeof(struct nvme_user_io),
1629 * which is used to define NVME_IOCTL_SUBMIT_IO,
1630 * differs between 32-bit and 64-bit compilers.
1631 * NVME_IOCTL_SUBMIT_IO32 lets the 64-bit kernel handle
1632 * NVME_IOCTL_SUBMIT_IO issued from 32-bit programs.
1633 * All other ioctl numbers are the same between 32-bit and 64-bit,
1634 * so they need no special handling.
1636 if (cmd == NVME_IOCTL_SUBMIT_IO32)
1637 return nvme_ioctl(bdev, mode, NVME_IOCTL_SUBMIT_IO, arg);
1639 return nvme_ioctl(bdev, mode, cmd, arg);
1642 #define nvme_compat_ioctl NULL
1643 #endif /* CONFIG_COMPAT */
1645 static int nvme_open(struct block_device *bdev, fmode_t mode)
1647 struct nvme_ns *ns = bdev->bd_disk->private_data;
1649 #ifdef CONFIG_NVME_MULTIPATH
1650 /* should never be called due to GENHD_FL_HIDDEN */
1651 if (WARN_ON_ONCE(ns->head->disk))
1654 if (!kref_get_unless_zero(&ns->kref))
1656 if (!try_module_get(ns->ctrl->ops->module))
1667 static void nvme_release(struct gendisk *disk, fmode_t mode)
1669 struct nvme_ns *ns = disk->private_data;
1671 module_put(ns->ctrl->ops->module);
1675 static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1677 /* some standard values */
1678 geo->heads = 1 << 6;
1679 geo->sectors = 1 << 5;
1680 geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
1684 #ifdef CONFIG_BLK_DEV_INTEGRITY
1685 static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
1687 struct blk_integrity integrity;
1689 memset(&integrity, 0, sizeof(integrity));
1691 case NVME_NS_DPS_PI_TYPE3:
1692 integrity.profile = &t10_pi_type3_crc;
1693 integrity.tag_size = sizeof(u16) + sizeof(u32);
1694 integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1696 case NVME_NS_DPS_PI_TYPE1:
1697 case NVME_NS_DPS_PI_TYPE2:
1698 integrity.profile = &t10_pi_type1_crc;
1699 integrity.tag_size = sizeof(u16);
1700 integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1703 integrity.profile = NULL;
1706 integrity.tuple_size = ms;
1707 blk_integrity_register(disk, &integrity);
1708 blk_queue_max_integrity_segments(disk->queue, 1);
1711 static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
1714 #endif /* CONFIG_BLK_DEV_INTEGRITY */
1716 static void nvme_set_chunk_size(struct nvme_ns *ns)
1718 u32 chunk_size = nvme_lba_to_sect(ns, ns->noiob);
1719 blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
1722 static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
1724 struct nvme_ctrl *ctrl = ns->ctrl;
1725 struct request_queue *queue = disk->queue;
1726 u32 size = queue_logical_block_size(queue);
1728 if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) {
1729 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
1733 if (ctrl->nr_streams && ns->sws && ns->sgs)
1734 size *= ns->sws * ns->sgs;
1736 BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
1737 NVME_DSM_MAX_RANGES);
1739 queue->limits.discard_alignment = 0;
1740 queue->limits.discard_granularity = size;
1742 /* If discard is already enabled, don't reset queue limits */
1743 if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
1746 blk_queue_max_discard_sectors(queue, UINT_MAX);
1747 blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
1749 if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
1750 blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
1753 static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
1757 if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) ||
1758 (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
1761 * Even though the NVMe spec explicitly states that MDTS is not
1762 * applicable to write-zeroes: "The restriction does not apply to
1763 * commands that do not transfer data between the host and the
1764 * controller (e.g., Write Uncorrectable or Write Zeroes command).",
1765 * in order to be more cautious use the controller's max_hw_sectors value
1766 * to configure the maximum sectors for write-zeroes, which is
1767 * configured based on the controller's MDTS field in
1768 * nvme_init_identify() if available.
1770 if (ns->ctrl->max_hw_sectors == UINT_MAX)
1771 max_blocks = (u64)USHRT_MAX + 1;
1773 max_blocks = ns->ctrl->max_hw_sectors + 1;
1775 blk_queue_max_write_zeroes_sectors(disk->queue,
1776 nvme_lba_to_sect(ns, max_blocks));
1779 static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
1780 struct nvme_id_ns *id, struct nvme_ns_ids *ids)
1782 memset(ids, 0, sizeof(*ids));
1784 if (ctrl->vs >= NVME_VS(1, 1, 0))
1785 memcpy(ids->eui64, id->eui64, sizeof(id->eui64));
1786 if (ctrl->vs >= NVME_VS(1, 2, 0))
1787 memcpy(ids->nguid, id->nguid, sizeof(id->nguid));
1788 if (ctrl->vs >= NVME_VS(1, 3, 0))
1789 return nvme_identify_ns_descs(ctrl, nsid, ids);
1793 static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
1795 return !uuid_is_null(&ids->uuid) ||
1796 memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
1797 memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
1800 static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
1802 return uuid_equal(&a->uuid, &b->uuid) &&
1803 memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
1804 memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0;
1807 static void nvme_update_disk_info(struct gendisk *disk,
1808 struct nvme_ns *ns, struct nvme_id_ns *id)
1810 sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
1811 unsigned short bs = 1 << ns->lba_shift;
1812 u32 atomic_bs, phys_bs, io_opt;
1814 if (ns->lba_shift > PAGE_SHIFT) {
1815 /* unsupported block size, set capacity to 0 later */
1818 blk_mq_freeze_queue(disk->queue);
1819 blk_integrity_unregister(disk);
1821 if (id->nabo == 0) {
1823 * Bit 1 indicates whether NAWUPF is defined for this namespace
1824 * and whether it should be used instead of AWUPF. If NAWUPF ==
1825 * 0 then AWUPF must be used instead.
1827 if (id->nsfeat & (1 << 1) && id->nawupf)
1828 atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
1830 atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
1836 if (id->nsfeat & (1 << 4)) {
1837 /* NPWG = Namespace Preferred Write Granularity */
1838 phys_bs *= 1 + le16_to_cpu(id->npwg);
1839 /* NOWS = Namespace Optimal Write Size */
1840 io_opt *= 1 + le16_to_cpu(id->nows);
1843 blk_queue_logical_block_size(disk->queue, bs);
1845 * Linux filesystems assume writing a single physical block is
1846 * an atomic operation. Hence limit the physical block size to the
1847 * value of the Atomic Write Unit Power Fail parameter.
1849 blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
1850 blk_queue_io_min(disk->queue, phys_bs);
1851 blk_queue_io_opt(disk->queue, io_opt);
1853 if (ns->ms && !ns->ext &&
1854 (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
1855 nvme_init_integrity(disk, ns->ms, ns->pi_type);
1856 if ((ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) ||
1857 ns->lba_shift > PAGE_SHIFT)
1860 set_capacity_revalidate_and_notify(disk, capacity, false);
1862 nvme_config_discard(disk, ns);
1863 nvme_config_write_zeroes(disk, ns);
1865 if (id->nsattr & (1 << 0))
1866 set_disk_ro(disk, true);
1868 set_disk_ro(disk, false);
1870 blk_mq_unfreeze_queue(disk->queue);
1873 static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
1875 struct nvme_ns *ns = disk->private_data;
1878 * If Identify Namespace failed, use the default 512 byte block size so
1879 * the block layer can use it before failing reads/writes for 0 capacity.
1881 ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
1882 if (ns->lba_shift == 0)
1884 ns->noiob = le16_to_cpu(id->noiob);
1885 ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
1886 ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
1887 /* the PI implementation requires metadata equal to the t10 pi tuple size */
1888 if (ns->ms == sizeof(struct t10_pi_tuple))
1889 ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
1894 nvme_set_chunk_size(ns);
1895 nvme_update_disk_info(disk, ns, id);
1896 #ifdef CONFIG_NVME_MULTIPATH
1897 if (ns->head->disk) {
1898 nvme_update_disk_info(ns->head->disk, ns, id);
1899 blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
1900 if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) {
1901 struct backing_dev_info *info =
1902 ns->head->disk->queue->backing_dev_info;
1904 info->capabilities |= BDI_CAP_STABLE_WRITES;
1907 revalidate_disk(ns->head->disk);
1912 static int nvme_revalidate_disk(struct gendisk *disk)
1914 struct nvme_ns *ns = disk->private_data;
1915 struct nvme_ctrl *ctrl = ns->ctrl;
1916 struct nvme_id_ns *id;
1917 struct nvme_ns_ids ids;
1920 if (test_bit(NVME_NS_DEAD, &ns->flags)) {
1921 set_capacity(disk, 0);
1925 ret = nvme_identify_ns(ctrl, ns->head->ns_id, &id);
1929 if (id->ncap == 0) {
1934 __nvme_revalidate_disk(disk, id);
1935 ret = nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids);
1939 if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) {
1940 dev_err(ctrl->device,
1941 "identifiers changed for nsid %d\n", ns->head->ns_id);
1949 * Only fail the function if we got a fatal error back from the
1950 * device, otherwise ignore the error and just move on.
1952 if (ret == -ENOMEM || (ret > 0 && !(ret & NVME_SC_DNR)))
1955 ret = blk_status_to_errno(nvme_error_status(ret));
1959 static char nvme_pr_type(enum pr_type type)
1962 case PR_WRITE_EXCLUSIVE:
1964 case PR_EXCLUSIVE_ACCESS:
1966 case PR_WRITE_EXCLUSIVE_REG_ONLY:
1968 case PR_EXCLUSIVE_ACCESS_REG_ONLY:
1970 case PR_WRITE_EXCLUSIVE_ALL_REGS:
1972 case PR_EXCLUSIVE_ACCESS_ALL_REGS:
1979 static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
1980 u64 key, u64 sa_key, u8 op)
1982 struct nvme_ns_head *head = NULL;
1984 struct nvme_command c;
1986 u8 data[16] = { 0, };
1988 ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
1990 return -EWOULDBLOCK;
1992 put_unaligned_le64(key, &data[0]);
1993 put_unaligned_le64(sa_key, &data[8]);
1995 memset(&c, 0, sizeof(c));
1996 c.common.opcode = op;
1997 c.common.nsid = cpu_to_le32(ns->head->ns_id);
1998 c.common.cdw10 = cpu_to_le32(cdw10);
2000 ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
2001 nvme_put_ns_from_disk(head, srcu_idx);
2005 static int nvme_pr_register(struct block_device *bdev, u64 old,
2006 u64 new, unsigned flags)
2010 if (flags & ~PR_FL_IGNORE_KEY)
2013 cdw10 = old ? 2 : 0;
2014 cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
2015 cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
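/*
 * Bits 31:30 of cdw10 form the CPTPL field; setting both bits (CPTPL = 3)
 * requests that the reservation persist through power loss (PTPL = 1).
 */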
2016 return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
2019 static int nvme_pr_reserve(struct block_device *bdev, u64 key,
2020 enum pr_type type, unsigned flags)
2024 if (flags & ~PR_FL_IGNORE_KEY)
2027 cdw10 = nvme_pr_type(type) << 8;
2028 cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
2029 return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
2032 static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
2033 enum pr_type type, bool abort)
2035 u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
2036 return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
2039 static int nvme_pr_clear(struct block_device *bdev, u64 key)
2041 u32 cdw10 = 1 | (key ? 1 << 3 : 0);
2042 return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
2045 static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
2047 u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
2048 return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
2051 static const struct pr_ops nvme_pr_ops = {
2052 .pr_register = nvme_pr_register,
2053 .pr_reserve = nvme_pr_reserve,
2054 .pr_release = nvme_pr_release,
2055 .pr_preempt = nvme_pr_preempt,
2056 .pr_clear = nvme_pr_clear,
2059 #ifdef CONFIG_BLK_SED_OPAL
2060 int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
2063 struct nvme_ctrl *ctrl = data;
2064 struct nvme_command cmd;
2066 memset(&cmd, 0, sizeof(cmd));
2068 cmd.common.opcode = nvme_admin_security_send;
2070 cmd.common.opcode = nvme_admin_security_recv;
2071 cmd.common.nsid = 0;
2072 cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
2073 cmd.common.cdw11 = cpu_to_le32(len);
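/*
 * cdw10 packs the security protocol (SECP) into bits 31:24 and the
 * protocol specific field (SPSP) into bits 23:8; cdw11 carries the
 * transfer length in bytes.
 */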
2075 return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
2076 ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0, false);
2078 EXPORT_SYMBOL_GPL(nvme_sec_submit);
2079 #endif /* CONFIG_BLK_SED_OPAL */
2081 static const struct block_device_operations nvme_fops = {
2082 .owner = THIS_MODULE,
2083 .ioctl = nvme_ioctl,
2084 .compat_ioctl = nvme_compat_ioctl,
2086 .release = nvme_release,
2087 .getgeo = nvme_getgeo,
2088 .revalidate_disk = nvme_revalidate_disk,
2089 .pr_ops = &nvme_pr_ops,
2092 #ifdef CONFIG_NVME_MULTIPATH
2093 static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
2095 struct nvme_ns_head *head = bdev->bd_disk->private_data;
2097 if (!kref_get_unless_zero(&head->ref))
2102 static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
2104 nvme_put_ns_head(disk->private_data);
2107 const struct block_device_operations nvme_ns_head_ops = {
2108 .owner = THIS_MODULE,
2109 .open = nvme_ns_head_open,
2110 .release = nvme_ns_head_release,
2111 .ioctl = nvme_ioctl,
2112 .compat_ioctl = nvme_compat_ioctl,
2113 .getgeo = nvme_getgeo,
2114 .pr_ops = &nvme_pr_ops,
2116 #endif /* CONFIG_NVME_MULTIPATH */
2118 static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
2120 unsigned long timeout =
2121 ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
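/* CAP.TO is in 500 msec units, so (TO + 1) * HZ / 2 converts it to jiffies */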
2122 u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
2125 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
2128 if ((csts & NVME_CSTS_RDY) == bit)
2131 usleep_range(1000, 2000);
2132 if (fatal_signal_pending(current))
2134 if (time_after(jiffies, timeout)) {
2135 dev_err(ctrl->device,
2136 "Device not ready; aborting %s, CSTS=0x%x\n",
2137 enabled ? "initialisation" : "reset", csts);
2146 * If the device has been passed off to us in an enabled state, just clear
2147 * the enabled bit. The spec says we should set the 'shutdown notification
2148 * bits', but doing so may cause the device to complete commands to the
2149 * admin queue ... and we don't know what memory that might be pointing at!
2151 int nvme_disable_ctrl(struct nvme_ctrl *ctrl)
2155 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
2156 ctrl->ctrl_config &= ~NVME_CC_ENABLE;
2158 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2162 if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
2163 msleep(NVME_QUIRK_DELAY_AMOUNT);
2165 return nvme_wait_ready(ctrl, ctrl->cap, false);
2167 EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
2169 int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
2172 * Default to a 4K page size, with the intention to update this
2173 * path in the future to accommodate architectures with differing
2174 * kernel and IO page sizes.
2176 unsigned dev_page_min, page_shift = 12;
2179 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
2181 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
2184 dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;
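/* CAP.MPSMIN reports the minimum page size as 2^(12 + MPSMIN), hence the + 12 */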
2186 if (page_shift < dev_page_min) {
2187 dev_err(ctrl->device,
2188 "Minimum device page size %u too large for host (%u)\n",
2189 1 << dev_page_min, 1 << page_shift);
2193 ctrl->page_size = 1 << page_shift;
2195 ctrl->ctrl_config = NVME_CC_CSS_NVM;
2196 ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
2197 ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
2198 ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
2199 ctrl->ctrl_config |= NVME_CC_ENABLE;
2201 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2204 return nvme_wait_ready(ctrl, ctrl->cap, true);
2206 EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
2208 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
2210 unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ);
2214 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
2215 ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
2217 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2221 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
2222 if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
2226 if (fatal_signal_pending(current))
2228 if (time_after(jiffies, timeout)) {
2229 dev_err(ctrl->device,
2230 "Device shutdown incomplete; abort shutdown\n");
2237 EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
2239 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
2240 struct request_queue *q)
2244 if (ctrl->max_hw_sectors) {
2246 (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
2248 max_segments = min_not_zero(max_segments, ctrl->max_segments);
2249 blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
2250 blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
2252 if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
2253 is_power_of_2(ctrl->max_hw_sectors))
2254 blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
2255 blk_queue_virt_boundary(q, ctrl->page_size - 1);
2256 if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
2258 blk_queue_write_cache(q, vwc, vwc);
2261 static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
2266 if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
2269 ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
2270 ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
2273 dev_warn_once(ctrl->device,
2274 "could not set timestamp (%d)\n", ret);
2278 static int nvme_configure_acre(struct nvme_ctrl *ctrl)
2280 struct nvme_feat_host_behavior *host;
2283 /* Don't bother enabling the feature if retry delay is not reported */
2287 host = kzalloc(sizeof(*host), GFP_KERNEL);
2291 host->acre = NVME_ENABLE_ACRE;
2292 ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
2293 host, sizeof(*host), NULL);
2298 static int nvme_configure_apst(struct nvme_ctrl *ctrl)
2301 * APST (Autonomous Power State Transition) lets us program a
2302 * table of power state transitions that the controller will
2303 * perform automatically. We configure it with a simple
2304 * heuristic: we are willing to spend at most 2% of the time
2305 * transitioning between power states. Therefore, when running
2306 * in any given state, we will enter the next lower-power
2307 * non-operational state after waiting 50 * (enlat + exlat)
2308 * microseconds, as long as that state's exit latency is under
2309 * the requested maximum latency.
2311 * We will not autonomously enter any non-operational state for
2312 * which the total latency exceeds ps_max_latency_us. Users
2313 * can set ps_max_latency_us to zero to turn off APST.
2317 struct nvme_feat_auto_pst *table;
2323 * If APST isn't supported or if we haven't been initialized yet,
2324 * then don't do anything.
2329 if (ctrl->npss > 31) {
2330 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
2334 table = kzalloc(sizeof(*table), GFP_KERNEL);
2338 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
2339 /* Turn off APST. */
2341 dev_dbg(ctrl->device, "APST disabled\n");
2343 __le64 target = cpu_to_le64(0);
2347 * Walk through all states from lowest- to highest-power.
2348 * According to the spec, lower-numbered states use more
2349 * power. NPSS, despite the name, is the index of the
2350 * lowest-power state, not the number of states.
2352 for (state = (int)ctrl->npss; state >= 0; state--) {
2353 u64 total_latency_us, exit_latency_us, transition_ms;
2356 table->entries[state] = target;
2359 * Don't allow transitions to the deepest state
2360 * if it's quirked off.
2362 if (state == ctrl->npss &&
2363 (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
2367 * Is this state a useful non-operational state for
2368 * higher-power states to autonomously transition to?
2370 if (!(ctrl->psd[state].flags &
2371 NVME_PS_FLAGS_NON_OP_STATE))
2375 (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
2376 if (exit_latency_us > ctrl->ps_max_latency_us)
2381 le32_to_cpu(ctrl->psd[state].entry_lat);
2384 * This state is good. Use it as the APST idle
2385 * target for higher power states.
2387 transition_ms = total_latency_us + 19;
2388 do_div(transition_ms, 20);
2389 if (transition_ms > (1 << 24) - 1)
2390 transition_ms = (1 << 24) - 1;
2392 target = cpu_to_le64((state << 3) |
2393 (transition_ms << 8));
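/*
 * Editor's note, a worked example with hypothetical latencies: for a
 * non-operational state with enlat = 1000 us and exlat = 500 us, the total
 * latency is 1500 us and the idle timer is (1500 + 19) / 20 = 75 ms, i.e.
 * roughly 50 * (enlat + exlat) us, keeping transition overhead near 2%.
 * The target value built above places the destination state in bits 7:3
 * and the idle time in milliseconds in bits 31:8, which is why it is
 * clamped to the 24-bit maximum of (1 << 24) - 1.
 */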
2398 if (total_latency_us > max_lat_us)
2399 max_lat_us = total_latency_us;
2405 dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
2407 dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
2408 max_ps, max_lat_us, (int)sizeof(*table), table);
2412 ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
2413 table, sizeof(*table), NULL);
2415 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
2421 static void nvme_set_latency_tolerance(struct device *dev, s32 val)
2423 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2427 case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
2428 case PM_QOS_LATENCY_ANY:
2436 if (ctrl->ps_max_latency_us != latency) {
2437 ctrl->ps_max_latency_us = latency;
2438 nvme_configure_apst(ctrl);
2442 struct nvme_core_quirk_entry {
2444 * NVMe model and firmware strings are padded with spaces. For
2445 * simplicity, strings in the quirk table are padded with NULLs
2451 unsigned long quirks;
2454 static const struct nvme_core_quirk_entry core_quirks[] = {
2457 * This Toshiba device seems to die using any APST states. See:
2458 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
2461 .mn = "THNSF5256GPUK TOSHIBA",
2462 .quirks = NVME_QUIRK_NO_APST,
2466 * This LiteON CL1-3D*-Q11 firmware version has a race
2467 * condition associated with actions related to suspend to idle.
2468 * LiteON has resolved the problem in future firmware.
2472 .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
2476 /* match is null-terminated but idstr is space-padded. */
2477 static bool string_matches(const char *idstr, const char *match, size_t len)
2484 matchlen = strlen(match);
2485 WARN_ON_ONCE(matchlen > len);
2487 if (memcmp(idstr, match, matchlen))
2490 for (; matchlen < len; matchlen++)
2491 if (idstr[matchlen] != ' ')
2497 static bool quirk_matches(const struct nvme_id_ctrl *id,
2498 const struct nvme_core_quirk_entry *q)
2500 return q->vid == le16_to_cpu(id->vid) &&
2501 string_matches(id->mn, q->mn, sizeof(id->mn)) &&
2502 string_matches(id->fr, q->fr, sizeof(id->fr));
2505 static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
2506 struct nvme_id_ctrl *id)
2511 if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
2512 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
2513 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
2514 strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
2518 if (ctrl->vs >= NVME_VS(1, 2, 1))
2519 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
2522 /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
2523 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
2524 "nqn.2014.08.org.nvmexpress:%04x%04x",
2525 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
2526 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
2527 off += sizeof(id->sn);
2528 memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
2529 off += sizeof(id->mn);
2530 memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
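/*
 * Editor's note, a purely hypothetical example of the generated NQN: with
 * vid 0x1b36, ssvid 0x1b36, serial "deadbeef" and model "QEMU NVMe Ctrl",
 * the string starts with "nqn.2014.08.org.nvmexpress:1b361b36" and is
 * followed by the raw, space-padded 20-byte serial and 40-byte model
 * fields; the memset above zeroes whatever remains of subnqn.
 */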
2533 static void nvme_release_subsystem(struct device *dev)
2535 struct nvme_subsystem *subsys =
2536 container_of(dev, struct nvme_subsystem, dev);
2538 if (subsys->instance >= 0)
2539 ida_simple_remove(&nvme_instance_ida, subsys->instance);
2543 static void nvme_destroy_subsystem(struct kref *ref)
2545 struct nvme_subsystem *subsys =
2546 container_of(ref, struct nvme_subsystem, ref);
2548 mutex_lock(&nvme_subsystems_lock);
2549 list_del(&subsys->entry);
2550 mutex_unlock(&nvme_subsystems_lock);
2552 ida_destroy(&subsys->ns_ida);
2553 device_del(&subsys->dev);
2554 put_device(&subsys->dev);
2557 static void nvme_put_subsystem(struct nvme_subsystem *subsys)
2559 kref_put(&subsys->ref, nvme_destroy_subsystem);
2562 static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
2564 struct nvme_subsystem *subsys;
2566 lockdep_assert_held(&nvme_subsystems_lock);
2569 * Fail matches for discovery subsystems. This results
2570 * in each discovery controller being bound to a unique subsystem.
2571 * This avoids issues with validating controller values
2572 * that can only be true when there is a single unique subsystem.
2573 * There may be multiple and completely independent entities
2574 * that provide discovery controllers.
2576 if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME))
2579 list_for_each_entry(subsys, &nvme_subsystems, entry) {
2580 if (strcmp(subsys->subnqn, subsysnqn))
2582 if (!kref_get_unless_zero(&subsys->ref))
2590 #define SUBSYS_ATTR_RO(_name, _mode, _show) \
2591 struct device_attribute subsys_attr_##_name = \
2592 __ATTR(_name, _mode, _show, NULL)
2594 static ssize_t nvme_subsys_show_nqn(struct device *dev,
2595 struct device_attribute *attr,
2598 struct nvme_subsystem *subsys =
2599 container_of(dev, struct nvme_subsystem, dev);
2601 return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn);
2603 static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
2605 #define nvme_subsys_show_str_function(field) \
2606 static ssize_t subsys_##field##_show(struct device *dev, \
2607 struct device_attribute *attr, char *buf) \
2609 struct nvme_subsystem *subsys = \
2610 container_of(dev, struct nvme_subsystem, dev); \
2611 return sprintf(buf, "%.*s\n", \
2612 (int)sizeof(subsys->field), subsys->field); \
2614 static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);
2616 nvme_subsys_show_str_function(model);
2617 nvme_subsys_show_str_function(serial);
2618 nvme_subsys_show_str_function(firmware_rev);
2620 static struct attribute *nvme_subsys_attrs[] = {
2621 &subsys_attr_model.attr,
2622 &subsys_attr_serial.attr,
2623 &subsys_attr_firmware_rev.attr,
2624 &subsys_attr_subsysnqn.attr,
2625 #ifdef CONFIG_NVME_MULTIPATH
2626 &subsys_attr_iopolicy.attr,
2631 static struct attribute_group nvme_subsys_attrs_group = {
2632 .attrs = nvme_subsys_attrs,
2635 static const struct attribute_group *nvme_subsys_attrs_groups[] = {
2636 &nvme_subsys_attrs_group,
2640 static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
2641 struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2643 struct nvme_ctrl *tmp;
2645 lockdep_assert_held(&nvme_subsystems_lock);
2647 list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
2648 if (nvme_state_terminal(tmp))
2651 if (tmp->cntlid == ctrl->cntlid) {
2652 dev_err(ctrl->device,
2653 "Duplicate cntlid %u with %s, rejecting\n",
2654 ctrl->cntlid, dev_name(tmp->device));
2658 if ((id->cmic & (1 << 1)) ||
2659 (ctrl->opts && ctrl->opts->discovery_nqn))
2662 dev_err(ctrl->device,
2663 "Subsystem does not support multiple controllers\n");
2670 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2672 struct nvme_subsystem *subsys, *found;
2675 subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
2679 subsys->instance = -1;
2680 mutex_init(&subsys->lock);
2681 kref_init(&subsys->ref);
2682 INIT_LIST_HEAD(&subsys->ctrls);
2683 INIT_LIST_HEAD(&subsys->nsheads);
2684 nvme_init_subnqn(subsys, ctrl, id);
2685 memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
2686 memcpy(subsys->model, id->mn, sizeof(subsys->model));
2687 memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
2688 subsys->vendor_id = le16_to_cpu(id->vid);
2689 subsys->cmic = id->cmic;
2690 subsys->awupf = le16_to_cpu(id->awupf);
2691 #ifdef CONFIG_NVME_MULTIPATH
2692 subsys->iopolicy = NVME_IOPOLICY_NUMA;
2695 subsys->dev.class = nvme_subsys_class;
2696 subsys->dev.release = nvme_release_subsystem;
2697 subsys->dev.groups = nvme_subsys_attrs_groups;
2698 dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
2699 device_initialize(&subsys->dev);
2701 mutex_lock(&nvme_subsystems_lock);
2702 found = __nvme_find_get_subsystem(subsys->subnqn);
2704 put_device(&subsys->dev);
2707 if (!nvme_validate_cntlid(subsys, ctrl, id)) {
2709 goto out_put_subsystem;
2712 ret = device_add(&subsys->dev);
2714 dev_err(ctrl->device,
2715 "failed to register subsystem device.\n");
2716 put_device(&subsys->dev);
2719 ida_init(&subsys->ns_ida);
2720 list_add_tail(&subsys->entry, &nvme_subsystems);
2723 ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
2724 dev_name(ctrl->device));
2726 dev_err(ctrl->device,
2727 "failed to create sysfs link from subsystem.\n");
2728 goto out_put_subsystem;
2732 subsys->instance = ctrl->instance;
2733 ctrl->subsys = subsys;
2734 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
2735 mutex_unlock(&nvme_subsystems_lock);
2739 nvme_put_subsystem(subsys);
2741 mutex_unlock(&nvme_subsystems_lock);
2745 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
2746 void *log, size_t size, u64 offset)
2748 struct nvme_command c = { };
2749 unsigned long dwlen = size / 4 - 1;
2751 c.get_log_page.opcode = nvme_admin_get_log_page;
2752 c.get_log_page.nsid = cpu_to_le32(nsid);
2753 c.get_log_page.lid = log_page;
2754 c.get_log_page.lsp = lsp;
2755 c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
2756 c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
2757 c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
2758 c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
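/*
 * Editor's note, an illustrative encoding (sizes hypothetical): the length
 * is a zero-based dword count split across NUMDL/NUMDU, so a 16 KiB log is
 * 4096 dwords, dwlen = 4095 = 0x0fff, numdl = 0x0fff and numdu = 0, while
 * the byte offset is split across LPOL (lower 32 bits) and LPOU (upper 32
 * bits).
 */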
2760 return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
2763 static int nvme_get_effects_log(struct nvme_ctrl *ctrl)
2768 ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);
2773 ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0,
2774 ctrl->effects, sizeof(*ctrl->effects), 0);
2776 kfree(ctrl->effects);
2777 ctrl->effects = NULL;
2783 * Initialize the cached copies of the Identify data and various controller
2784 * registers in our nvme_ctrl structure. This should be called as soon as
2785 * the admin queue is fully up and running.
2787 int nvme_init_identify(struct nvme_ctrl *ctrl)
2789 struct nvme_id_ctrl *id;
2790 int ret, page_shift;
2792 bool prev_apst_enabled;
2794 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
2796 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
2799 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
2800 ctrl->sqsize = min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
2802 if (ctrl->vs >= NVME_VS(1, 1, 0))
2803 ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);
2805 ret = nvme_identify_ctrl(ctrl, &id);
2807 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
2811 if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
2812 ret = nvme_get_effects_log(ctrl);
2817 if (!(ctrl->ops->flags & NVME_F_FABRICS))
2818 ctrl->cntlid = le16_to_cpu(id->cntlid);
2820 if (!ctrl->identified) {
2823 ret = nvme_init_subsystem(ctrl, id);
2828 * Check for quirks. Quirk can depend on firmware version,
2829 * so, in principle, the set of quirks present can change
2830 * across a reset. As a possible future enhancement, we
2831 * could re-scan for quirks every time we reinitialize
2832 * the device, but we'd have to make sure that the driver
2833 * behaves intelligently if the quirks change.
2835 for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
2836 if (quirk_matches(id, &core_quirks[i]))
2837 ctrl->quirks |= core_quirks[i].quirks;
2841 if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
2842 dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
2843 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
2846 ctrl->crdt[0] = le16_to_cpu(id->crdt1);
2847 ctrl->crdt[1] = le16_to_cpu(id->crdt2);
2848 ctrl->crdt[2] = le16_to_cpu(id->crdt3);
2850 ctrl->oacs = le16_to_cpu(id->oacs);
2851 ctrl->oncs = le16_to_cpu(id->oncs);
2852 ctrl->mtfa = le16_to_cpu(id->mtfa);
2853 ctrl->oaes = le32_to_cpu(id->oaes);
2854 ctrl->wctemp = le16_to_cpu(id->wctemp);
2855 ctrl->cctemp = le16_to_cpu(id->cctemp);
2857 atomic_set(&ctrl->abort_limit, id->acl + 1);
2858 ctrl->vwc = id->vwc;
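/*
 * Editor's note: MDTS is a power-of-two multiple of the minimum memory
 * page size (CAP.MPSMIN), hence the page_shift conversion below. As a
 * hypothetical example, mdts = 5 with a 4 KiB minimum page size allows
 * 2^5 * 4 KiB = 128 KiB per transfer, i.e. 1 << (5 + 12 - 9) = 256
 * 512-byte sectors.
 */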
2860 max_hw_sectors = 1 << (id->mdts + page_shift - 9);
2862 max_hw_sectors = UINT_MAX;
2863 ctrl->max_hw_sectors =
2864 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
2866 nvme_set_queue_limits(ctrl, ctrl->admin_q);
2867 ctrl->sgls = le32_to_cpu(id->sgls);
2868 ctrl->kas = le16_to_cpu(id->kas);
2869 ctrl->max_namespaces = le32_to_cpu(id->mnan);
2870 ctrl->ctratt = le32_to_cpu(id->ctratt);
2874 u32 transition_time = le32_to_cpu(id->rtd3e) / 1000000;
2876 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
2877 shutdown_timeout, 60);
2879 if (ctrl->shutdown_timeout != shutdown_timeout)
2880 dev_info(ctrl->device,
2881 "Shutdown timeout set to %u seconds\n",
2882 ctrl->shutdown_timeout);
2884 ctrl->shutdown_timeout = shutdown_timeout;
2886 ctrl->npss = id->npss;
2887 ctrl->apsta = id->apsta;
2888 prev_apst_enabled = ctrl->apst_enabled;
2889 if (ctrl->quirks & NVME_QUIRK_NO_APST) {
2890 if (force_apst && id->apsta) {
2891 dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
2892 ctrl->apst_enabled = true;
2894 ctrl->apst_enabled = false;
2897 ctrl->apst_enabled = id->apsta;
2899 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));
2901 if (ctrl->ops->flags & NVME_F_FABRICS) {
2902 ctrl->icdoff = le16_to_cpu(id->icdoff);
2903 ctrl->ioccsz = le32_to_cpu(id->ioccsz);
2904 ctrl->iorcsz = le32_to_cpu(id->iorcsz);
2905 ctrl->maxcmd = le16_to_cpu(id->maxcmd);
2908 * In fabrics we need to verify the cntlid matches the
2911 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
2912 dev_err(ctrl->device,
2913 "Mismatching cntlid: Connect %u vs Identify "
2915 ctrl->cntlid, le16_to_cpu(id->cntlid));
2920 if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
2921 dev_err(ctrl->device,
2922 "keep-alive support is mandatory for fabrics\n");
2927 ctrl->hmpre = le32_to_cpu(id->hmpre);
2928 ctrl->hmmin = le32_to_cpu(id->hmmin);
2929 ctrl->hmminds = le32_to_cpu(id->hmminds);
2930 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
2933 ret = nvme_mpath_init(ctrl, id);
2939 if (ctrl->apst_enabled && !prev_apst_enabled)
2940 dev_pm_qos_expose_latency_tolerance(ctrl->device);
2941 else if (!ctrl->apst_enabled && prev_apst_enabled)
2942 dev_pm_qos_hide_latency_tolerance(ctrl->device);
2944 ret = nvme_configure_apst(ctrl);
2948 ret = nvme_configure_timestamp(ctrl);
2952 ret = nvme_configure_directives(ctrl);
2956 ret = nvme_configure_acre(ctrl);
2960 if (!ctrl->identified)
2961 nvme_hwmon_init(ctrl);
2963 ctrl->identified = true;
2971 EXPORT_SYMBOL_GPL(nvme_init_identify);
2973 static int nvme_dev_open(struct inode *inode, struct file *file)
2975 struct nvme_ctrl *ctrl =
2976 container_of(inode->i_cdev, struct nvme_ctrl, cdev);
2978 switch (ctrl->state) {
2979 case NVME_CTRL_LIVE:
2982 return -EWOULDBLOCK;
2985 file->private_data = ctrl;
2989 static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
2994 down_read(&ctrl->namespaces_rwsem);
2995 if (list_empty(&ctrl->namespaces)) {
3000 ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
3001 if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
3002 dev_warn(ctrl->device,
3003 "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
3008 dev_warn(ctrl->device,
3009 "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
3010 kref_get(&ns->kref);
3011 up_read(&ctrl->namespaces_rwsem);
3013 ret = nvme_user_cmd(ctrl, ns, argp);
3018 up_read(&ctrl->namespaces_rwsem);
3022 static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
3025 struct nvme_ctrl *ctrl = file->private_data;
3026 void __user *argp = (void __user *)arg;
3029 case NVME_IOCTL_ADMIN_CMD:
3030 return nvme_user_cmd(ctrl, NULL, argp);
3031 case NVME_IOCTL_ADMIN64_CMD:
3032 return nvme_user_cmd64(ctrl, NULL, argp);
3033 case NVME_IOCTL_IO_CMD:
3034 return nvme_dev_user_cmd(ctrl, argp);
3035 case NVME_IOCTL_RESET:
3036 dev_warn(ctrl->device, "resetting controller\n");
3037 return nvme_reset_ctrl_sync(ctrl);
3038 case NVME_IOCTL_SUBSYS_RESET:
3039 return nvme_reset_subsystem(ctrl);
3040 case NVME_IOCTL_RESCAN:
3041 nvme_queue_scan(ctrl);
3048 static const struct file_operations nvme_dev_fops = {
3049 .owner = THIS_MODULE,
3050 .open = nvme_dev_open,
3051 .unlocked_ioctl = nvme_dev_ioctl,
3052 .compat_ioctl = compat_ptr_ioctl,
3055 static ssize_t nvme_sysfs_reset(struct device *dev,
3056 struct device_attribute *attr, const char *buf,
3059 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3062 ret = nvme_reset_ctrl_sync(ctrl);
3067 static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
3069 static ssize_t nvme_sysfs_rescan(struct device *dev,
3070 struct device_attribute *attr, const char *buf,
3073 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3075 nvme_queue_scan(ctrl);
3078 static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
3080 static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
3082 struct gendisk *disk = dev_to_disk(dev);
3084 if (disk->fops == &nvme_fops)
3085 return nvme_get_ns_from_dev(dev)->head;
3087 return disk->private_data;
3090 static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
3093 struct nvme_ns_head *head = dev_to_ns_head(dev);
3094 struct nvme_ns_ids *ids = &head->ids;
3095 struct nvme_subsystem *subsys = head->subsys;
3096 int serial_len = sizeof(subsys->serial);
3097 int model_len = sizeof(subsys->model);
3099 if (!uuid_is_null(&ids->uuid))
3100 return sprintf(buf, "uuid.%pU\n", &ids->uuid);
3102 if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3103 return sprintf(buf, "eui.%16phN\n", ids->nguid);
3105 if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
3106 return sprintf(buf, "eui.%8phN\n", ids->eui64);
3108 while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
3109 subsys->serial[serial_len - 1] == '\0'))
3111 while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
3112 subsys->model[model_len - 1] == '\0'))
3115 return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
3116 serial_len, subsys->serial, model_len, subsys->model,
3119 static DEVICE_ATTR_RO(wwid);
3121 static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
3124 return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
3126 static DEVICE_ATTR_RO(nguid);
3128 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
3131 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
3133 /* For backward compatibility expose the NGUID to userspace if
3134 * we have no UUID set
3136 if (uuid_is_null(&ids->uuid)) {
3137 printk_ratelimited(KERN_WARNING
3138 "No UUID available, providing old NGUID\n");
3139 return sprintf(buf, "%pU\n", ids->nguid);
3141 return sprintf(buf, "%pU\n", &ids->uuid);
3143 static DEVICE_ATTR_RO(uuid);
3145 static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
3148 return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
3150 static DEVICE_ATTR_RO(eui);
3152 static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
3155 return sprintf(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
3157 static DEVICE_ATTR_RO(nsid);
3159 static struct attribute *nvme_ns_id_attrs[] = {
3160 &dev_attr_wwid.attr,
3161 &dev_attr_uuid.attr,
3162 &dev_attr_nguid.attr,
3164 &dev_attr_nsid.attr,
3165 #ifdef CONFIG_NVME_MULTIPATH
3166 &dev_attr_ana_grpid.attr,
3167 &dev_attr_ana_state.attr,
3172 static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
3173 struct attribute *a, int n)
3175 struct device *dev = container_of(kobj, struct device, kobj);
3176 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
3178 if (a == &dev_attr_uuid.attr) {
3179 if (uuid_is_null(&ids->uuid) &&
3180 !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3183 if (a == &dev_attr_nguid.attr) {
3184 if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3187 if (a == &dev_attr_eui.attr) {
3188 if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
3191 #ifdef CONFIG_NVME_MULTIPATH
3192 if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
3193 if (dev_to_disk(dev)->fops != &nvme_fops) /* per-path attr */
3195 if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
3202 static const struct attribute_group nvme_ns_id_attr_group = {
3203 .attrs = nvme_ns_id_attrs,
3204 .is_visible = nvme_ns_id_attrs_are_visible,
3207 const struct attribute_group *nvme_ns_id_attr_groups[] = {
3208 &nvme_ns_id_attr_group,
3210 &nvme_nvm_attr_group,
3215 #define nvme_show_str_function(field) \
3216 static ssize_t field##_show(struct device *dev, \
3217 struct device_attribute *attr, char *buf) \
3219 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
3220 return sprintf(buf, "%.*s\n", \
3221 (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \
3223 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
3225 nvme_show_str_function(model);
3226 nvme_show_str_function(serial);
3227 nvme_show_str_function(firmware_rev);
3229 #define nvme_show_int_function(field) \
3230 static ssize_t field##_show(struct device *dev, \
3231 struct device_attribute *attr, char *buf) \
3233 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
3234 return sprintf(buf, "%d\n", ctrl->field); \
3236 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
3238 nvme_show_int_function(cntlid);
3239 nvme_show_int_function(numa_node);
3240 nvme_show_int_function(queue_count);
3241 nvme_show_int_function(sqsize);
3243 static ssize_t nvme_sysfs_delete(struct device *dev,
3244 struct device_attribute *attr, const char *buf,
3247 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3249 /* Can't delete non-created controllers */
3253 if (device_remove_file_self(dev, attr))
3254 nvme_delete_ctrl_sync(ctrl);
3257 static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
3259 static ssize_t nvme_sysfs_show_transport(struct device *dev,
3260 struct device_attribute *attr,
3263 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3265 return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
3267 static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
3269 static ssize_t nvme_sysfs_show_state(struct device *dev,
3270 struct device_attribute *attr,
3273 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3274 static const char *const state_name[] = {
3275 [NVME_CTRL_NEW] = "new",
3276 [NVME_CTRL_LIVE] = "live",
3277 [NVME_CTRL_RESETTING] = "resetting",
3278 [NVME_CTRL_CONNECTING] = "connecting",
3279 [NVME_CTRL_DELETING] = "deleting",
3280 [NVME_CTRL_DEAD] = "dead",
3283 if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
3284 state_name[ctrl->state])
3285 return sprintf(buf, "%s\n", state_name[ctrl->state]);
3287 return sprintf(buf, "unknown state\n");
3290 static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);
3292 static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
3293 struct device_attribute *attr,
3296 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3298 return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn);
3300 static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
3302 static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
3303 struct device_attribute *attr,
3306 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3308 return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->opts->host->nqn);
3310 static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);
3312 static ssize_t nvme_sysfs_show_hostid(struct device *dev,
3313 struct device_attribute *attr,
3316 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3318 return snprintf(buf, PAGE_SIZE, "%pU\n", &ctrl->opts->host->id);
3320 static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);
3322 static ssize_t nvme_sysfs_show_address(struct device *dev,
3323 struct device_attribute *attr,
3326 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3328 return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
3330 static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
3332 static struct attribute *nvme_dev_attrs[] = {
3333 &dev_attr_reset_controller.attr,
3334 &dev_attr_rescan_controller.attr,
3335 &dev_attr_model.attr,
3336 &dev_attr_serial.attr,
3337 &dev_attr_firmware_rev.attr,
3338 &dev_attr_cntlid.attr,
3339 &dev_attr_delete_controller.attr,
3340 &dev_attr_transport.attr,
3341 &dev_attr_subsysnqn.attr,
3342 &dev_attr_address.attr,
3343 &dev_attr_state.attr,
3344 &dev_attr_numa_node.attr,
3345 &dev_attr_queue_count.attr,
3346 &dev_attr_sqsize.attr,
3347 &dev_attr_hostnqn.attr,
3348 &dev_attr_hostid.attr,
3352 static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
3353 struct attribute *a, int n)
3355 struct device *dev = container_of(kobj, struct device, kobj);
3356 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3358 if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
3360 if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
3362 if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
3364 if (a == &dev_attr_hostid.attr && !ctrl->opts)
3370 static struct attribute_group nvme_dev_attrs_group = {
3371 .attrs = nvme_dev_attrs,
3372 .is_visible = nvme_dev_attrs_are_visible,
3375 static const struct attribute_group *nvme_dev_attr_groups[] = {
3376 &nvme_dev_attrs_group,
3380 static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
3383 struct nvme_ns_head *h;
3385 lockdep_assert_held(&subsys->lock);
3387 list_for_each_entry(h, &subsys->nsheads, entry) {
3388 if (h->ns_id == nsid && kref_get_unless_zero(&h->ref))
3395 static int __nvme_check_ids(struct nvme_subsystem *subsys,
3396 struct nvme_ns_head *new)
3398 struct nvme_ns_head *h;
3400 lockdep_assert_held(&subsys->lock);
3402 list_for_each_entry(h, &subsys->nsheads, entry) {
3403 if (nvme_ns_ids_valid(&new->ids) &&
3404 !list_empty(&h->list) &&
3405 nvme_ns_ids_equal(&new->ids, &h->ids))
3412 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
3413 unsigned nsid, struct nvme_id_ns *id,
3414 struct nvme_ns_ids *ids)
3416 struct nvme_ns_head *head;
3417 size_t size = sizeof(*head);
3420 #ifdef CONFIG_NVME_MULTIPATH
3421 size += num_possible_nodes() * sizeof(struct nvme_ns *);
3424 head = kzalloc(size, GFP_KERNEL);
3427 ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL);
3430 head->instance = ret;
3431 INIT_LIST_HEAD(&head->list);
3432 ret = init_srcu_struct(&head->srcu);
3434 goto out_ida_remove;
3435 head->subsys = ctrl->subsys;
3438 kref_init(&head->ref);
3440 ret = __nvme_check_ids(ctrl->subsys, head);
3442 dev_err(ctrl->device,
3443 "duplicate IDs for nsid %d\n", nsid);
3444 goto out_cleanup_srcu;
3447 ret = nvme_mpath_alloc_disk(ctrl, head);
3449 goto out_cleanup_srcu;
3451 list_add_tail(&head->entry, &ctrl->subsys->nsheads);
3453 kref_get(&ctrl->subsys->ref);
3457 cleanup_srcu_struct(&head->srcu);
3459 ida_simple_remove(&ctrl->subsys->ns_ida, head->instance);
3464 ret = blk_status_to_errno(nvme_error_status(ret));
3465 return ERR_PTR(ret);
3468 static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
3469 struct nvme_id_ns *id)
3471 struct nvme_ctrl *ctrl = ns->ctrl;
3472 bool is_shared = id->nmic & (1 << 0);
3473 struct nvme_ns_head *head = NULL;
3474 struct nvme_ns_ids ids;
3477 ret = nvme_report_ns_ids(ctrl, nsid, id, &ids);
3481 mutex_lock(&ctrl->subsys->lock);
3483 head = nvme_find_ns_head(ctrl->subsys, nsid);
3485 head = nvme_alloc_ns_head(ctrl, nsid, id, &ids);
3487 ret = PTR_ERR(head);
3491 if (!nvme_ns_ids_equal(&head->ids, &ids)) {
3492 dev_err(ctrl->device,
3493 "IDs don't match for shared namespace %d\n",
3500 list_add_tail(&ns->siblings, &head->list);
3504 mutex_unlock(&ctrl->subsys->lock);
3507 ret = blk_status_to_errno(nvme_error_status(ret));
3511 static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
3513 struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
3514 struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
3516 return nsa->head->ns_id - nsb->head->ns_id;
3519 static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3521 struct nvme_ns *ns, *ret = NULL;
3523 down_read(&ctrl->namespaces_rwsem);
3524 list_for_each_entry(ns, &ctrl->namespaces, list) {
3525 if (ns->head->ns_id == nsid) {
3526 if (!kref_get_unless_zero(&ns->kref))
3531 if (ns->head->ns_id > nsid)
3534 up_read(&ctrl->namespaces_rwsem);
3538 static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns)
3540 struct streams_directive_params s;
3543 if (!ctrl->nr_streams)
3546 ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
3550 ns->sws = le32_to_cpu(s.sws);
3551 ns->sgs = le16_to_cpu(s.sgs);
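/*
 * Editor's note: sws (stream write size, in logical blocks) and sgs
 * (stream granularity size, in sws units) become the queue's io_min and
 * io_opt hints below. Hypothetically, with 512-byte blocks, sws = 8 and
 * sgs = 64, io_min would be 4 KiB and io_opt 256 KiB.
 */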
3554 unsigned int bs = 1 << ns->lba_shift;
3556 blk_queue_io_min(ns->queue, bs * ns->sws);
3558 blk_queue_io_opt(ns->queue, bs * ns->sws * ns->sgs);
3564 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3567 struct gendisk *disk;
3568 struct nvme_id_ns *id;
3569 char disk_name[DISK_NAME_LEN];
3570 int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT, ret;
3572 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
3576 ns->queue = blk_mq_init_queue(ctrl->tagset);
3577 if (IS_ERR(ns->queue))
3580 if (ctrl->opts && ctrl->opts->data_digest)
3581 ns->queue->backing_dev_info->capabilities
3582 |= BDI_CAP_STABLE_WRITES;
3584 blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
3585 if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
3586 blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
3588 ns->queue->queuedata = ns;
3591 kref_init(&ns->kref);
3592 ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
3594 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
3595 nvme_set_queue_limits(ctrl, ns->queue);
3597 ret = nvme_identify_ns(ctrl, nsid, &id);
3599 goto out_free_queue;
3601 if (id->ncap == 0) /* no namespace (legacy quirk) */
3604 ret = nvme_init_ns_head(ns, nsid, id);
3607 nvme_setup_streams_ns(ctrl, ns);
3608 nvme_set_disk_name(disk_name, ns, ctrl, &flags);
3610 disk = alloc_disk_node(0, node);
3614 disk->fops = &nvme_fops;
3615 disk->private_data = ns;
3616 disk->queue = ns->queue;
3617 disk->flags = flags;
3618 memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
3621 __nvme_revalidate_disk(disk, id);
3623 if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
3624 ret = nvme_nvm_register(ns, disk_name, node);
3626 dev_warn(ctrl->device, "LightNVM init failure\n");
3631 down_write(&ctrl->namespaces_rwsem);
3632 list_add_tail(&ns->list, &ctrl->namespaces);
3633 up_write(&ctrl->namespaces_rwsem);
3635 nvme_get_ctrl(ctrl);
3637 device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
3639 nvme_mpath_add_disk(ns, id);
3640 nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
3645 /* prevent double queue cleanup */
3646 ns->disk->queue = NULL;
3649 mutex_lock(&ctrl->subsys->lock);
3650 list_del_rcu(&ns->siblings);
3651 mutex_unlock(&ctrl->subsys->lock);
3652 nvme_put_ns_head(ns->head);
3656 blk_cleanup_queue(ns->queue);
3661 static void nvme_ns_remove(struct nvme_ns *ns)
3663 if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
3666 nvme_fault_inject_fini(&ns->fault_inject);
3668 mutex_lock(&ns->ctrl->subsys->lock);
3669 list_del_rcu(&ns->siblings);
3670 mutex_unlock(&ns->ctrl->subsys->lock);
3671 synchronize_rcu(); /* guarantee not available in head->list */
3672 nvme_mpath_clear_current_path(ns);
3673 synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */
3675 if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
3676 del_gendisk(ns->disk);
3677 blk_cleanup_queue(ns->queue);
3678 if (blk_get_integrity(ns->disk))
3679 blk_integrity_unregister(ns->disk);
3682 down_write(&ns->ctrl->namespaces_rwsem);
3683 list_del_init(&ns->list);
3684 up_write(&ns->ctrl->namespaces_rwsem);
3686 nvme_mpath_check_last_path(ns);
3690 static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3694 ns = nvme_find_get_ns(ctrl, nsid);
3696 if (ns->disk && revalidate_disk(ns->disk))
3700 nvme_alloc_ns(ctrl, nsid);
3703 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
3706 struct nvme_ns *ns, *next;
3709 down_write(&ctrl->namespaces_rwsem);
3710 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
3711 if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
3712 list_move_tail(&ns->list, &rm_list);
3714 up_write(&ctrl->namespaces_rwsem);
3716 list_for_each_entry_safe(ns, next, &rm_list, list)
3721 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
3725 unsigned i, j, nsid, prev = 0;
3726 unsigned num_lists = DIV_ROUND_UP_ULL((u64)nn, 1024);
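/*
 * Editor's note, an illustrative count: a controller reporting nn = 2500
 * active namespaces needs DIV_ROUND_UP(2500, 1024) = 3 Identify Namespace
 * List commands, each returning up to 1024 NSIDs greater than "prev".
 */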
3729 ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
3733 for (i = 0; i < num_lists; i++) {
3734 ret = nvme_identify_ns_list(ctrl, prev, ns_list);
3738 for (j = 0; j < min(nn, 1024U); j++) {
3739 nsid = le32_to_cpu(ns_list[j]);
3743 nvme_validate_ns(ctrl, nsid);
3745 while (++prev < nsid) {
3746 ns = nvme_find_get_ns(ctrl, prev);
3756 nvme_remove_invalid_namespaces(ctrl, prev);
3762 static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
3766 for (i = 1; i <= nn; i++)
3767 nvme_validate_ns(ctrl, i);
3769 nvme_remove_invalid_namespaces(ctrl, nn);
3772 static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
3774 size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
3778 log = kzalloc(log_size, GFP_KERNEL);
3783 * We need to read the log to clear the AEN, but we don't want to rely
3784 * on it for the changed namespace information as userspace could have
3785 * raced with us in reading the log page, which could cause us to miss
3788 error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, log,
3791 dev_warn(ctrl->device,
3792 "reading changed ns log failed: %d\n", error);
3797 static void nvme_scan_work(struct work_struct *work)
3799 struct nvme_ctrl *ctrl =
3800 container_of(work, struct nvme_ctrl, scan_work);
3801 struct nvme_id_ctrl *id;
3804 /* No tagset on a live ctrl means IO queues could not be created */
3805 if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
3808 if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
3809 dev_info(ctrl->device, "rescanning namespaces.\n");
3810 nvme_clear_changed_ns_log(ctrl);
3813 if (nvme_identify_ctrl(ctrl, &id))
3816 mutex_lock(&ctrl->scan_lock);
3817 nn = le32_to_cpu(id->nn);
3818 if (ctrl->vs >= NVME_VS(1, 1, 0) &&
3819 !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
3820 if (!nvme_scan_ns_list(ctrl, nn))
3823 nvme_scan_ns_sequential(ctrl, nn);
3825 mutex_unlock(&ctrl->scan_lock);
3827 down_write(&ctrl->namespaces_rwsem);
3828 list_sort(NULL, &ctrl->namespaces, ns_cmp);
3829 up_write(&ctrl->namespaces_rwsem);
3833 * This function iterates the namespace list unlocked to allow recovery from
3834 * controller failure. It is up to the caller to ensure the namespace list is
3835 * not modified by scan work while this function is executing.
3837 void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
3839 struct nvme_ns *ns, *next;
3843 * make sure to requeue I/O to all namespaces as these
3844 * might result from the scan itself and must complete
3845 * for the scan_work to make progress
3847 nvme_mpath_clear_ctrl_paths(ctrl);
3849 /* prevent racing with ns scanning */
3850 flush_work(&ctrl->scan_work);
3853 * A dead state indicates that the controller was not gracefully
3854 * disconnected. In that case, we won't be able to flush any data while
3855 * removing the namespaces' disks; fail all the queues now to avoid
3856 * potentially having to clean up the failed sync later.
3858 if (ctrl->state == NVME_CTRL_DEAD)
3859 nvme_kill_queues(ctrl);
3861 down_write(&ctrl->namespaces_rwsem);
3862 list_splice_init(&ctrl->namespaces, &ns_list);
3863 up_write(&ctrl->namespaces_rwsem);
3865 list_for_each_entry_safe(ns, next, &ns_list, list)
3868 EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
3870 static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env)
3872 struct nvme_ctrl *ctrl =
3873 container_of(dev, struct nvme_ctrl, ctrl_device);
3874 struct nvmf_ctrl_options *opts = ctrl->opts;
3877 ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name);
3882 ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr);
3886 ret = add_uevent_var(env, "NVME_TRSVCID=%s",
3887 opts->trsvcid ?: "none");
3891 ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
3892 opts->host_traddr ?: "none");
3897 static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
3899 char *envp[2] = { NULL, NULL };
3900 u32 aen_result = ctrl->aen_result;
3902 ctrl->aen_result = 0;
3906 envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
3909 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
3913 static void nvme_async_event_work(struct work_struct *work)
3915 struct nvme_ctrl *ctrl =
3916 container_of(work, struct nvme_ctrl, async_event_work);
3918 nvme_aen_uevent(ctrl);
3919 ctrl->ops->submit_async_event(ctrl);
3922 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
3927 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
3933 return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
3936 static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
3938 struct nvme_fw_slot_info_log *log;
3940 log = kmalloc(sizeof(*log), GFP_KERNEL);
3944 if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log,
3946 dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
3950 static void nvme_fw_act_work(struct work_struct *work)
3952 struct nvme_ctrl *ctrl = container_of(work,
3953 struct nvme_ctrl, fw_act_work);
3954 unsigned long fw_act_timeout;
3957 fw_act_timeout = jiffies +
3958 msecs_to_jiffies(ctrl->mtfa * 100);
3960 fw_act_timeout = jiffies +
3961 msecs_to_jiffies(admin_timeout * 1000);
3963 nvme_stop_queues(ctrl);
3964 while (nvme_ctrl_pp_status(ctrl)) {
3965 if (time_after(jiffies, fw_act_timeout)) {
3966 dev_warn(ctrl->device,
3967 "Fw activation timeout, reset controller\n");
3968 nvme_try_sched_reset(ctrl);
3974 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
3977 nvme_start_queues(ctrl);
3978 /* read FW slot information to clear the AER */
3979 nvme_get_fw_slot_info(ctrl);
3982 static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
3984 u32 aer_notice_type = (result & 0xff00) >> 8;
3986 trace_nvme_async_event(ctrl, aer_notice_type);
3988 switch (aer_notice_type) {
3989 case NVME_AER_NOTICE_NS_CHANGED:
3990 set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
3991 nvme_queue_scan(ctrl);
3993 case NVME_AER_NOTICE_FW_ACT_STARTING:
3995 * We are (ab)using the RESETTING state to prevent subsequent
3996 * recovery actions from interfering with the controller's
3997 * firmware activation.
3999 if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
4000 queue_work(nvme_wq, &ctrl->fw_act_work);
4002 #ifdef CONFIG_NVME_MULTIPATH
4003 case NVME_AER_NOTICE_ANA:
4004 if (!ctrl->ana_log_buf)
4006 queue_work(nvme_wq, &ctrl->ana_work);
4009 case NVME_AER_NOTICE_DISC_CHANGED:
4010 ctrl->aen_result = result;
4013 dev_warn(ctrl->device, "async event result %08x\n", result);
4017 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
4018 volatile union nvme_result *res)
4020 u32 result = le32_to_cpu(res->u32);
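/*
 * Editor's note on the AEN result dword layout: bits 2:0 carry the
 * asynchronous event type, bits 15:8 the event information (dispatched to
 * nvme_handle_aen_notice() below for notice events), and bits 23:16 the
 * associated log page identifier.
 */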
4021 u32 aer_type = result & 0x07;
4023 if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
4027 case NVME_AER_NOTICE:
4028 nvme_handle_aen_notice(ctrl, result);
4030 case NVME_AER_ERROR:
4031 case NVME_AER_SMART:
4034 trace_nvme_async_event(ctrl, aer_type);
4035 ctrl->aen_result = result;
4040 queue_work(nvme_wq, &ctrl->async_event_work);
4042 EXPORT_SYMBOL_GPL(nvme_complete_async_event);
4044 void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
4046 nvme_mpath_stop(ctrl);
4047 nvme_stop_keep_alive(ctrl);
4048 flush_work(&ctrl->async_event_work);
4049 cancel_work_sync(&ctrl->fw_act_work);
4051 EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
4053 void nvme_start_ctrl(struct nvme_ctrl *ctrl)
4056 nvme_start_keep_alive(ctrl);
4058 nvme_enable_aen(ctrl);
4060 if (ctrl->queue_count > 1) {
4061 nvme_queue_scan(ctrl);
4062 nvme_start_queues(ctrl);
4064 ctrl->created = true;
4066 EXPORT_SYMBOL_GPL(nvme_start_ctrl);
4068 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
4070 nvme_fault_inject_fini(&ctrl->fault_inject);
4071 dev_pm_qos_hide_latency_tolerance(ctrl->device);
4072 cdev_device_del(&ctrl->cdev, ctrl->device);
4073 nvme_put_ctrl(ctrl);
4075 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
4077 static void nvme_free_ctrl(struct device *dev)
4079 struct nvme_ctrl *ctrl =
4080 container_of(dev, struct nvme_ctrl, ctrl_device);
4081 struct nvme_subsystem *subsys = ctrl->subsys;
4083 if (subsys && ctrl->instance != subsys->instance)
4084 ida_simple_remove(&nvme_instance_ida, ctrl->instance);
4086 kfree(ctrl->effects);
4087 nvme_mpath_uninit(ctrl);
4088 __free_page(ctrl->discard_page);
4091 mutex_lock(&nvme_subsystems_lock);
4092 list_del(&ctrl->subsys_entry);
4093 sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
4094 mutex_unlock(&nvme_subsystems_lock);
4097 ctrl->ops->free_ctrl(ctrl);
4100 nvme_put_subsystem(subsys);
4104 * Initialize an NVMe controller structure. This needs to be called during the
4105 * earliest initialization so that we have the initialized structure around during probing.
4108 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
4109 const struct nvme_ctrl_ops *ops, unsigned long quirks)
4113 ctrl->state = NVME_CTRL_NEW;
4114 spin_lock_init(&ctrl->lock);
4115 mutex_init(&ctrl->scan_lock);
4116 INIT_LIST_HEAD(&ctrl->namespaces);
4117 init_rwsem(&ctrl->namespaces_rwsem);
4120 ctrl->quirks = quirks;
4121 INIT_WORK(&ctrl->scan_work, nvme_scan_work);
4122 INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
4123 INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
4124 INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
4125 init_waitqueue_head(&ctrl->state_wq);
4127 INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
4128 memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
4129 ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
4131 BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
4133 ctrl->discard_page = alloc_page(GFP_KERNEL);
4134 if (!ctrl->discard_page) {
4139 ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
4142 ctrl->instance = ret;
4144 device_initialize(&ctrl->ctrl_device);
4145 ctrl->device = &ctrl->ctrl_device;
4146 ctrl->device->devt = MKDEV(MAJOR(nvme_chr_devt), ctrl->instance);
4147 ctrl->device->class = nvme_class;
4148 ctrl->device->parent = ctrl->dev;
4149 ctrl->device->groups = nvme_dev_attr_groups;
4150 ctrl->device->release = nvme_free_ctrl;
4151 dev_set_drvdata(ctrl->device, ctrl);
4152 ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
4154 goto out_release_instance;
4156 nvme_get_ctrl(ctrl);
4157 cdev_init(&ctrl->cdev, &nvme_dev_fops);
4158 ctrl->cdev.owner = ops->module;
4159 ret = cdev_device_add(&ctrl->cdev, ctrl->device);
4164 * Initialize latency tolerance controls. The sysfs files won't
4165 * be visible to userspace unless the device actually supports APST.
4167 ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
4168 dev_pm_qos_update_user_latency_tolerance(ctrl->device,
4169 min(default_ps_max_latency_us, (unsigned long)S32_MAX));
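/*
 * Editor's note: once exposed, the tolerance can be tuned from userspace;
 * e.g. (assuming the controller is nvme0)
 *
 *	echo 0 > /sys/class/nvme/nvme0/power/pm_qos_latency_tolerance_us
 *
 * feeds nvme_set_latency_tolerance() above with a 0 us budget, which in
 * turn disables APST for that controller.
 */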
4171 nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
4175 nvme_put_ctrl(ctrl);
4176 kfree_const(ctrl->device->kobj.name);
4177 out_release_instance:
4178 ida_simple_remove(&nvme_instance_ida, ctrl->instance);
4180 if (ctrl->discard_page)
4181 __free_page(ctrl->discard_page);
4184 EXPORT_SYMBOL_GPL(nvme_init_ctrl);
4187 * nvme_kill_queues(): Ends all namespace queues
4188 * @ctrl: the dead controller whose namespace queues need to be ended
4190 * Call this function when the driver determines it is unable to get the
4191 * controller in a state capable of servicing IO.
4193 void nvme_kill_queues(struct nvme_ctrl *ctrl)
4197 down_read(&ctrl->namespaces_rwsem);
4199 /* Forcibly unquiesce queues to avoid blocking dispatch */
4200 if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
4201 blk_mq_unquiesce_queue(ctrl->admin_q);
4203 list_for_each_entry(ns, &ctrl->namespaces, list)
4204 nvme_set_queue_dying(ns);
4206 up_read(&ctrl->namespaces_rwsem);
4208 EXPORT_SYMBOL_GPL(nvme_kill_queues);
4210 void nvme_unfreeze(struct nvme_ctrl *ctrl)
4214 down_read(&ctrl->namespaces_rwsem);
4215 list_for_each_entry(ns, &ctrl->namespaces, list)
4216 blk_mq_unfreeze_queue(ns->queue);
4217 up_read(&ctrl->namespaces_rwsem);
4219 EXPORT_SYMBOL_GPL(nvme_unfreeze);
4221 void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
4225 down_read(&ctrl->namespaces_rwsem);
4226 list_for_each_entry(ns, &ctrl->namespaces, list) {
4227 timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
4231 up_read(&ctrl->namespaces_rwsem);
4233 EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
4235 void nvme_wait_freeze(struct nvme_ctrl *ctrl)
4239 down_read(&ctrl->namespaces_rwsem);
4240 list_for_each_entry(ns, &ctrl->namespaces, list)
4241 blk_mq_freeze_queue_wait(ns->queue);
4242 up_read(&ctrl->namespaces_rwsem);
4244 EXPORT_SYMBOL_GPL(nvme_wait_freeze);
4246 void nvme_start_freeze(struct nvme_ctrl *ctrl)
4250 down_read(&ctrl->namespaces_rwsem);
4251 list_for_each_entry(ns, &ctrl->namespaces, list)
4252 blk_freeze_queue_start(ns->queue);
4253 up_read(&ctrl->namespaces_rwsem);
4255 EXPORT_SYMBOL_GPL(nvme_start_freeze);
4257 void nvme_stop_queues(struct nvme_ctrl *ctrl)
4261 down_read(&ctrl->namespaces_rwsem);
4262 list_for_each_entry(ns, &ctrl->namespaces, list)
4263 blk_mq_quiesce_queue(ns->queue);
4264 up_read(&ctrl->namespaces_rwsem);
4266 EXPORT_SYMBOL_GPL(nvme_stop_queues);
4268 void nvme_start_queues(struct nvme_ctrl *ctrl)
4272 down_read(&ctrl->namespaces_rwsem);
4273 list_for_each_entry(ns, &ctrl->namespaces, list)
4274 blk_mq_unquiesce_queue(ns->queue);
4275 up_read(&ctrl->namespaces_rwsem);
4277 EXPORT_SYMBOL_GPL(nvme_start_queues);
4280 void nvme_sync_queues(struct nvme_ctrl *ctrl)
4284 down_read(&ctrl->namespaces_rwsem);
4285 list_for_each_entry(ns, &ctrl->namespaces, list)
4286 blk_sync_queue(ns->queue);
4287 up_read(&ctrl->namespaces_rwsem);
4290 blk_sync_queue(ctrl->admin_q);
4292 EXPORT_SYMBOL_GPL(nvme_sync_queues);
4295 * Check we didn't inadvertently grow the command structure sizes:
4297 static inline void _nvme_check_size(void)
4299 BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
4300 BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
4301 BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
4302 BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
4303 BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
4304 BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
4305 BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
4306 BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
4307 BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
4308 BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
4309 BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
4310 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
4311 BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
4312 BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
4313 BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
4314 BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
4315 BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
4319 static int __init nvme_core_init(void)
4321 int result = -ENOMEM;
4325 nvme_wq = alloc_workqueue("nvme-wq",
4326 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4330 nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
4331 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4335 nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
4336 WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4337 if (!nvme_delete_wq)
4338 goto destroy_reset_wq;
4340 result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme");
4342 goto destroy_delete_wq;
4344 nvme_class = class_create(THIS_MODULE, "nvme");
4345 if (IS_ERR(nvme_class)) {
4346 result = PTR_ERR(nvme_class);
4347 goto unregister_chrdev;
4349 nvme_class->dev_uevent = nvme_class_uevent;
4351 nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem");
4352 if (IS_ERR(nvme_subsys_class)) {
4353 result = PTR_ERR(nvme_subsys_class);
4359 class_destroy(nvme_class);
4361 unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
4363 destroy_workqueue(nvme_delete_wq);
4365 destroy_workqueue(nvme_reset_wq);
4367 destroy_workqueue(nvme_wq);
4372 static void __exit nvme_core_exit(void)
4374 class_destroy(nvme_subsys_class);
4375 class_destroy(nvme_class);
4376 unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
4377 destroy_workqueue(nvme_delete_wq);
4378 destroy_workqueue(nvme_reset_wq);
4379 destroy_workqueue(nvme_wq);
4380 ida_destroy(&nvme_instance_ida);
4383 MODULE_LICENSE("GPL");
4384 MODULE_VERSION("1.0");
4385 module_init(nvme_core_init);
4386 module_exit(nvme_core_exit);