1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Common code for the NVMe target.
4 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
5 */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/random.h>
9 #include <linux/rculist.h>
10 #include <linux/pci-p2pdma.h>
11 #include <linux/scatterlist.h>
13 #define CREATE_TRACE_POINTS
18 struct workqueue_struct *buffered_io_wq;
19 struct workqueue_struct *zbd_wq;
20 static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
21 static DEFINE_IDA(cntlid_ida);
24 * This read/write semaphore is used to synchronize access to configuration
25 * information on a target system that, when modified, results in a
26 * discovery log page information change for at least one host.
27 * The full list of resources to be protected by this semaphore is:
30 * - per-subsystem allowed hosts list
31 * - allow_any_host subsystem attribute
33 * - the nvmet_transports array
35 * When updating any of those lists/structures the write lock should be
36 * taken, while readers (populating the discovery log page or checking a
37 * host-subsystem link) take the read lock to allow concurrent reads.
39 DECLARE_RWSEM(nvmet_config_sem);
41 u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
43 DECLARE_RWSEM(nvmet_ana_sem);
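/*
 * Translate a Linux errno returned by a backend into an NVMe status code,
 * recording the byte offset of the offending command field in req->error_loc
 * so it can be reported through the error log page.
 */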
45 inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
49 return NVME_SC_SUCCESS;
51 req->error_loc = offsetof(struct nvme_rw_command, length);
52 return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
54 req->error_loc = offsetof(struct nvme_rw_command, slba);
55 return NVME_SC_LBA_RANGE | NVME_SC_DNR;
57 req->error_loc = offsetof(struct nvme_common_command, opcode);
58 switch (req->cmd->common.opcode) {
60 case nvme_cmd_write_zeroes:
61 return NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
63 return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
67 req->error_loc = offsetof(struct nvme_rw_command, nsid);
68 return NVME_SC_ACCESS_DENIED;
72 req->error_loc = offsetof(struct nvme_common_command, opcode);
73 return NVME_SC_INTERNAL | NVME_SC_DNR;
77 u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
79 pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode,
82 req->error_loc = offsetof(struct nvme_common_command, opcode);
83 return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
86 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
87 const char *subsysnqn);
89 u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
92 if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
93 req->error_loc = offsetof(struct nvme_common_command, dptr);
94 return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
99 u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
101 if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
102 req->error_loc = offsetof(struct nvme_common_command, dptr);
103 return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
108 u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
110 if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
111 req->error_loc = offsetof(struct nvme_common_command, dptr);
112 return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
117 static u32 nvmet_max_nsid(struct nvmet_subsys *subsys)
119 struct nvmet_ns *cur;
123 xa_for_each(&subsys->namespaces, idx, cur)
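/*
 * Pack an AEN into the completion result dword: event type in the low byte,
 * event information in byte 1 and the associated log page identifier in
 * byte 2, matching the Asynchronous Event Request completion format.
 */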
129 static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
131 return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
134 static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
136 struct nvmet_req *req;
138 mutex_lock(&ctrl->lock);
139 while (ctrl->nr_async_event_cmds) {
140 req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
141 mutex_unlock(&ctrl->lock);
142 nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
143 mutex_lock(&ctrl->lock);
145 mutex_unlock(&ctrl->lock);
148 static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
150 struct nvmet_async_event *aen;
151 struct nvmet_req *req;
153 mutex_lock(&ctrl->lock);
154 while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
155 aen = list_first_entry(&ctrl->async_events,
156 struct nvmet_async_event, entry);
157 req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
158 nvmet_set_result(req, nvmet_async_event_result(aen));
160 list_del(&aen->entry);
163 mutex_unlock(&ctrl->lock);
164 trace_nvmet_async_event(ctrl, req->cqe->result.u32);
165 nvmet_req_complete(req, 0);
166 mutex_lock(&ctrl->lock);
168 mutex_unlock(&ctrl->lock);
171 static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
173 struct nvmet_async_event *aen, *tmp;
175 mutex_lock(&ctrl->lock);
176 list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
177 list_del(&aen->entry);
180 mutex_unlock(&ctrl->lock);
183 static void nvmet_async_event_work(struct work_struct *work)
185 struct nvmet_ctrl *ctrl =
186 container_of(work, struct nvmet_ctrl, async_event_work);
188 nvmet_async_events_process(ctrl);
191 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
192 u8 event_info, u8 log_page)
194 struct nvmet_async_event *aen;
196 aen = kmalloc(sizeof(*aen), GFP_KERNEL);
200 aen->event_type = event_type;
201 aen->event_info = event_info;
202 aen->log_page = log_page;
204 mutex_lock(&ctrl->lock);
205 list_add_tail(&aen->entry, &ctrl->async_events);
206 mutex_unlock(&ctrl->lock);
208 schedule_work(&ctrl->async_event_work);
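/*
 * Record a namespace ID in the controller's Changed Namespace List log.
 * Once more than NVME_MAX_CHANGED_NAMESPACES IDs have accumulated, the list
 * is collapsed to the single entry 0xffffffff, telling the host that more
 * namespaces changed than fit in the log.
 */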
211 static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
215 mutex_lock(&ctrl->lock);
216 if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
219 for (i = 0; i < ctrl->nr_changed_ns; i++) {
220 if (ctrl->changed_ns_list[i] == nsid)
224 if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
225 ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
226 ctrl->nr_changed_ns = U32_MAX;
230 ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
232 mutex_unlock(&ctrl->lock);
235 void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
237 struct nvmet_ctrl *ctrl;
239 lockdep_assert_held(&subsys->lock);
241 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
242 nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
243 if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
245 nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
246 NVME_AER_NOTICE_NS_CHANGED,
247 NVME_LOG_CHANGED_NS);
251 void nvmet_send_ana_event(struct nvmet_subsys *subsys,
252 struct nvmet_port *port)
254 struct nvmet_ctrl *ctrl;
256 mutex_lock(&subsys->lock);
257 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
258 if (port && ctrl->port != port)
260 if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
262 nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
263 NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
265 mutex_unlock(&subsys->lock);
268 void nvmet_port_send_ana_event(struct nvmet_port *port)
270 struct nvmet_subsys_link *p;
272 down_read(&nvmet_config_sem);
273 list_for_each_entry(p, &port->subsystems, entry)
274 nvmet_send_ana_event(p->subsys, port);
275 up_read(&nvmet_config_sem);
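/*
 * Transport drivers (loop, RDMA, TCP, FC) register their nvmet_fabrics_ops
 * here, keyed by transport type; registration and lookup are serialized by
 * nvmet_config_sem.  A transport module would typically register itself from
 * its module init roughly like this (illustrative sketch only, not a
 * complete ops table; the nvmet_foo_* names are made up):
 *
 *	static const struct nvmet_fabrics_ops nvmet_foo_ops = {
 *		.owner		= THIS_MODULE,
 *		.type		= NVMF_TRTYPE_TCP,
 *		.add_port	= nvmet_foo_add_port,
 *		.remove_port	= nvmet_foo_remove_port,
 *		.queue_response	= nvmet_foo_queue_response,
 *		.delete_ctrl	= nvmet_foo_delete_ctrl,
 *	};
 *
 *	ret = nvmet_register_transport(&nvmet_foo_ops);
 */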
278 int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
282 down_write(&nvmet_config_sem);
283 if (nvmet_transports[ops->type])
286 nvmet_transports[ops->type] = ops;
287 up_write(&nvmet_config_sem);
291 EXPORT_SYMBOL_GPL(nvmet_register_transport);
293 void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
295 down_write(&nvmet_config_sem);
296 nvmet_transports[ops->type] = NULL;
297 up_write(&nvmet_config_sem);
299 EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
301 void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
303 struct nvmet_ctrl *ctrl;
305 mutex_lock(&subsys->lock);
306 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
307 if (ctrl->port == port)
308 ctrl->ops->delete_ctrl(ctrl);
310 mutex_unlock(&subsys->lock);
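/*
 * Enable a port: look up the fabrics ops for the configured transport type,
 * loading the transport module on demand via "nvmet-transport-%d", verify
 * that the transport supports T10-PI if the user requested it, and finally
 * call the transport's ->add_port() to start listening.
 */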
313 int nvmet_enable_port(struct nvmet_port *port)
315 const struct nvmet_fabrics_ops *ops;
318 lockdep_assert_held(&nvmet_config_sem);
320 ops = nvmet_transports[port->disc_addr.trtype];
322 up_write(&nvmet_config_sem);
323 request_module("nvmet-transport-%d", port->disc_addr.trtype);
324 down_write(&nvmet_config_sem);
325 ops = nvmet_transports[port->disc_addr.trtype];
327 pr_err("transport type %d not supported\n",
328 port->disc_addr.trtype);
333 if (!try_module_get(ops->owner))
337 * If the user requested PI support and the transport isn't pi capable,
338 * don't enable the port.
340 if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
341 pr_err("T10-PI is not supported by transport type %d\n",
342 port->disc_addr.trtype);
347 ret = ops->add_port(port);
351 /* If the transport didn't set inline_data_size, then disable it. */
352 if (port->inline_data_size < 0)
353 port->inline_data_size = 0;
355 port->enabled = true;
360 module_put(ops->owner);
364 void nvmet_disable_port(struct nvmet_port *port)
366 const struct nvmet_fabrics_ops *ops;
368 lockdep_assert_held(&nvmet_config_sem);
370 port->enabled = false;
373 ops = nvmet_transports[port->disc_addr.trtype];
374 ops->remove_port(port);
375 module_put(ops->owner);
378 static void nvmet_keep_alive_timer(struct work_struct *work)
380 struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
381 struct nvmet_ctrl, ka_work);
382 bool reset_tbkas = ctrl->reset_tbkas;
384 ctrl->reset_tbkas = false;
386 pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
388 schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
392 pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
393 ctrl->cntlid, ctrl->kato);
395 nvmet_ctrl_fatal_error(ctrl);
398 void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
400 if (unlikely(ctrl->kato == 0))
403 pr_debug("ctrl %d start keep-alive timer for %d secs\n",
404 ctrl->cntlid, ctrl->kato);
406 schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
409 void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
411 if (unlikely(ctrl->kato == 0))
414 pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
416 cancel_delayed_work_sync(&ctrl->ka_work);
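/*
 * Resolve the NSID in the command to a namespace in the subsystem's XArray
 * and take a percpu reference on it.  On success the caller owns a reference
 * on req->ns and must drop it with nvmet_put_namespace().
 */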
419 u16 nvmet_req_find_ns(struct nvmet_req *req)
421 u32 nsid = le32_to_cpu(req->cmd->common.nsid);
423 req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid);
424 if (unlikely(!req->ns)) {
425 req->error_loc = offsetof(struct nvme_common_command, nsid);
426 return NVME_SC_INVALID_NS | NVME_SC_DNR;
429 percpu_ref_get(&req->ns->ref);
430 return NVME_SC_SUCCESS;
433 static void nvmet_destroy_namespace(struct percpu_ref *ref)
435 struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);
437 complete(&ns->disable_done);
440 void nvmet_put_namespace(struct nvmet_ns *ns)
442 percpu_ref_put(&ns->ref);
445 static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
447 nvmet_bdev_ns_disable(ns);
448 nvmet_file_ns_disable(ns);
451 static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
454 struct pci_dev *p2p_dev;
460 pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
464 if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
465 pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
471 ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
476 * Right now we just check that there is p2pmem available so
477 * we can report an error to the user right away if there
478 * is not. We'll find the actual device to use once we
479 * set up the controller when the port's device is available.
482 p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
484 pr_err("no peer-to-peer memory is available for %s\n",
489 pci_dev_put(p2p_dev);
496 * Note: ctrl->subsys->lock should be held when calling this function
498 static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
501 struct device *clients[2];
502 struct pci_dev *p2p_dev;
505 if (!ctrl->p2p_client || !ns->use_p2pmem)
509 ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
513 p2p_dev = pci_dev_get(ns->p2p_dev);
515 clients[0] = ctrl->p2p_client;
516 clients[1] = nvmet_ns_dev(ns);
518 p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
520 pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
521 dev_name(ctrl->p2p_client), ns->device_path);
526 ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
528 pci_dev_put(p2p_dev);
530 pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
534 void nvmet_ns_revalidate(struct nvmet_ns *ns)
536 loff_t oldsize = ns->size;
539 nvmet_bdev_ns_revalidate(ns);
541 nvmet_file_ns_revalidate(ns);
543 if (oldsize != ns->size)
544 nvmet_ns_changed(ns->subsys, ns->nsid);
547 int nvmet_ns_enable(struct nvmet_ns *ns)
549 struct nvmet_subsys *subsys = ns->subsys;
550 struct nvmet_ctrl *ctrl;
553 mutex_lock(&subsys->lock);
556 if (nvmet_passthru_ctrl(subsys)) {
557 pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
565 if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
568 ret = nvmet_bdev_ns_enable(ns);
570 ret = nvmet_file_ns_enable(ns);
574 ret = nvmet_p2pmem_ns_enable(ns);
576 goto out_dev_disable;
578 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
579 nvmet_p2pmem_ns_add_p2p(ctrl, ns);
581 ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
586 if (ns->nsid > subsys->max_nsid)
587 subsys->max_nsid = ns->nsid;
589 ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
591 goto out_restore_subsys_maxnsid;
593 subsys->nr_namespaces++;
595 nvmet_ns_changed(subsys, ns->nsid);
599 mutex_unlock(&subsys->lock);
602 out_restore_subsys_maxnsid:
603 subsys->max_nsid = nvmet_max_nsid(subsys);
604 percpu_ref_exit(&ns->ref);
606 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
607 pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
609 nvmet_ns_dev_disable(ns);
613 void nvmet_ns_disable(struct nvmet_ns *ns)
615 struct nvmet_subsys *subsys = ns->subsys;
616 struct nvmet_ctrl *ctrl;
618 mutex_lock(&subsys->lock);
623 xa_erase(&ns->subsys->namespaces, ns->nsid);
624 if (ns->nsid == subsys->max_nsid)
625 subsys->max_nsid = nvmet_max_nsid(subsys);
627 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
628 pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
630 mutex_unlock(&subsys->lock);
633 * Now that we removed the namespace from the lookup list, we
634 * can kill the percpu ref and wait for any remaining references
635 * to be dropped, as well as an RCU grace period for anyone only
636 * using the namespace under rcu_read_lock(). Note that we can't
637 * use call_rcu here as we need to ensure the namespaces have
638 * been fully destroyed before unloading the module.
640 percpu_ref_kill(&ns->ref);
642 wait_for_completion(&ns->disable_done);
643 percpu_ref_exit(&ns->ref);
645 mutex_lock(&subsys->lock);
647 subsys->nr_namespaces--;
648 nvmet_ns_changed(subsys, ns->nsid);
649 nvmet_ns_dev_disable(ns);
651 mutex_unlock(&subsys->lock);
654 void nvmet_ns_free(struct nvmet_ns *ns)
656 nvmet_ns_disable(ns);
658 down_write(&nvmet_ana_sem);
659 nvmet_ana_group_enabled[ns->anagrpid]--;
660 up_write(&nvmet_ana_sem);
662 kfree(ns->device_path);
666 struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
670 ns = kzalloc(sizeof(*ns), GFP_KERNEL);
674 init_completion(&ns->disable_done);
679 down_write(&nvmet_ana_sem);
680 ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
681 nvmet_ana_group_enabled[ns->anagrpid]++;
682 up_write(&nvmet_ana_sem);
685 ns->buffered_io = false;
686 ns->csi = NVME_CSI_NVM;
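/*
 * Advance the submission queue head, wrapping modulo the queue size.  A
 * cmpxchg() loop is used instead of a lock so completions from multiple
 * contexts can race safely; the resulting head is reported back to the host
 * in the CQE's sq_head field.
 */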
691 static void nvmet_update_sq_head(struct nvmet_req *req)
694 u32 old_sqhd, new_sqhd;
697 old_sqhd = req->sq->sqhd;
698 new_sqhd = (old_sqhd + 1) % req->sq->size;
699 } while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
702 req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
705 static void nvmet_set_error(struct nvmet_req *req, u16 status)
707 struct nvmet_ctrl *ctrl = req->sq->ctrl;
708 struct nvme_error_slot *new_error_slot;
711 req->cqe->status = cpu_to_le16(status << 1);
713 if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
716 spin_lock_irqsave(&ctrl->error_lock, flags);
719 &ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];
721 new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
722 new_error_slot->sqid = cpu_to_le16(req->sq->qid);
723 new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
724 new_error_slot->status_field = cpu_to_le16(status << 1);
725 new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
726 new_error_slot->lba = cpu_to_le64(req->error_slba);
727 new_error_slot->nsid = req->cmd->common.nsid;
728 spin_unlock_irqrestore(&ctrl->error_lock, flags);
730 /* set the more bit for this request */
731 req->cqe->status |= cpu_to_le16(1 << 14);
734 static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
736 if (!req->sq->sqhd_disabled)
737 nvmet_update_sq_head(req);
738 req->cqe->sq_id = cpu_to_le16(req->sq->qid);
739 req->cqe->command_id = req->cmd->common.command_id;
741 if (unlikely(status))
742 nvmet_set_error(req, status);
744 trace_nvmet_req_complete(req);
747 nvmet_put_namespace(req->ns);
748 req->ops->queue_response(req);
751 void nvmet_req_complete(struct nvmet_req *req, u16 status)
753 __nvmet_req_complete(req, status);
754 percpu_ref_put(&req->sq->ref);
756 EXPORT_SYMBOL_GPL(nvmet_req_complete);
758 void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
765 void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
775 static void nvmet_confirm_sq(struct percpu_ref *ref)
777 struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
779 complete(&sq->confirm_done);
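/*
 * Tear down a submission queue: fail any outstanding AER commands if this is
 * the admin queue, kill the queue's percpu reference and wait for both the
 * confirmation and final-free completions, then drop the controller
 * reference so the queue structure can be reused.
 */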
782 void nvmet_sq_destroy(struct nvmet_sq *sq)
784 struct nvmet_ctrl *ctrl = sq->ctrl;
787 * If this is the admin queue, complete all AERs so that our
788 * queue doesn't have outstanding requests on it.
790 if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
791 nvmet_async_events_failall(ctrl);
792 percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
793 wait_for_completion(&sq->confirm_done);
794 wait_for_completion(&sq->free_done);
795 percpu_ref_exit(&sq->ref);
799 * The teardown flow may take some time, and the host may not
800 * send us keep-alive during this period, hence reset the
801 * traffic-based keep-alive timer so we don't trigger a
802 * controller teardown as a result of a keep-alive expiration.
804 ctrl->reset_tbkas = true;
805 sq->ctrl->sqs[sq->qid] = NULL;
806 nvmet_ctrl_put(ctrl);
807 sq->ctrl = NULL; /* allows reusing the queue later */
810 EXPORT_SYMBOL_GPL(nvmet_sq_destroy);
812 static void nvmet_sq_free(struct percpu_ref *ref)
814 struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
816 complete(&sq->free_done);
819 int nvmet_sq_init(struct nvmet_sq *sq)
823 ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
825 pr_err("percpu_ref init failed!\n");
828 init_completion(&sq->free_done);
829 init_completion(&sq->confirm_done);
833 EXPORT_SYMBOL_GPL(nvmet_sq_init);
835 static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
838 enum nvme_ana_state state = port->ana_state[ns->anagrpid];
840 if (unlikely(state == NVME_ANA_INACCESSIBLE))
841 return NVME_SC_ANA_INACCESSIBLE;
842 if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
843 return NVME_SC_ANA_PERSISTENT_LOSS;
844 if (unlikely(state == NVME_ANA_CHANGE))
845 return NVME_SC_ANA_TRANSITION;
849 static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
851 if (unlikely(req->ns->readonly)) {
852 switch (req->cmd->common.opcode) {
857 return NVME_SC_NS_WRITE_PROTECTED;
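/*
 * Parse an I/O command: check that the controller is enabled, find the
 * target namespace, verify ANA state and write protection, then hand the
 * command to the passthru, file, block-device or zoned parser depending on
 * how the namespace is backed and which command set it exposes.
 */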
864 static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
868 ret = nvmet_check_ctrl_status(req);
872 if (nvmet_req_passthru_ctrl(req))
873 return nvmet_parse_passthru_io_cmd(req);
875 ret = nvmet_req_find_ns(req);
879 ret = nvmet_check_ana_state(req->port, req->ns);
881 req->error_loc = offsetof(struct nvme_common_command, nsid);
884 ret = nvmet_io_cmd_check_access(req);
886 req->error_loc = offsetof(struct nvme_common_command, nsid);
890 switch (req->ns->csi) {
893 return nvmet_file_parse_io_cmd(req);
894 return nvmet_bdev_parse_io_cmd(req);
896 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
897 return nvmet_bdev_zns_parse_io_cmd(req);
898 return NVME_SC_INVALID_IO_CMD_SET;
900 return NVME_SC_INVALID_IO_CMD_SET;
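/*
 * nvmet_req_init() is the entry point transports call for every capsule they
 * receive: it resets the per-request fields, rejects fused commands and SGL
 * descriptors other than METABUF, dispatches to the connect, admin or I/O
 * parser and takes a live reference on the submission queue.  A transport's
 * receive path then looks roughly like this (illustrative sketch, error
 * handling omitted; nvmet_foo_ops and queue->nvme_cq/nvme_sq are made-up
 * names):
 *
 *	if (!nvmet_req_init(req, &queue->nvme_cq, &queue->nvme_sq,
 *			    &nvmet_foo_ops))
 *		return;		// request already completed with an error
 *	if (req->transfer_len && nvmet_req_alloc_sgls(req))
 *		goto err;	// allocate data/metadata buffers first
 *	req->execute(req);	// completion ends up in nvmet_req_complete()
 */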
904 bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
905 struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
907 u8 flags = req->cmd->common.flags;
914 req->metadata_sg = NULL;
916 req->metadata_sg_cnt = 0;
917 req->transfer_len = 0;
918 req->metadata_len = 0;
919 req->cqe->status = 0;
920 req->cqe->sq_head = 0;
922 req->error_loc = NVMET_NO_ERROR_LOC;
925 /* no support for fused commands yet */
926 if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
927 req->error_loc = offsetof(struct nvme_common_command, flags);
928 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
933 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
934 * contains an address of a single contiguous physical buffer that is
937 if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
938 req->error_loc = offsetof(struct nvme_common_command, flags);
939 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
943 if (unlikely(!req->sq->ctrl))
944 /* will return an error for any non-connect command: */
945 status = nvmet_parse_connect_cmd(req);
946 else if (likely(req->sq->qid != 0))
947 status = nvmet_parse_io_cmd(req);
949 status = nvmet_parse_admin_cmd(req);
954 trace_nvmet_req_init(req, req->cmd);
956 if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
957 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
962 sq->ctrl->reset_tbkas = true;
967 __nvmet_req_complete(req, status);
970 EXPORT_SYMBOL_GPL(nvmet_req_init);
972 void nvmet_req_uninit(struct nvmet_req *req)
974 percpu_ref_put(&req->sq->ref);
976 nvmet_put_namespace(req->ns);
978 EXPORT_SYMBOL_GPL(nvmet_req_uninit);
980 bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
982 if (unlikely(len != req->transfer_len)) {
983 req->error_loc = offsetof(struct nvme_common_command, dptr);
984 nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
990 EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);
992 bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
994 if (unlikely(data_len > req->transfer_len)) {
995 req->error_loc = offsetof(struct nvme_common_command, dptr);
996 nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
1003 static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
1005 return req->transfer_len - req->metadata_len;
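/*
 * Allocate the data (and, if present, metadata) scatterlists from the
 * peer-to-peer memory of the given PCI device and remember that device in
 * req->p2p_dev so nvmet_req_free_sgls() returns the memory to the same pool.
 */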
1008 static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
1009 struct nvmet_req *req)
1011 req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
1012 nvmet_data_transfer_len(req));
1016 if (req->metadata_len) {
1017 req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
1018 &req->metadata_sg_cnt, req->metadata_len);
1019 if (!req->metadata_sg)
1023 req->p2p_dev = p2p_dev;
1027 pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
1032 static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
1034 if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
1035 !req->sq->ctrl || !req->sq->qid || !req->ns)
1037 return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
1040 int nvmet_req_alloc_sgls(struct nvmet_req *req)
1042 struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);
1044 if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
1047 req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
1049 if (unlikely(!req->sg))
1052 if (req->metadata_len) {
1053 req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
1054 &req->metadata_sg_cnt);
1055 if (unlikely(!req->metadata_sg))
1065 EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);
1067 void nvmet_req_free_sgls(struct nvmet_req *req)
1070 pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
1071 if (req->metadata_sg)
1072 pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
1073 req->p2p_dev = NULL;
1076 if (req->metadata_sg)
1077 sgl_free(req->metadata_sg);
1081 req->metadata_sg = NULL;
1083 req->metadata_sg_cnt = 0;
1085 EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);
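/*
 * Helpers extracting the individual fields of the Controller Configuration
 * (CC) value: enable (EN), I/O command set selected (CSS), memory page size
 * (MPS), arbitration mechanism (AMS), shutdown notification (SHN) and the
 * I/O submission/completion queue entry sizes (IOSQES/IOCQES).
 */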
1087 static inline bool nvmet_cc_en(u32 cc)
1089 return (cc >> NVME_CC_EN_SHIFT) & 0x1;
1092 static inline u8 nvmet_cc_css(u32 cc)
1094 return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
1097 static inline u8 nvmet_cc_mps(u32 cc)
1099 return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
1102 static inline u8 nvmet_cc_ams(u32 cc)
1104 return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
1107 static inline u8 nvmet_cc_shn(u32 cc)
1109 return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
1112 static inline u8 nvmet_cc_iosqes(u32 cc)
1114 return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
1117 static inline u8 nvmet_cc_iocqes(u32 cc)
1119 return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
1122 static inline bool nvmet_css_supported(u8 cc_css)
1124 switch (cc_css <<= NVME_CC_CSS_SHIFT) {
1125 case NVME_CC_CSS_NVM:
1126 case NVME_CC_CSS_CSI:
1133 static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
1135 lockdep_assert_held(&ctrl->lock);
1138 * Only I/O controllers should verify iosqes,iocqes.
1139 * Strictly speaking, the spec says a discovery controller
1140 * should verify iosqes,iocqes are zeroed; however, that
1141 * would break backwards compatibility, so don't enforce it.
1143 if (ctrl->subsys->type != NVME_NQN_DISC &&
1144 (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
1145 nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
1146 ctrl->csts = NVME_CSTS_CFS;
1150 if (nvmet_cc_mps(ctrl->cc) != 0 ||
1151 nvmet_cc_ams(ctrl->cc) != 0 ||
1152 !nvmet_css_supported(nvmet_cc_css(ctrl->cc))) {
1153 ctrl->csts = NVME_CSTS_CFS;
1157 ctrl->csts = NVME_CSTS_RDY;
1160 * Controllers that are not yet enabled should not really enforce the
1161 * keep-alive timeout, but we still want to track a timeout and clean up
1162 * in case a host died before it enabled the controller. Hence, simply
1163 * reset the keep-alive timer when the controller is enabled.
1166 mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
1169 static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
1171 lockdep_assert_held(&ctrl->lock);
1173 /* XXX: tear down queues? */
1174 ctrl->csts &= ~NVME_CSTS_RDY;
1178 void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
1182 mutex_lock(&ctrl->lock);
1186 if (nvmet_cc_en(new) && !nvmet_cc_en(old))
1187 nvmet_start_ctrl(ctrl);
1188 if (!nvmet_cc_en(new) && nvmet_cc_en(old))
1189 nvmet_clear_ctrl(ctrl);
1190 if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
1191 nvmet_clear_ctrl(ctrl);
1192 ctrl->csts |= NVME_CSTS_SHST_CMPLT;
1194 if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
1195 ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
1196 mutex_unlock(&ctrl->lock);
1199 static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
1201 /* command sets supported: NVMe command set: */
1202 ctrl->cap = (1ULL << 37);
1203 /* Controller supports one or more I/O Command Sets */
1204 ctrl->cap |= (1ULL << 43);
1205 /* CC.EN timeout in 500msec units: */
1206 ctrl->cap |= (15ULL << 24);
1207 /* maximum queue entries supported: */
1208 ctrl->cap |= NVMET_QUEUE_SIZE - 1;
1211 struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
1212 const char *hostnqn, u16 cntlid,
1213 struct nvmet_req *req)
1215 struct nvmet_ctrl *ctrl = NULL;
1216 struct nvmet_subsys *subsys;
1218 subsys = nvmet_find_get_subsys(req->port, subsysnqn);
1220 pr_warn("connect request for invalid subsystem %s!\n",
1222 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
1226 mutex_lock(&subsys->lock);
1227 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
1228 if (ctrl->cntlid == cntlid) {
1229 if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
1230 pr_warn("hostnqn mismatch.\n");
1233 if (!kref_get_unless_zero(&ctrl->ref))
1241 ctrl = NULL; /* ctrl not found */
1242 pr_warn("could not find controller %d for subsys %s / host %s\n",
1243 cntlid, subsysnqn, hostnqn);
1244 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
1247 mutex_unlock(&subsys->lock);
1248 nvmet_subsys_put(subsys);
1253 u16 nvmet_check_ctrl_status(struct nvmet_req *req)
1255 if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
1256 pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
1257 req->cmd->common.opcode, req->sq->qid);
1258 return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
1261 if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
1262 pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
1263 req->cmd->common.opcode, req->sq->qid);
1264 return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
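/*
 * A host may connect if the subsystem accepts any host, if it is the
 * discovery subsystem, or if the host NQN is on the subsystem's allowed
 * hosts list.  Must be called with nvmet_config_sem held.
 */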
1269 bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
1271 struct nvmet_host_link *p;
1273 lockdep_assert_held(&nvmet_config_sem);
1275 if (subsys->allow_any_host)
1278 if (subsys->type == NVME_NQN_DISC) /* allow all access to disc subsys */
1281 list_for_each_entry(p, &subsys->hosts, entry) {
1282 if (!strcmp(nvmet_host_name(p->host), hostnqn))
1290 * Note: ctrl->subsys->lock should be held when calling this function
1292 static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
1293 struct nvmet_req *req)
1295 struct nvmet_ns *ns;
1298 if (!req->p2p_client)
1301 ctrl->p2p_client = get_device(req->p2p_client);
1303 xa_for_each(&ctrl->subsys->namespaces, idx, ns)
1304 nvmet_p2pmem_ns_add_p2p(ctrl, ns);
1308 * Note: ctrl->subsys->lock should be held when calling this function
1310 static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
1312 struct radix_tree_iter iter;
1315 radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
1316 pci_dev_put(radix_tree_deref_slot(slot));
1318 put_device(ctrl->p2p_client);
1321 static void nvmet_fatal_error_handler(struct work_struct *work)
1323 struct nvmet_ctrl *ctrl =
1324 container_of(work, struct nvmet_ctrl, fatal_err_work);
1326 pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
1327 ctrl->ops->delete_ctrl(ctrl);
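/*
 * Allocate and set up a controller for a Connect command: validate the
 * subsystem NQN and host permissions, allocate the changed-namespace list
 * and the SQ array, pick a controller ID from cntlid_ida within the
 * subsystem's [cntlid_min, cntlid_max] range, start the keep-alive timer
 * and link the new controller into the subsystem.
 */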
1330 u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
1331 struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
1333 struct nvmet_subsys *subsys;
1334 struct nvmet_ctrl *ctrl;
1338 status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
1339 subsys = nvmet_find_get_subsys(req->port, subsysnqn);
1341 pr_warn("connect request for invalid subsystem %s!\n",
1343 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
1344 req->error_loc = offsetof(struct nvme_common_command, dptr);
1348 down_read(&nvmet_config_sem);
1349 if (!nvmet_host_allowed(subsys, hostnqn)) {
1350 pr_info("connect by host %s for subsystem %s not allowed\n",
1351 hostnqn, subsysnqn);
1352 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
1353 up_read(&nvmet_config_sem);
1354 status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
1355 req->error_loc = offsetof(struct nvme_common_command, dptr);
1356 goto out_put_subsystem;
1358 up_read(&nvmet_config_sem);
1360 status = NVME_SC_INTERNAL;
1361 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
1363 goto out_put_subsystem;
1364 mutex_init(&ctrl->lock);
1366 nvmet_init_cap(ctrl);
1368 ctrl->port = req->port;
1370 INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
1371 INIT_LIST_HEAD(&ctrl->async_events);
1372 INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
1373 INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
1374 INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
1376 memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
1377 memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
1379 kref_init(&ctrl->ref);
1380 ctrl->subsys = subsys;
1381 WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);
1383 ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
1384 sizeof(__le32), GFP_KERNEL);
1385 if (!ctrl->changed_ns_list)
1388 ctrl->sqs = kcalloc(subsys->max_qid + 1,
1389 sizeof(struct nvmet_sq *),
1392 goto out_free_changed_ns_list;
1394 if (subsys->cntlid_min > subsys->cntlid_max)
1397 ret = ida_simple_get(&cntlid_ida,
1398 subsys->cntlid_min, subsys->cntlid_max,
1401 status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
1406 ctrl->ops = req->ops;
1409 * Discovery controllers may use some arbitrary high value
1410 * in order to clean up stale discovery sessions
1412 if ((ctrl->subsys->type == NVME_NQN_DISC) && !kato)
1413 kato = NVMET_DISC_KATO_MS;
1415 /* keep-alive timeout in seconds */
1416 ctrl->kato = DIV_ROUND_UP(kato, 1000);
1418 ctrl->err_counter = 0;
1419 spin_lock_init(&ctrl->error_lock);
1421 nvmet_start_keep_alive_timer(ctrl);
1423 mutex_lock(&subsys->lock);
1424 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
1425 nvmet_setup_p2p_ns_map(ctrl, req);
1426 mutex_unlock(&subsys->lock);
1433 out_free_changed_ns_list:
1434 kfree(ctrl->changed_ns_list);
1438 nvmet_subsys_put(subsys);
1443 static void nvmet_ctrl_free(struct kref *ref)
1445 struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
1446 struct nvmet_subsys *subsys = ctrl->subsys;
1448 mutex_lock(&subsys->lock);
1449 nvmet_release_p2p_ns_map(ctrl);
1450 list_del(&ctrl->subsys_entry);
1451 mutex_unlock(&subsys->lock);
1453 nvmet_stop_keep_alive_timer(ctrl);
1455 flush_work(&ctrl->async_event_work);
1456 cancel_work_sync(&ctrl->fatal_err_work);
1458 ida_simple_remove(&cntlid_ida, ctrl->cntlid);
1460 nvmet_async_events_free(ctrl);
1462 kfree(ctrl->changed_ns_list);
1465 nvmet_subsys_put(subsys);
1468 void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
1470 kref_put(&ctrl->ref, nvmet_ctrl_free);
1473 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
1475 mutex_lock(&ctrl->lock);
1476 if (!(ctrl->csts & NVME_CSTS_CFS)) {
1477 ctrl->csts |= NVME_CSTS_CFS;
1478 schedule_work(&ctrl->fatal_err_work);
1480 mutex_unlock(&ctrl->lock);
1482 EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
1484 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
1485 const char *subsysnqn)
1487 struct nvmet_subsys_link *p;
1492 if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
1493 if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
1495 return nvmet_disc_subsys;
1498 down_read(&nvmet_config_sem);
1499 list_for_each_entry(p, &port->subsystems, entry) {
1500 if (!strncmp(p->subsys->subsysnqn, subsysnqn,
1502 if (!kref_get_unless_zero(&p->subsys->ref))
1504 up_read(&nvmet_config_sem);
1508 up_read(&nvmet_config_sem);
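/*
 * Allocate a new subsystem.  Controllers created under it are ephemeral, so
 * the serial number is generated from random bytes; discovery subsystems get
 * max_qid = 0 since they only ever use the admin queue.
 */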
1512 struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
1513 enum nvme_subsys_type type)
1515 struct nvmet_subsys *subsys;
1516 char serial[NVMET_SN_MAX_SIZE / 2];
1519 subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
1521 return ERR_PTR(-ENOMEM);
1523 subsys->ver = NVMET_DEFAULT_VS;
1524 /* generate a random serial number as our controllers are ephemeral: */
1525 get_random_bytes(&serial, sizeof(serial));
1526 bin2hex(subsys->serial, &serial, sizeof(serial));
1528 subsys->model_number = kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL);
1529 if (!subsys->model_number) {
1536 subsys->max_qid = NVMET_NR_QUEUES;
1539 subsys->max_qid = 0;
1542 pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
1546 subsys->type = type;
1547 subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
1549 if (!subsys->subsysnqn) {
1553 subsys->cntlid_min = NVME_CNTLID_MIN;
1554 subsys->cntlid_max = NVME_CNTLID_MAX;
1555 kref_init(&subsys->ref);
1557 mutex_init(&subsys->lock);
1558 xa_init(&subsys->namespaces);
1559 INIT_LIST_HEAD(&subsys->ctrls);
1560 INIT_LIST_HEAD(&subsys->hosts);
1565 kfree(subsys->model_number);
1568 return ERR_PTR(ret);
1571 static void nvmet_subsys_free(struct kref *ref)
1573 struct nvmet_subsys *subsys =
1574 container_of(ref, struct nvmet_subsys, ref);
1576 WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
1578 xa_destroy(&subsys->namespaces);
1579 nvmet_passthru_subsys_free(subsys);
1581 kfree(subsys->subsysnqn);
1582 kfree(subsys->model_number);
1586 void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
1588 struct nvmet_ctrl *ctrl;
1590 mutex_lock(&subsys->lock);
1591 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
1592 ctrl->ops->delete_ctrl(ctrl);
1593 mutex_unlock(&subsys->lock);
1596 void nvmet_subsys_put(struct nvmet_subsys *subsys)
1598 kref_put(&subsys->ref, nvmet_subsys_free);
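/*
 * Module init: create the zoned-block-device and buffered-I/O workqueues,
 * then register the discovery subsystem and the configfs interface; on
 * failure (and on module exit) everything is torn down in reverse order.
 */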
1601 static int __init nvmet_init(void)
1605 nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
1607 zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0);
1611 buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
1613 if (!buffered_io_wq) {
1615 goto out_free_zbd_work_queue;
1618 error = nvmet_init_discovery();
1620 goto out_free_work_queue;
1622 error = nvmet_init_configfs();
1624 goto out_exit_discovery;
1628 nvmet_exit_discovery();
1629 out_free_work_queue:
1630 destroy_workqueue(buffered_io_wq);
1631 out_free_zbd_work_queue:
1632 destroy_workqueue(zbd_wq);
1636 static void __exit nvmet_exit(void)
1638 nvmet_exit_configfs();
1639 nvmet_exit_discovery();
1640 ida_destroy(&cntlid_ida);
1641 destroy_workqueue(buffered_io_wq);
1642 destroy_workqueue(zbd_wq);
1644 BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
1645 BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
1648 module_init(nvmet_init);
1649 module_exit(nvmet_exit);
1651 MODULE_LICENSE("GPL v2");