1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2016 Avago Technologies. All rights reserved.
5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/module.h>
7 #include <linux/parser.h>
8 #include <uapi/scsi/fc/fc_fs.h>
9 #include <uapi/scsi/fc/fc_els.h>
10 #include <linux/delay.h>
11 #include <linux/overflow.h>
15 #include <linux/nvme-fc-driver.h>
16 #include <linux/nvme-fc.h>
17 #include <scsi/scsi_transport_fc.h>
19 /* *************************** Data Structures/Defines ****************** */
22 enum nvme_fc_queue_flags {
23 NVME_FC_Q_CONNECTED = 0,
27 #define NVME_FC_DEFAULT_DEV_LOSS_TMO 60 /* seconds */
29 struct nvme_fc_queue {
30 struct nvme_fc_ctrl *ctrl;
32 struct blk_mq_hw_ctx *hctx;
34 size_t cmnd_capsule_len;
43 } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
45 enum nvme_fcop_flags {
46 FCOP_FLAGS_TERMIO = (1 << 0),
47 FCOP_FLAGS_AEN = (1 << 1),
50 struct nvmefc_ls_req_op {
51 struct nvmefc_ls_req ls_req;
53 struct nvme_fc_rport *rport;
54 struct nvme_fc_queue *queue;
59 struct completion ls_done;
60 struct list_head lsreq_list; /* rport->ls_req_list */
64 enum nvme_fcpop_state {
65 FCPOP_STATE_UNINIT = 0,
67 FCPOP_STATE_ACTIVE = 2,
68 FCPOP_STATE_ABORTED = 3,
69 FCPOP_STATE_COMPLETE = 4,
72 struct nvme_fc_fcp_op {
73 struct nvme_request nreq; /*
76 * the 1st element in the
81 struct nvmefc_fcp_req fcp_req;
83 struct nvme_fc_ctrl *ctrl;
84 struct nvme_fc_queue *queue;
92 struct nvme_fc_cmd_iu cmd_iu;
93 struct nvme_fc_ersp_iu rsp_iu;
96 struct nvme_fcp_op_w_sgl {
97 struct nvme_fc_fcp_op op;
98 struct scatterlist sgl[SG_CHUNK_SIZE];
102 struct nvme_fc_lport {
103 struct nvme_fc_local_port localport;
106 struct list_head port_list; /* nvme_fc_port_list */
107 struct list_head endp_list;
108 struct device *dev; /* physical device for dma */
109 struct nvme_fc_port_template *ops;
111 atomic_t act_rport_cnt;
112 } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
114 struct nvme_fc_rport {
115 struct nvme_fc_remote_port remoteport;
117 struct list_head endp_list; /* for lport->endp_list */
118 struct list_head ctrl_list;
119 struct list_head ls_req_list;
120 struct list_head disc_list;
121 struct device *dev; /* physical device for dma */
122 struct nvme_fc_lport *lport;
125 atomic_t act_ctrl_cnt;
126 unsigned long dev_loss_end;
127 } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
129 enum nvme_fcctrl_flags {
130 FCCTRL_TERMIO = (1 << 0),
133 struct nvme_fc_ctrl {
135 struct nvme_fc_queue *queues;
137 struct nvme_fc_lport *lport;
138 struct nvme_fc_rport *rport;
143 atomic_t err_work_active;
146 struct list_head ctrl_list; /* rport->ctrl_list */
148 struct blk_mq_tag_set admin_tag_set;
149 struct blk_mq_tag_set tag_set;
151 struct delayed_work connect_work;
152 struct work_struct err_work;
157 wait_queue_head_t ioabort_wait;
159 struct nvme_fc_fcp_op aen_ops[NVME_NR_AEN_COMMANDS];
161 struct nvme_ctrl ctrl;
164 static inline struct nvme_fc_ctrl *
165 to_fc_ctrl(struct nvme_ctrl *ctrl)
167 return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
170 static inline struct nvme_fc_lport *
171 localport_to_lport(struct nvme_fc_local_port *portptr)
173 return container_of(portptr, struct nvme_fc_lport, localport);
176 static inline struct nvme_fc_rport *
177 remoteport_to_rport(struct nvme_fc_remote_port *portptr)
179 return container_of(portptr, struct nvme_fc_rport, remoteport);
182 static inline struct nvmefc_ls_req_op *
183 ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
185 return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
188 static inline struct nvme_fc_fcp_op *
189 fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
191 return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
196 /* *************************** Globals **************************** */
199 static DEFINE_SPINLOCK(nvme_fc_lock);
201 static LIST_HEAD(nvme_fc_lport_list);
202 static DEFINE_IDA(nvme_fc_local_port_cnt);
203 static DEFINE_IDA(nvme_fc_ctrl_cnt);
205 static struct workqueue_struct *nvme_fc_wq;
207 static bool nvme_fc_waiting_to_unload;
208 static DECLARE_COMPLETION(nvme_fc_unload_proceed);
211 * These items are short-term. They will eventually be moved into
212 * a generic FC class. See comments in module init.
214 static struct device *fc_udev_device;
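/*
 * fc_udev_device is the pseudo device whose kobject is used as the
 * source of the FC_EVENT=nvmediscovery udev events raised by
 * nvme_fc_signal_discovery_scan().
 */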
217 /* *********************** FC-NVME Port Management ************************ */
219 static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
220 struct nvme_fc_queue *, unsigned int);
223 nvme_fc_free_lport(struct kref *ref)
225 struct nvme_fc_lport *lport =
226 container_of(ref, struct nvme_fc_lport, ref);
229 WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
230 WARN_ON(!list_empty(&lport->endp_list));
232 /* remove from transport list */
233 spin_lock_irqsave(&nvme_fc_lock, flags);
234 list_del(&lport->port_list);
235 if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
236 complete(&nvme_fc_unload_proceed);
237 spin_unlock_irqrestore(&nvme_fc_lock, flags);
239 ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
240 ida_destroy(&lport->endp_cnt);
242 put_device(lport->dev);
248 nvme_fc_lport_put(struct nvme_fc_lport *lport)
250 kref_put(&lport->ref, nvme_fc_free_lport);
254 nvme_fc_lport_get(struct nvme_fc_lport *lport)
256 return kref_get_unless_zero(&lport->ref);
260 static struct nvme_fc_lport *
261 nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
262 struct nvme_fc_port_template *ops,
265 struct nvme_fc_lport *lport;
268 spin_lock_irqsave(&nvme_fc_lock, flags);
270 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
271 if (lport->localport.node_name != pinfo->node_name ||
272 lport->localport.port_name != pinfo->port_name)
275 if (lport->dev != dev) {
276 lport = ERR_PTR(-EXDEV);
280 if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
281 lport = ERR_PTR(-EEXIST);
285 if (!nvme_fc_lport_get(lport)) {
287 * fails if ref cnt already 0. If so,
288 * act as if lport already deleted
294 /* resume the lport */
297 lport->localport.port_role = pinfo->port_role;
298 lport->localport.port_id = pinfo->port_id;
299 lport->localport.port_state = FC_OBJSTATE_ONLINE;
301 spin_unlock_irqrestore(&nvme_fc_lock, flags);
309 spin_unlock_irqrestore(&nvme_fc_lock, flags);
315 * nvme_fc_register_localport - transport entry point called by an
316 * LLDD to register the existence of a NVME host FC port.
318 * @pinfo: pointer to information about the port to be registered
319 * @template: LLDD entrypoints and operational parameters for the port
320 * @dev: physical hardware device node port corresponds to. Will be
321 * used for DMA mappings
322 * @portptr: pointer to a local port pointer. Upon success, the routine
323 * will allocate a nvme_fc_local_port structure and place its
324 * address in the local port pointer. Upon failure, local port
325 * pointer will be set to 0.
328 * a completion status. Must be 0 upon success; a negative errno
329 * (ex: -ENXIO) upon failure.
332 nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
333 struct nvme_fc_port_template *template,
335 struct nvme_fc_local_port **portptr)
337 struct nvme_fc_lport *newrec;
341 if (!template->localport_delete || !template->remoteport_delete ||
342 !template->ls_req || !template->fcp_io ||
343 !template->ls_abort || !template->fcp_abort ||
344 !template->max_hw_queues || !template->max_sgl_segments ||
345 !template->max_dif_sgl_segments || !template->dma_boundary) {
347 goto out_reghost_failed;
351 * look to see if there is already a localport that had been
352 * deregistered and in the process of waiting for all the
353 * references to fully be removed. If the references haven't
354 * expired, we can simply re-enable the localport. Remoteports
355 * and controller reconnections should resume naturally.
357 newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);
359 /* found an lport, but something about its state is bad */
360 if (IS_ERR(newrec)) {
361 ret = PTR_ERR(newrec);
362 goto out_reghost_failed;
364 /* found existing lport, which was resumed */
366 *portptr = &newrec->localport;
370 /* nothing found - allocate a new localport struct */
372 newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
376 goto out_reghost_failed;
379 idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
385 if (!get_device(dev) && dev) {
390 INIT_LIST_HEAD(&newrec->port_list);
391 INIT_LIST_HEAD(&newrec->endp_list);
392 kref_init(&newrec->ref);
393 atomic_set(&newrec->act_rport_cnt, 0);
394 newrec->ops = template;
396 ida_init(&newrec->endp_cnt);
397 newrec->localport.private = &newrec[1];
398 newrec->localport.node_name = pinfo->node_name;
399 newrec->localport.port_name = pinfo->port_name;
400 newrec->localport.port_role = pinfo->port_role;
401 newrec->localport.port_id = pinfo->port_id;
402 newrec->localport.port_state = FC_OBJSTATE_ONLINE;
403 newrec->localport.port_num = idx;
405 spin_lock_irqsave(&nvme_fc_lock, flags);
406 list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
407 spin_unlock_irqrestore(&nvme_fc_lock, flags);
410 dma_set_seg_boundary(dev, template->dma_boundary);
412 *portptr = &newrec->localport;
416 ida_simple_remove(&nvme_fc_local_port_cnt, idx);
424 EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
427 * nvme_fc_unregister_localport - transport entry point called by an
428 * LLDD to deregister/remove a previously
429 * registered NVME host FC port.
430 * @portptr: pointer to the (registered) local port that is to be deregistered.
433 * a completion status. Must be 0 upon success; a negative errno
434 * (ex: -ENXIO) upon failure.
437 nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
439 struct nvme_fc_lport *lport = localport_to_lport(portptr);
445 spin_lock_irqsave(&nvme_fc_lock, flags);
447 if (portptr->port_state != FC_OBJSTATE_ONLINE) {
448 spin_unlock_irqrestore(&nvme_fc_lock, flags);
451 portptr->port_state = FC_OBJSTATE_DELETED;
453 spin_unlock_irqrestore(&nvme_fc_lock, flags);
455 if (atomic_read(&lport->act_rport_cnt) == 0)
456 lport->ops->localport_delete(&lport->localport);
458 nvme_fc_lport_put(lport);
462 EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);
465 * TRADDR strings, per FC-NVME are fixed format:
466 * "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
467 * udev event will only differ by the prefix of what field is being set:
469 * "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
470 * 19 + 43 + null_fudge = 64 characters
472 #define FCNVME_TRADDR_LENGTH 64
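/*
 * Example of the resulting strings (WWNN/WWPN values are illustrative
 * only, not taken from any real configuration):
 *   NVMEFC_HOST_TRADDR=nn-0x20000090fa942779:pn-0x10000090fa942779
 *   NVMEFC_TRADDR=nn-0x20000090fa945678:pn-0x10000090fa945678
 */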
475 nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
476 struct nvme_fc_rport *rport)
478 char hostaddr[FCNVME_TRADDR_LENGTH]; /* NVMEFC_HOST_TRADDR=...*/
479 char tgtaddr[FCNVME_TRADDR_LENGTH]; /* NVMEFC_TRADDR=...*/
480 char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };
482 if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
485 snprintf(hostaddr, sizeof(hostaddr),
486 "NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
487 lport->localport.node_name, lport->localport.port_name);
488 snprintf(tgtaddr, sizeof(tgtaddr),
489 "NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
490 rport->remoteport.node_name, rport->remoteport.port_name);
491 kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
495 nvme_fc_free_rport(struct kref *ref)
497 struct nvme_fc_rport *rport =
498 container_of(ref, struct nvme_fc_rport, ref);
499 struct nvme_fc_lport *lport =
500 localport_to_lport(rport->remoteport.localport);
503 WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
504 WARN_ON(!list_empty(&rport->ctrl_list));
506 /* remove from lport list */
507 spin_lock_irqsave(&nvme_fc_lock, flags);
508 list_del(&rport->endp_list);
509 spin_unlock_irqrestore(&nvme_fc_lock, flags);
511 WARN_ON(!list_empty(&rport->disc_list));
512 ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
516 nvme_fc_lport_put(lport);
520 nvme_fc_rport_put(struct nvme_fc_rport *rport)
522 kref_put(&rport->ref, nvme_fc_free_rport);
526 nvme_fc_rport_get(struct nvme_fc_rport *rport)
528 return kref_get_unless_zero(&rport->ref);
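/*
 * Called when connectivity to the remote port is re-established. A
 * controller sitting in CONNECTING (with reconnects suppressed) gets an
 * immediate reconnect attempt queued; RESETTING and deleting
 * controllers need no action here.
 */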
532 nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
534 switch (ctrl->ctrl.state) {
536 case NVME_CTRL_CONNECTING:
538 * As all reconnects were suppressed, schedule a
541 dev_info(ctrl->ctrl.device,
542 "NVME-FC{%d}: connectivity re-established. "
543 "Attempting reconnect\n", ctrl->cnum);
545 queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
548 case NVME_CTRL_RESETTING:
550 * Controller is already in the process of terminating the
551 * association. No need to do anything further. The reconnect
552 * step will naturally occur after the reset completes.
557 /* no action to take - let it delete */
562 static struct nvme_fc_rport *
563 nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
564 struct nvme_fc_port_info *pinfo)
566 struct nvme_fc_rport *rport;
567 struct nvme_fc_ctrl *ctrl;
570 spin_lock_irqsave(&nvme_fc_lock, flags);
572 list_for_each_entry(rport, &lport->endp_list, endp_list) {
573 if (rport->remoteport.node_name != pinfo->node_name ||
574 rport->remoteport.port_name != pinfo->port_name)
577 if (!nvme_fc_rport_get(rport)) {
578 rport = ERR_PTR(-ENOLCK);
582 spin_unlock_irqrestore(&nvme_fc_lock, flags);
584 spin_lock_irqsave(&rport->lock, flags);
586 /* has it been unregistered */
587 if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
588 /* means lldd called us twice */
589 spin_unlock_irqrestore(&rport->lock, flags);
590 nvme_fc_rport_put(rport);
591 return ERR_PTR(-ESTALE);
594 rport->remoteport.port_role = pinfo->port_role;
595 rport->remoteport.port_id = pinfo->port_id;
596 rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
597 rport->dev_loss_end = 0;
600 * kick off a reconnect attempt on all associations to the
601 * remote port. A successful reconnect will resume i/o.
603 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
604 nvme_fc_resume_controller(ctrl);
606 spin_unlock_irqrestore(&rport->lock, flags);
614 spin_unlock_irqrestore(&nvme_fc_lock, flags);
620 __nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
621 struct nvme_fc_port_info *pinfo)
623 if (pinfo->dev_loss_tmo)
624 rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
626 rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
630 * nvme_fc_register_remoteport - transport entry point called by an
631 * LLDD to register the existence of a NVME
632 * subsystem FC port on its fabric.
633 * @localport: pointer to the (registered) local port that the remote
634 * subsystem port is connected to.
635 * @pinfo: pointer to information about the port to be registered
636 * @portptr: pointer to a remote port pointer. Upon success, the routine
637 * will allocate a nvme_fc_remote_port structure and place its
638 * address in the remote port pointer. Upon failure, remote port
639 * pointer will be set to 0.
642 * a completion status. Must be 0 upon success; a negative errno
643 * (ex: -ENXIO) upon failure.
646 nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
647 struct nvme_fc_port_info *pinfo,
648 struct nvme_fc_remote_port **portptr)
650 struct nvme_fc_lport *lport = localport_to_lport(localport);
651 struct nvme_fc_rport *newrec;
655 if (!nvme_fc_lport_get(lport)) {
657 goto out_reghost_failed;
661 * look to see if there is already a remoteport that is waiting
662 * for a reconnect (within dev_loss_tmo) with the same WWN's.
663 * If so, transition to it and reconnect.
665 newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);
667 /* found an rport, but something about its state is bad */
668 if (IS_ERR(newrec)) {
669 ret = PTR_ERR(newrec);
672 /* found existing rport, which was resumed */
674 nvme_fc_lport_put(lport);
675 __nvme_fc_set_dev_loss_tmo(newrec, pinfo);
676 nvme_fc_signal_discovery_scan(lport, newrec);
677 *portptr = &newrec->remoteport;
681 /* nothing found - allocate a new remoteport struct */
683 newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
690 idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
693 goto out_kfree_rport;
696 INIT_LIST_HEAD(&newrec->endp_list);
697 INIT_LIST_HEAD(&newrec->ctrl_list);
698 INIT_LIST_HEAD(&newrec->ls_req_list);
699 INIT_LIST_HEAD(&newrec->disc_list);
700 kref_init(&newrec->ref);
701 atomic_set(&newrec->act_ctrl_cnt, 0);
702 spin_lock_init(&newrec->lock);
703 newrec->remoteport.localport = &lport->localport;
704 newrec->dev = lport->dev;
705 newrec->lport = lport;
706 newrec->remoteport.private = &newrec[1];
707 newrec->remoteport.port_role = pinfo->port_role;
708 newrec->remoteport.node_name = pinfo->node_name;
709 newrec->remoteport.port_name = pinfo->port_name;
710 newrec->remoteport.port_id = pinfo->port_id;
711 newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
712 newrec->remoteport.port_num = idx;
713 __nvme_fc_set_dev_loss_tmo(newrec, pinfo);
715 spin_lock_irqsave(&nvme_fc_lock, flags);
716 list_add_tail(&newrec->endp_list, &lport->endp_list);
717 spin_unlock_irqrestore(&nvme_fc_lock, flags);
719 nvme_fc_signal_discovery_scan(lport, newrec);
721 *portptr = &newrec->remoteport;
727 nvme_fc_lport_put(lport);
732 EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
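/*
 * Walk the rport's outstanding LS requests and ask the LLDD to abort
 * any that are not already marked FCOP_FLAGS_TERMIO. The rport lock is
 * dropped around each ls_abort() callback.
 */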
735 nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
737 struct nvmefc_ls_req_op *lsop;
741 spin_lock_irqsave(&rport->lock, flags);
743 list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
744 if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
745 lsop->flags |= FCOP_FLAGS_TERMIO;
746 spin_unlock_irqrestore(&rport->lock, flags);
747 rport->lport->ops->ls_abort(&rport->lport->localport,
753 spin_unlock_irqrestore(&rport->lock, flags);
759 nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
761 dev_info(ctrl->ctrl.device,
762 "NVME-FC{%d}: controller connectivity lost. Awaiting "
763 "Reconnect", ctrl->cnum);
765 switch (ctrl->ctrl.state) {
769 * Schedule a controller reset. The reset will terminate the
770 * association and schedule the reconnect timer. Reconnects
771 * will be attempted until either the ctlr_loss_tmo
772 * (max_retries * connect_delay) expires or the remoteport's
773 * dev_loss_tmo expires.
775 if (nvme_reset_ctrl(&ctrl->ctrl)) {
776 dev_warn(ctrl->ctrl.device,
777 "NVME-FC{%d}: Couldn't schedule reset.\n",
779 nvme_delete_ctrl(&ctrl->ctrl);
783 case NVME_CTRL_CONNECTING:
785 * The association has already been terminated and the
786 * controller is attempting reconnects. No need to do anything
787 * further. Reconnects will be attempted until either the
788 * ctlr_loss_tmo (max_retries * connect_delay) expires or the
789 * remoteport's dev_loss_tmo expires.
793 case NVME_CTRL_RESETTING:
795 * Controller is already in the process of terminating the
796 * association. No need to do anything further. The reconnect
797 * step will kick in naturally after the association is
802 case NVME_CTRL_DELETING:
804 /* no action to take - let it delete */
810 * nvme_fc_unregister_remoteport - transport entry point called by an
811 * LLDD to deregister/remove a previously
812 * registered NVME subsystem FC port.
813 * @portptr: pointer to the (registered) remote port that is to be
817 * a completion status. Must be 0 upon success; a negative errno
818 * (ex: -ENXIO) upon failure.
821 nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
823 struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
824 struct nvme_fc_ctrl *ctrl;
830 spin_lock_irqsave(&rport->lock, flags);
832 if (portptr->port_state != FC_OBJSTATE_ONLINE) {
833 spin_unlock_irqrestore(&rport->lock, flags);
836 portptr->port_state = FC_OBJSTATE_DELETED;
838 rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);
840 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
841 /* if dev_loss_tmo==0, dev loss is immediate */
842 if (!portptr->dev_loss_tmo) {
843 dev_warn(ctrl->ctrl.device,
844 "NVME-FC{%d}: controller connectivity lost.\n",
846 nvme_delete_ctrl(&ctrl->ctrl);
848 nvme_fc_ctrl_connectivity_loss(ctrl);
851 spin_unlock_irqrestore(&rport->lock, flags);
853 nvme_fc_abort_lsops(rport);
855 if (atomic_read(&rport->act_ctrl_cnt) == 0)
856 rport->lport->ops->remoteport_delete(portptr);
859 * release the reference, which will allow, if all controllers
860 * go away, which should only occur after dev_loss_tmo occurs,
861 * for the rport to be torn down.
863 nvme_fc_rport_put(rport);
867 EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
870 * nvme_fc_rescan_remoteport - transport entry point called by an
871 * LLDD to request a nvme device rescan.
872 * @remoteport: pointer to the (registered) remote port that is to be
878 nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
880 struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);
882 nvme_fc_signal_discovery_scan(rport->lport, rport);
884 EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);
887 nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
890 struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
893 spin_lock_irqsave(&rport->lock, flags);
895 if (portptr->port_state != FC_OBJSTATE_ONLINE) {
896 spin_unlock_irqrestore(&rport->lock, flags);
900 /* a dev_loss_tmo of 0 (immediate) is allowed to be set */
901 rport->remoteport.dev_loss_tmo = dev_loss_tmo;
903 spin_unlock_irqrestore(&rport->lock, flags);
907 EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);
910 /* *********************** FC-NVME DMA Handling **************************** */
913 * The fcloop device passes in a NULL device pointer. Real LLDDs will
914 * pass in a valid device pointer. If NULL is passed to the dma mapping
915 * routines, depending on the platform, it may or may not succeed, and
916 * may crash.
919 * Wrap all the dma routines and check the dev pointer.
921 * For simple mappings (those that return just a dma address), we'll
922 * noop them, returning a dma address of 0.
924 * On more complex mappings (dma_map_sg), a pseudo routine fills
925 * in the scatter list, setting all dma addresses to 0.
928 static inline dma_addr_t
929 fc_dma_map_single(struct device *dev, void *ptr, size_t size,
930 enum dma_data_direction dir)
932 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
936 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
938 return dev ? dma_mapping_error(dev, dma_addr) : 0;
942 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
943 enum dma_data_direction dir)
946 dma_unmap_single(dev, addr, size, dir);
950 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
951 enum dma_data_direction dir)
954 dma_sync_single_for_cpu(dev, addr, size, dir);
958 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
959 enum dma_data_direction dir)
962 dma_sync_single_for_device(dev, addr, size, dir);
965 /* pseudo dma_map_sg call */
967 fc_map_sg(struct scatterlist *sg, int nents)
969 struct scatterlist *s;
972 WARN_ON(nents == 0 || sg[0].length == 0);
974 for_each_sg(sg, s, nents, i) {
975 s->dma_address = 0L;
976 #ifdef CONFIG_NEED_SG_DMA_LENGTH
977 s->dma_length = s->length;
984 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
985 enum dma_data_direction dir)
987 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
991 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
992 enum dma_data_direction dir)
995 dma_unmap_sg(dev, sg, nents, dir);
998 /* *********************** FC-NVME LS Handling **************************** */
1000 static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
1001 static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);
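/*
 * __nvme_fc_finish_ls_req() undoes the setup done by
 * __nvme_fc_send_ls_req(): it unlinks the LS op from the rport's
 * ls_req_list, unmaps the request/response DMA buffer and drops the
 * rport reference taken at send time.
 */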
1005 __nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
1007 struct nvme_fc_rport *rport = lsop->rport;
1008 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
1009 unsigned long flags;
1011 spin_lock_irqsave(&rport->lock, flags);
1013 if (!lsop->req_queued) {
1014 spin_unlock_irqrestore(&rport->lock, flags);
1018 list_del(&lsop->lsreq_list);
1020 lsop->req_queued = false;
1022 spin_unlock_irqrestore(&rport->lock, flags);
1024 fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
1025 (lsreq->rqstlen + lsreq->rsplen),
1028 nvme_fc_rport_put(rport);
1032 __nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
1033 struct nvmefc_ls_req_op *lsop,
1034 void (*done)(struct nvmefc_ls_req *req, int status))
1036 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
1037 unsigned long flags;
1040 if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
1041 return -ECONNREFUSED;
1043 if (!nvme_fc_rport_get(rport))
1047 lsop->rport = rport;
1048 lsop->req_queued = false;
1049 INIT_LIST_HEAD(&lsop->lsreq_list);
1050 init_completion(&lsop->ls_done);
1052 lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
1053 lsreq->rqstlen + lsreq->rsplen,
1055 if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
1059 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
1061 spin_lock_irqsave(&rport->lock, flags);
1063 list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);
1065 lsop->req_queued = true;
1067 spin_unlock_irqrestore(&rport->lock, flags);
1069 ret = rport->lport->ops->ls_req(&rport->lport->localport,
1070 &rport->remoteport, lsreq);
1077 lsop->ls_error = ret;
1078 spin_lock_irqsave(&rport->lock, flags);
1079 lsop->req_queued = false;
1080 list_del(&lsop->lsreq_list);
1081 spin_unlock_irqrestore(&rport->lock, flags);
1082 fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
1083 (lsreq->rqstlen + lsreq->rsplen),
1086 nvme_fc_rport_put(rport);
1092 nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
1094 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
1096 lsop->ls_error = status;
1097 complete(&lsop->ls_done);
1101 nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
1103 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
1104 struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
1107 ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);
1111 * No timeout/not interruptible as we need the struct
1112 * to exist until the lldd calls us back. Thus we mandate
1113 * waiting until the lldd calls back; the lldd is responsible for
1114 * the timeout action.
1116 wait_for_completion(&lsop->ls_done);
1118 __nvme_fc_finish_ls_req(lsop);
1120 ret = lsop->ls_error;
1126 /* ACC or RJT payload ? */
1127 if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
1134 nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
1135 struct nvmefc_ls_req_op *lsop,
1136 void (*done)(struct nvmefc_ls_req *req, int status))
1138 /* don't wait for completion */
1140 return __nvme_fc_send_ls_req(rport, lsop, done);
1143 /* Validation Error indexes into the string table below */
1147 VERR_LSDESC_RQST = 2,
1148 VERR_LSDESC_RQST_LEN = 3,
1150 VERR_ASSOC_ID_LEN = 5,
1152 VERR_CONN_ID_LEN = 7,
1154 VERR_CR_ASSOC_ACC_LEN = 9,
1156 VERR_CR_CONN_ACC_LEN = 11,
1158 VERR_DISCONN_ACC_LEN = 13,
1161 static char *validation_errors[] = {
1165 "Bad LSDESC_RQST Length",
1166 "Not Association ID",
1167 "Bad Association ID Length",
1168 "Not Connection ID",
1169 "Bad Connection ID Length",
1170 "Not CR_ASSOC Rqst",
1171 "Bad CR_ASSOC ACC Length",
1173 "Bad CR_CONN ACC Length",
1174 "Not Disconnect Rqst",
1175 "Bad Disconnect ACC Length",
1179 nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
1180 struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
1182 struct nvmefc_ls_req_op *lsop;
1183 struct nvmefc_ls_req *lsreq;
1184 struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
1185 struct fcnvme_ls_cr_assoc_acc *assoc_acc;
1188 lsop = kzalloc((sizeof(*lsop) +
1189 ctrl->lport->ops->lsrqst_priv_sz +
1190 sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
1195 lsreq = &lsop->ls_req;
1197 lsreq->private = (void *)&lsop[1];
1198 assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
1199 (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
1200 assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
1202 assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
1203 assoc_rqst->desc_list_len =
1204 cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
1206 assoc_rqst->assoc_cmd.desc_tag =
1207 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
1208 assoc_rqst->assoc_cmd.desc_len =
1210 sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
1212 assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
1213 assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
1214 /* Linux supports only Dynamic controllers */
1215 assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
1216 uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
1217 strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
1218 min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
1219 strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
1220 min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));
1222 lsop->queue = queue;
1223 lsreq->rqstaddr = assoc_rqst;
1224 lsreq->rqstlen = sizeof(*assoc_rqst);
1225 lsreq->rspaddr = assoc_acc;
1226 lsreq->rsplen = sizeof(*assoc_acc);
1227 lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
1229 ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
1231 goto out_free_buffer;
1233 /* process connect LS completion */
1235 /* validate the ACC response */
1236 if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
1238 else if (assoc_acc->hdr.desc_list_len !=
1240 sizeof(struct fcnvme_ls_cr_assoc_acc)))
1241 fcret = VERR_CR_ASSOC_ACC_LEN;
1242 else if (assoc_acc->hdr.rqst.desc_tag !=
1243 cpu_to_be32(FCNVME_LSDESC_RQST))
1244 fcret = VERR_LSDESC_RQST;
1245 else if (assoc_acc->hdr.rqst.desc_len !=
1246 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
1247 fcret = VERR_LSDESC_RQST_LEN;
1248 else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
1249 fcret = VERR_CR_ASSOC;
1250 else if (assoc_acc->associd.desc_tag !=
1251 cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1252 fcret = VERR_ASSOC_ID;
1253 else if (assoc_acc->associd.desc_len !=
1255 sizeof(struct fcnvme_lsdesc_assoc_id)))
1256 fcret = VERR_ASSOC_ID_LEN;
1257 else if (assoc_acc->connectid.desc_tag !=
1258 cpu_to_be32(FCNVME_LSDESC_CONN_ID))
1259 fcret = VERR_CONN_ID;
1260 else if (assoc_acc->connectid.desc_len !=
1261 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
1262 fcret = VERR_CONN_ID_LEN;
1267 "q %d connect failed: %s\n",
1268 queue->qnum, validation_errors[fcret]);
1270 ctrl->association_id =
1271 be64_to_cpu(assoc_acc->associd.association_id);
1272 queue->connection_id =
1273 be64_to_cpu(assoc_acc->connectid.connection_id);
1274 set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1282 "queue %d connect admin queue failed (%d).\n",
1288 nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
1289 u16 qsize, u16 ersp_ratio)
1291 struct nvmefc_ls_req_op *lsop;
1292 struct nvmefc_ls_req *lsreq;
1293 struct fcnvme_ls_cr_conn_rqst *conn_rqst;
1294 struct fcnvme_ls_cr_conn_acc *conn_acc;
1297 lsop = kzalloc((sizeof(*lsop) +
1298 ctrl->lport->ops->lsrqst_priv_sz +
1299 sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
1304 lsreq = &lsop->ls_req;
1306 lsreq->private = (void *)&lsop[1];
1307 conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
1308 (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
1309 conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
1311 conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
1312 conn_rqst->desc_list_len = cpu_to_be32(
1313 sizeof(struct fcnvme_lsdesc_assoc_id) +
1314 sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
1316 conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1317 conn_rqst->associd.desc_len =
1319 sizeof(struct fcnvme_lsdesc_assoc_id));
1320 conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
1321 conn_rqst->connect_cmd.desc_tag =
1322 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
1323 conn_rqst->connect_cmd.desc_len =
1325 sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
1326 conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
1327 conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
1328 conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);
1330 lsop->queue = queue;
1331 lsreq->rqstaddr = conn_rqst;
1332 lsreq->rqstlen = sizeof(*conn_rqst);
1333 lsreq->rspaddr = conn_acc;
1334 lsreq->rsplen = sizeof(*conn_acc);
1335 lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
1337 ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
1339 goto out_free_buffer;
1341 /* process connect LS completion */
1343 /* validate the ACC response */
1344 if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
1346 else if (conn_acc->hdr.desc_list_len !=
1347 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
1348 fcret = VERR_CR_CONN_ACC_LEN;
1349 else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
1350 fcret = VERR_LSDESC_RQST;
1351 else if (conn_acc->hdr.rqst.desc_len !=
1352 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
1353 fcret = VERR_LSDESC_RQST_LEN;
1354 else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
1355 fcret = VERR_CR_CONN;
1356 else if (conn_acc->connectid.desc_tag !=
1357 cpu_to_be32(FCNVME_LSDESC_CONN_ID))
1358 fcret = VERR_CONN_ID;
1359 else if (conn_acc->connectid.desc_len !=
1360 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
1361 fcret = VERR_CONN_ID_LEN;
1366 "q %d connect failed: %s\n",
1367 queue->qnum, validation_errors[fcret]);
1369 queue->connection_id =
1370 be64_to_cpu(conn_acc->connectid.connection_id);
1371 set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1379 "queue %d connect command failed (%d).\n",
1385 nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
1387 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
1389 __nvme_fc_finish_ls_req(lsop);
1391 /* fc-nvme initiator doesn't care about success or failure of cmd */
1397 * This routine sends a FC-NVME LS to disconnect (aka terminate)
1398 * the FC-NVME Association. Terminating the association also
1399 * terminates the FC-NVME connections (per queue, both admin and io
1400 * queues) that are part of the association. E.g. things are torn
1401 * down, and the related FC-NVME Association ID and Connection IDs
1404 * The behavior of the fc-nvme initiator is such that its
1405 * understanding of the association and connections will implicitly
1406 * be torn down. The action is implicit as it may be due to a loss of
1407 * connectivity with the fc-nvme target, so you may never get a
1408 * response even if you tried. As such, the action of this routine
1409 * is to asynchronously send the LS, ignore any results of the LS, and
1410 * continue on with terminating the association. If the fc-nvme target
1411 * is present and receives the LS, it too can tear down.
1414 nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
1416 struct fcnvme_ls_disconnect_rqst *discon_rqst;
1417 struct fcnvme_ls_disconnect_acc *discon_acc;
1418 struct nvmefc_ls_req_op *lsop;
1419 struct nvmefc_ls_req *lsreq;
1422 lsop = kzalloc((sizeof(*lsop) +
1423 ctrl->lport->ops->lsrqst_priv_sz +
1424 sizeof(*discon_rqst) + sizeof(*discon_acc)),
1427 /* couldn't send it... too bad */
1430 lsreq = &lsop->ls_req;
1432 lsreq->private = (void *)&lsop[1];
1433 discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
1434 (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
1435 discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];
1437 discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
1438 discon_rqst->desc_list_len = cpu_to_be32(
1439 sizeof(struct fcnvme_lsdesc_assoc_id) +
1440 sizeof(struct fcnvme_lsdesc_disconn_cmd));
1442 discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1443 discon_rqst->associd.desc_len =
1445 sizeof(struct fcnvme_lsdesc_assoc_id));
1447 discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
1449 discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
1450 FCNVME_LSDESC_DISCONN_CMD);
1451 discon_rqst->discon_cmd.desc_len =
1453 sizeof(struct fcnvme_lsdesc_disconn_cmd));
1454 discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
1455 discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);
1457 lsreq->rqstaddr = discon_rqst;
1458 lsreq->rqstlen = sizeof(*discon_rqst);
1459 lsreq->rspaddr = discon_acc;
1460 lsreq->rsplen = sizeof(*discon_acc);
1461 lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
1463 ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
1464 nvme_fc_disconnect_assoc_done);
1468 /* the only meaningful part of terminating the association */
1469 ctrl->association_id = 0;
1473 /* *********************** NVME Ctrl Routines **************************** */
1475 static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
1478 __nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
1479 struct nvme_fc_fcp_op *op)
1481 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
1482 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1483 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
1484 sizeof(op->cmd_iu), DMA_TO_DEVICE);
1486 atomic_set(&op->state, FCPOP_STATE_UNINIT);
1490 nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
1491 unsigned int hctx_idx)
1493 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1495 return __nvme_fc_exit_request(set->driver_data, op);
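/*
 * Move an FCP op to ABORTED and, if it was ACTIVE, ask the LLDD to
 * abort the underlying exchange. While a controller-wide FCCTRL_TERMIO
 * is in progress the abort is accounted for so teardown can wait for
 * its completion (see __nvme_fc_fcpop_chk_teardowns()).
 */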
1499 __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
1501 unsigned long flags;
1504 spin_lock_irqsave(&ctrl->lock, flags);
1505 opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
1506 if (opstate != FCPOP_STATE_ACTIVE)
1507 atomic_set(&op->state, opstate);
1508 else if (ctrl->flags & FCCTRL_TERMIO)
1510 spin_unlock_irqrestore(&ctrl->lock, flags);
1512 if (opstate != FCPOP_STATE_ACTIVE)
1515 ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
1516 &ctrl->rport->remoteport,
1517 op->queue->lldd_handle,
1524 nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
1526 struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
1529 /* ensure we've initialized the ops once */
1530 if (!(aen_op->flags & FCOP_FLAGS_AEN))
1533 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
1534 __nvme_fc_abort_op(ctrl, aen_op);
1538 __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
1539 struct nvme_fc_fcp_op *op, int opstate)
1541 unsigned long flags;
1543 if (opstate == FCPOP_STATE_ABORTED) {
1544 spin_lock_irqsave(&ctrl->lock, flags);
1545 if (ctrl->flags & FCCTRL_TERMIO) {
1547 wake_up(&ctrl->ioabort_wait);
1549 spin_unlock_irqrestore(&ctrl->lock, flags);
1554 nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1556 struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
1557 struct request *rq = op->rq;
1558 struct nvmefc_fcp_req *freq = &op->fcp_req;
1559 struct nvme_fc_ctrl *ctrl = op->ctrl;
1560 struct nvme_fc_queue *queue = op->queue;
1561 struct nvme_completion *cqe = &op->rsp_iu.cqe;
1562 struct nvme_command *sqe = &op->cmd_iu.sqe;
1563 __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
1564 union nvme_result result;
1565 bool terminate_assoc = true;
1570 * The current linux implementation of a nvme controller
1571 * allocates a single tag set for all io queues and sizes
1572 * the io queues to fully hold all possible tags. Thus, the
1573 * implementation does not reference or care about the sqhd
1574 * value as it never needs to use the sqhd/sqtail pointers
1575 * for submission pacing.
1577 * This affects the FC-NVME implementation in two ways:
1578 * 1) As the value doesn't matter, we don't need to waste
1579 * cycles extracting it from ERSPs and stamping it in the
1580 * cases where the transport fabricates CQEs on successful
1582 * 2) The FC-NVME implementation requires that delivery of
1583 * ERSP completions are to go back to the nvme layer in order
1584 * relative to the rsn, such that the sqhd value will always
1585 * be "in order" for the nvme layer. As the nvme layer in
1586 * linux doesn't care about sqhd, there's no need to return
1590 * As the core nvme layer in linux currently does not look at
1591 * every field in the cqe - in cases where the FC transport must
1592 * fabricate a CQE, the following fields will not be set as they
1593 * are not referenced:
1594 * cqe.sqid, cqe.sqhd, cqe.command_id
1596 * Failure or error of an individual i/o, detected by the transport
1597 * in a fashion unrelated to the nvme completion status, can
1598 * potentially cause the initiator and target sides to get out
1599 * of sync on SQ head/tail (aka outstanding io count allowed).
1600 * Per FC-NVME spec, failure of an individual command requires
1601 * the connection to be terminated, which in turn requires the
1602 * association to be terminated.
1605 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
1607 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
1608 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1610 if (opstate == FCPOP_STATE_ABORTED)
1611 status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
1612 else if (freq->status)
1613 status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1616 * For the linux implementation, if we have an unsuccessful
1617 * status, the blk-mq layer can typically be called with the
1618 * non-zero status and the content of the cqe isn't important.
1624 * command completed successfully relative to the wire
1625 * protocol. However, validate anything received and
1626 * extract the status and result from the cqe (create it
1630 switch (freq->rcv_rsplen) {
1633 case NVME_FC_SIZEOF_ZEROS_RSP:
1635 * No response payload or 12 bytes of payload (which
1636 * should all be zeros) are considered successful and
1637 * no payload in the CQE by the transport.
1639 if (freq->transferred_length !=
1640 be32_to_cpu(op->cmd_iu.data_len)) {
1641 status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1647 case sizeof(struct nvme_fc_ersp_iu):
1649 * The ERSP IU contains a full completion with CQE.
1650 * Validate ERSP IU and look at cqe.
1652 if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
1653 (freq->rcv_rsplen / 4) ||
1654 be32_to_cpu(op->rsp_iu.xfrd_len) !=
1655 freq->transferred_length ||
1656 op->rsp_iu.status_code ||
1657 sqe->common.command_id != cqe->command_id)) {
1658 status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1661 result = cqe->result;
1662 status = cqe->status;
1666 status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1670 terminate_assoc = false;
1673 if (op->flags & FCOP_FLAGS_AEN) {
1674 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
1675 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
1676 atomic_set(&op->state, FCPOP_STATE_IDLE);
1677 op->flags = FCOP_FLAGS_AEN; /* clear other flags */
1678 nvme_fc_ctrl_put(ctrl);
1682 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
1683 nvme_end_request(rq, status, result);
1686 if (terminate_assoc)
1687 nvme_fc_error_recovery(ctrl, "transport detected io error");
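/*
 * One-time initialization of an FCP op: point the LLDD request at the
 * embedded CMD/RSP IUs, fill in the fixed CMD IU header fields and DMA
 * map both IUs so they can be handed to the LLDD on each submission.
 */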
1691 __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
1692 struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
1693 struct request *rq, u32 rqno)
1695 struct nvme_fcp_op_w_sgl *op_w_sgl =
1696 container_of(op, typeof(*op_w_sgl), op);
1697 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1700 memset(op, 0, sizeof(*op));
1701 op->fcp_req.cmdaddr = &op->cmd_iu;
1702 op->fcp_req.cmdlen = sizeof(op->cmd_iu);
1703 op->fcp_req.rspaddr = &op->rsp_iu;
1704 op->fcp_req.rsplen = sizeof(op->rsp_iu);
1705 op->fcp_req.done = nvme_fc_fcpio_done;
1711 cmdiu->scsi_id = NVME_CMD_SCSI_ID;
1712 cmdiu->fc_id = NVME_CMD_FC_ID;
1713 cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
1715 op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
1716 &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
1717 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
1719 "FCP Op failed - cmdiu dma mapping failed.\n");
1724 op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
1725 &op->rsp_iu, sizeof(op->rsp_iu),
1727 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
1729 "FCP Op failed - rspiu dma mapping failed.\n");
1733 atomic_set(&op->state, FCPOP_STATE_IDLE);
1739 nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
1740 unsigned int hctx_idx, unsigned int numa_node)
1742 struct nvme_fc_ctrl *ctrl = set->driver_data;
1743 struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
1744 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
1745 struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
1748 res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
1751 op->op.fcp_req.first_sgl = &op->sgl[0];
1752 op->op.fcp_req.private = &op->priv[0];
1753 nvme_req(rq)->ctrl = &ctrl->ctrl;
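/*
 * AEN ops are not backed by block layer requests. They use the admin
 * queue, command ids starting at NVME_AQ_BLK_MQ_DEPTH, and a
 * separately allocated LLDD private area per op.
 */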
1758 nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
1760 struct nvme_fc_fcp_op *aen_op;
1761 struct nvme_fc_cmd_iu *cmdiu;
1762 struct nvme_command *sqe;
1766 aen_op = ctrl->aen_ops;
1767 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
1768 private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
1773 cmdiu = &aen_op->cmd_iu;
1775 ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
1776 aen_op, (struct request *)NULL,
1777 (NVME_AQ_BLK_MQ_DEPTH + i));
1783 aen_op->flags = FCOP_FLAGS_AEN;
1784 aen_op->fcp_req.private = private;
1786 memset(sqe, 0, sizeof(*sqe));
1787 sqe->common.opcode = nvme_admin_async_event;
1788 /* Note: core layer may overwrite the sqe.command_id value */
1789 sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
1795 nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
1797 struct nvme_fc_fcp_op *aen_op;
1800 aen_op = ctrl->aen_ops;
1801 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
1802 if (!aen_op->fcp_req.private)
1805 __nvme_fc_exit_request(ctrl, aen_op);
1807 kfree(aen_op->fcp_req.private);
1808 aen_op->fcp_req.private = NULL;
1813 __nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
1816 struct nvme_fc_queue *queue = &ctrl->queues[qidx];
1818 hctx->driver_data = queue;
1823 nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1824 unsigned int hctx_idx)
1826 struct nvme_fc_ctrl *ctrl = data;
1828 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
1834 nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1835 unsigned int hctx_idx)
1837 struct nvme_fc_ctrl *ctrl = data;
1839 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
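/*
 * (Re)initialize the software queue state. The command capsule length
 * for io queues is derived from the controller's ioccsz (16-byte
 * units); the admin queue uses the plain nvme_command size.
 */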
1845 nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
1847 struct nvme_fc_queue *queue;
1849 queue = &ctrl->queues[idx];
1850 memset(queue, 0, sizeof(*queue));
1853 atomic_set(&queue->csn, 0);
1854 queue->dev = ctrl->dev;
1857 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
1859 queue->cmnd_capsule_len = sizeof(struct nvme_command);
1862 * Considered whether we should allocate buffers for all SQEs
1863 * and CQEs and dma map them - mapping their respective entries
1864 * into the request structures (kernel vm addr and dma address)
1865 * thus the driver could use the buffers/mappings directly.
1866 * It only makes sense if the LLDD would use them for its
1867 * messaging api. It's very unlikely most adapter apis would use
1868 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload
1869 * structures were used instead.
1874 * This routine terminates a queue at the transport level.
1875 * The transport has already ensured that all outstanding ios on
1876 * the queue have been terminated.
1877 * The transport will send a Disconnect LS request to terminate
1878 * the queue's connection. Termination of the admin queue will also
1879 * terminate the association at the target.
1882 nvme_fc_free_queue(struct nvme_fc_queue *queue)
1884 if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
1887 clear_bit(NVME_FC_Q_LIVE, &queue->flags);
1889 * Current implementation never disconnects a single queue.
1890 * It always terminates a whole association. So there is never
1891 * a disconnect(queue) LS sent to the target.
1894 queue->connection_id = 0;
1895 atomic_set(&queue->csn, 0);
1899 __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
1900 struct nvme_fc_queue *queue, unsigned int qidx)
1902 if (ctrl->lport->ops->delete_queue)
1903 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
1904 queue->lldd_handle);
1905 queue->lldd_handle = NULL;
1909 nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
1913 for (i = 1; i < ctrl->ctrl.queue_count; i++)
1914 nvme_fc_free_queue(&ctrl->queues[i]);
1918 __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
1919 struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
1923 queue->lldd_handle = NULL;
1924 if (ctrl->lport->ops->create_queue)
1925 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
1926 qidx, qsize, &queue->lldd_handle);
1932 nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
1934 struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
1937 for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
1938 __nvme_fc_delete_hw_queue(ctrl, queue, i);
1942 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
1944 struct nvme_fc_queue *queue = &ctrl->queues[1];
1947 for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
1948 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
1957 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
1962 nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
1966 for (i = 1; i < ctrl->ctrl.queue_count; i++) {
1967 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
1971 ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
1975 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
1982 nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
1986 for (i = 1; i < ctrl->ctrl.queue_count; i++)
1987 nvme_fc_init_queue(ctrl, i);
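/*
 * Final teardown once the last controller reference is dropped: free
 * the tag sets and queues, unlink the controller from its rport,
 * release the controller number and connect options, and drop the
 * device and rport references.
 */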
1991 nvme_fc_ctrl_free(struct kref *ref)
1993 struct nvme_fc_ctrl *ctrl =
1994 container_of(ref, struct nvme_fc_ctrl, ref);
1995 unsigned long flags;
1997 if (ctrl->ctrl.tagset) {
1998 blk_cleanup_queue(ctrl->ctrl.connect_q);
1999 blk_mq_free_tag_set(&ctrl->tag_set);
2002 /* remove from rport list */
2003 spin_lock_irqsave(&ctrl->rport->lock, flags);
2004 list_del(&ctrl->ctrl_list);
2005 spin_unlock_irqrestore(&ctrl->rport->lock, flags);
2007 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
2008 blk_cleanup_queue(ctrl->ctrl.admin_q);
2009 blk_mq_free_tag_set(&ctrl->admin_tag_set);
2011 kfree(ctrl->queues);
2013 put_device(ctrl->dev);
2014 nvme_fc_rport_put(ctrl->rport);
2016 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
2017 if (ctrl->ctrl.opts)
2018 nvmf_free_options(ctrl->ctrl.opts);
2023 nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
2025 kref_put(&ctrl->ref, nvme_fc_ctrl_free);
2029 nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
2031 return kref_get_unless_zero(&ctrl->ref);
2035 * All accesses from nvme core layer done - can now free the
2036 * controller. Called after last nvme_put_ctrl() call
2039 nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
2041 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2043 WARN_ON(nctrl != &ctrl->ctrl);
2045 nvme_fc_ctrl_put(ctrl);
2049 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
2054 * if an error (io timeout, etc) occurs while (re)connecting,
2055 * it's an error on creating the new association.
2056 * Start the error recovery thread if it hasn't already
2057 * been started. It is expected there could be multiple
2058 * ios hitting this path before things are cleaned up.
2060 if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
2061 active = atomic_xchg(&ctrl->err_work_active, 1);
2062 if (!active && !queue_work(nvme_fc_wq, &ctrl->err_work)) {
2063 atomic_set(&ctrl->err_work_active, 0);
2069 /* Otherwise, only proceed if in LIVE state - e.g. on first error */
2070 if (ctrl->ctrl.state != NVME_CTRL_LIVE)
2073 dev_warn(ctrl->ctrl.device,
2074 "NVME-FC{%d}: transport association error detected: %s\n",
2075 ctrl->cnum, errmsg);
2076 dev_warn(ctrl->ctrl.device,
2077 "NVME-FC{%d}: resetting controller\n", ctrl->cnum);
2079 nvme_reset_ctrl(&ctrl->ctrl);
2082 static enum blk_eh_timer_return
2083 nvme_fc_timeout(struct request *rq, bool reserved)
2085 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2086 struct nvme_fc_ctrl *ctrl = op->ctrl;
2089 * we can't individually ABTS an io without affecting the queue,
2090 * thus killing the queue, and thus the association.
2091 * So resolve by performing a controller reset, which will stop
2092 * the host/io stack, terminate the association on the link,
2093 * and recreate an association on the link.
2095 nvme_fc_error_recovery(ctrl, "io timeout error");
2098 * the io abort has been initiated. Have the reset timer
2099 * restarted and the abort completion will complete the io
2100 * shortly. Avoids a synchronous wait while the abort finishes.
2102 return BLK_EH_RESET_TIMER;
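/*
 * Build and DMA-map the chained scatterlist for a request so it can be
 * handed to the LLDD. Requests with no physical segments carry no
 * payload and skip mapping entirely.
 */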
2106 nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2107 struct nvme_fc_fcp_op *op)
2109 struct nvmefc_fcp_req *freq = &op->fcp_req;
2110 enum dma_data_direction dir;
2115 if (!blk_rq_nr_phys_segments(rq))
2118 freq->sg_table.sgl = freq->first_sgl;
2119 ret = sg_alloc_table_chained(&freq->sg_table,
2120 blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
2125 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
2126 WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
2127 dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
2128 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
2130 if (unlikely(freq->sg_cnt <= 0)) {
2131 sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
2137 * TODO: blk_integrity_rq(rq) for DIF
2143 nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2144 struct nvme_fc_fcp_op *op)
2146 struct nvmefc_fcp_req *freq = &op->fcp_req;
2151 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
2152 ((rq_data_dir(rq) == WRITE) ?
2153 DMA_TO_DEVICE : DMA_FROM_DEVICE));
2155 nvme_cleanup_cmd(rq);
2157 sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
2163 * In FC, the queue is a logical thing. At transport connect, the target
2164 * creates its "queue" and returns a handle that is to be given to the
2165 * target whenever it posts something to the corresponding SQ. When an
2166 * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
2167 * command contained within the SQE, an io, and assigns a FC exchange
2168 * to it. The SQE and the associated SQ handle are sent in the initial
2169 * CMD IU sent on the exchange. All transfers relative to the io occur
2170 * as part of the exchange. The CQE is the last thing for the io,
2171 * which is transferred (explicitly or implicitly) with the RSP IU
2172 * sent on the exchange. After the CQE is received, the FC exchange is
2173 * terminated and the exchange may be used on a different io.
2175 * The transport to LLDD api has the transport making a request for a
2176 * new fcp io request to the LLDD. The LLDD then allocates a FC exchange
2177 * resource and transfers the command. The LLDD will then process all
2178 * steps to complete the io. Upon completion, the transport done routine
2181 * So - while the operation is outstanding to the LLDD, there is a link
2182 * level FC exchange resource that is also outstanding. This must be
2183 * considered in all cleanup operations.
2186 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
2187 struct nvme_fc_fcp_op *op, u32 data_len,
2188 enum nvmefc_fcp_datadir io_dir)
2190 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2191 struct nvme_command *sqe = &cmdiu->sqe;
2195 * before attempting to send the io, check to see if we believe
2196 * the target device is present
2198 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
2199 return BLK_STS_RESOURCE;
2201 if (!nvme_fc_ctrl_get(ctrl))
2202 return BLK_STS_IOERR;
2204 /* format the FC-NVME CMD IU and fcp_req */
2205 cmdiu->connection_id = cpu_to_be64(queue->connection_id);
2206 cmdiu->data_len = cpu_to_be32(data_len);
2208 case NVMEFC_FCP_WRITE:
2209 cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
2211 case NVMEFC_FCP_READ:
2212 cmdiu->flags = FCNVME_CMD_FLAGS_READ;
2214 case NVMEFC_FCP_NODATA:
2218 op->fcp_req.payload_length = data_len;
2219 op->fcp_req.io_dir = io_dir;
2220 op->fcp_req.transferred_length = 0;
2221 op->fcp_req.rcv_rsplen = 0;
2222 op->fcp_req.status = NVME_SC_SUCCESS;
2223 op->fcp_req.sqid = cpu_to_le16(queue->qnum);
2226 * validate per fabric rules, set fields mandated by fabric spec
2227 * as well as those by FC-NVME spec.
2229 WARN_ON_ONCE(sqe->common.metadata);
2230 sqe->common.flags |= NVME_CMD_SGL_METABUF;
2233 * format SQE DPTR field per FC-NVME rules:
2234 * type=0x5 Transport SGL Data Block Descriptor
2235 * subtype=0xA Transport-specific value
2237 * length=length of the data series
2239 sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2240 NVME_SGL_FMT_TRANSPORT_A;
2241 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
2242 sqe->rw.dptr.sgl.addr = 0;
2244 if (!(op->flags & FCOP_FLAGS_AEN)) {
2245 ret = nvme_fc_map_data(ctrl, op->rq, op);
2247 nvme_cleanup_cmd(op->rq);
2248 nvme_fc_ctrl_put(ctrl);
2249 if (ret == -ENOMEM || ret == -EAGAIN)
2250 return BLK_STS_RESOURCE;
2251 return BLK_STS_IOERR;
2255 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
2256 sizeof(op->cmd_iu), DMA_TO_DEVICE);
2258 atomic_set(&op->state, FCPOP_STATE_ACTIVE);
2260 if (!(op->flags & FCOP_FLAGS_AEN))
2261 blk_mq_start_request(op->rq);
2263 cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
2264 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
2265 &ctrl->rport->remoteport,
2266 queue->lldd_handle, &op->fcp_req);
2270 * If the lld fails to send the command, is there an issue with
2271 * the csn value? If the command that fails is the Connect,
2272 * no - as the connection won't be live. If it is a command
2273 * post-connect, it's possible a gap in csn may be created.
2274 * Does this matter? As Linux initiators don't send fused
2275 * commands, no. The gap would exist, but as there's nothing
2276 * that depends on csn order to be delivered on the target
2277 * side, it shouldn't hurt. It would be difficult for a
2278 * target to even detect the csn gap as it has no idea when the
2279 * cmd with the csn was supposed to arrive.
2281 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
2282 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
2284 if (!(op->flags & FCOP_FLAGS_AEN))
2285 nvme_fc_unmap_data(ctrl, op->rq, op);
2287 nvme_fc_ctrl_put(ctrl);
2289 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
2291 return BLK_STS_IOERR;
2293 return BLK_STS_RESOURCE;
2300 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
2301 const struct blk_mq_queue_data *bd)
2303 struct nvme_ns *ns = hctx->queue->queuedata;
2304 struct nvme_fc_queue *queue = hctx->driver_data;
2305 struct nvme_fc_ctrl *ctrl = queue->ctrl;
2306 struct request *rq = bd->rq;
2307 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2308 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2309 struct nvme_command *sqe = &cmdiu->sqe;
2310 enum nvmefc_fcp_datadir io_dir;
2311 bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
2315 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
2316 !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2317 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
2319 ret = nvme_setup_cmd(ns, rq, sqe);
2324 * nvme core doesn't quite treat the rq opaquely. Commands such
2325 * as WRITE ZEROES will return a non-zero rq payload_bytes yet
2326 * there is no actual payload to be transferred.
2327 * To get it right, key data transmission on there being 1 or
2328 * more physical segments in the sg list. If there are no
2329 * physical segments, there is no payload.
2331 if (blk_rq_nr_phys_segments(rq)) {
2332 data_len = blk_rq_payload_bytes(rq);
2333 io_dir = ((rq_data_dir(rq) == WRITE) ?
2334 NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
2337 io_dir = NVMEFC_FCP_NODATA;
2341 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
2345 nvme_fc_submit_async_event(struct nvme_ctrl *arg)
2347 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
2348 struct nvme_fc_fcp_op *aen_op;
2349 unsigned long flags;
2350 bool terminating = false;
2353 spin_lock_irqsave(&ctrl->lock, flags);
2354 if (ctrl->flags & FCCTRL_TERMIO)
2356 spin_unlock_irqrestore(&ctrl->lock, flags);
2361 aen_op = &ctrl->aen_ops[0];
2363 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
2366 dev_err(ctrl->ctrl.device,
2367 "failed async event work\n");
2371 nvme_fc_complete_rq(struct request *rq)
2373 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2374 struct nvme_fc_ctrl *ctrl = op->ctrl;
2376 atomic_set(&op->state, FCPOP_STATE_IDLE);
2378 nvme_fc_unmap_data(ctrl, rq, op);
2379 nvme_complete_rq(rq);
2380 nvme_fc_ctrl_put(ctrl);
2384 * This routine is used by the transport when it needs to find active
2385 * io on a queue that is to be terminated. The transport uses
2386 * blk_mq_tagset_busy_iter() to find the busy requests; it then invokes
2387 * this routine to kill them one at a time.
2389 * As FC allocates an FC exchange for each io, the transport must contact
2390 * the LLDD to terminate the exchange, thus releasing the FC exchange.
2391 * After terminating the exchange the LLDD will call the transport's
2392 * normal io done path for the request, but it will have an aborted
2393 * status. The done path will return the io request back to the block
2394 * layer with an error status.
2397 nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
2399 struct nvme_ctrl *nctrl = data;
2400 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2401 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
2403 __nvme_fc_abort_op(ctrl, op);
2408 static const struct blk_mq_ops nvme_fc_mq_ops = {
2409 .queue_rq = nvme_fc_queue_rq,
2410 .complete = nvme_fc_complete_rq,
2411 .init_request = nvme_fc_init_request,
2412 .exit_request = nvme_fc_exit_request,
2413 .init_hctx = nvme_fc_init_hctx,
2414 .timeout = nvme_fc_timeout,
2418 nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
2420 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2421 unsigned int nr_io_queues;
2424 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2425 ctrl->lport->ops->max_hw_queues);
2426 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2428 dev_info(ctrl->ctrl.device,
2429 "set_queue_count failed: %d\n", ret);
2433 ctrl->ctrl.queue_count = nr_io_queues + 1;
2437 nvme_fc_init_io_queues(ctrl);
2439 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
2440 ctrl->tag_set.ops = &nvme_fc_mq_ops;
2441 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
2442 ctrl->tag_set.reserved_tags = 1; /* fabric connect */
2443 ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
2444 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
2445 ctrl->tag_set.cmd_size =
2446 struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
2447 ctrl->lport->ops->fcprqst_priv_sz);
2448 ctrl->tag_set.driver_data = ctrl;
2449 ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
2450 ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
2452 ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
2456 ctrl->ctrl.tagset = &ctrl->tag_set;
2458 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
2459 if (IS_ERR(ctrl->ctrl.connect_q)) {
2460 ret = PTR_ERR(ctrl->ctrl.connect_q);
2461 goto out_free_tag_set;
2464 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2466 goto out_cleanup_blk_queue;
2468 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2470 goto out_delete_hw_queues;
2472 ctrl->ioq_live = true;
2476 out_delete_hw_queues:
2477 nvme_fc_delete_hw_io_queues(ctrl);
2478 out_cleanup_blk_queue:
2479 blk_cleanup_queue(ctrl->ctrl.connect_q);
2481 blk_mq_free_tag_set(&ctrl->tag_set);
2482 nvme_fc_free_io_queues(ctrl);
2484 /* force put free routine to ignore io queues */
2485 ctrl->ctrl.tagset = NULL;
2491 nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
2493 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2494 u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1;
2495 unsigned int nr_io_queues;
2498 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2499 ctrl->lport->ops->max_hw_queues);
2500 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2502 dev_info(ctrl->ctrl.device,
2503 "set_queue_count failed: %d\n", ret);
2507 if (!nr_io_queues && prior_ioq_cnt) {
2508 dev_info(ctrl->ctrl.device,
2509 "Fail Reconnect: At least 1 io queue "
2510 "required (was %d)\n", prior_ioq_cnt);
2514 ctrl->ctrl.queue_count = nr_io_queues + 1;
2515 /* check for io queues existing */
2516 if (ctrl->ctrl.queue_count == 1)
2519 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2521 goto out_free_io_queues;
2523 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2525 goto out_delete_hw_queues;
2527 if (prior_ioq_cnt != nr_io_queues)
2528 dev_info(ctrl->ctrl.device,
2529 "reconnect: revising io queue count from %d to %d\n",
2530 prior_ioq_cnt, nr_io_queues);
2531 blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
2535 out_delete_hw_queues:
2536 nvme_fc_delete_hw_io_queues(ctrl);
2538 nvme_fc_free_io_queues(ctrl);
2543 nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
2545 struct nvme_fc_lport *lport = rport->lport;
2547 atomic_inc(&lport->act_rport_cnt);
2551 nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
2553 struct nvme_fc_lport *lport = rport->lport;
2556 cnt = atomic_dec_return(&lport->act_rport_cnt);
2557 if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
2558 lport->ops->localport_delete(&lport->localport);
2562 nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
2564 struct nvme_fc_rport *rport = ctrl->rport;
2567 if (ctrl->assoc_active)
2570 ctrl->assoc_active = true;
2571 cnt = atomic_inc_return(&rport->act_ctrl_cnt);
2573 nvme_fc_rport_active_on_lport(rport);
2579 nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
2581 struct nvme_fc_rport *rport = ctrl->rport;
2582 struct nvme_fc_lport *lport = rport->lport;
2585 /* ctrl->assoc_active=false will be set independently */
2587 cnt = atomic_dec_return(&rport->act_ctrl_cnt);
2589 if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
2590 lport->ops->remoteport_delete(&rport->remoteport);
2591 nvme_fc_rport_inactive_on_lport(rport);
2598 * This routine restarts the controller on the host side, and
2599 * on the link side, recreates the controller association.
2602 nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
2604 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2608 ++ctrl->ctrl.nr_reconnects;
2610 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
2613 if (nvme_fc_ctlr_active_on_rport(ctrl))
2616 dev_info(ctrl->ctrl.device,
2617 "NVME-FC{%d}: create association : host wwpn 0x%016llx "
2618 " rport wwpn 0x%016llx: NQN \"%s\"\n",
2619 ctrl->cnum, ctrl->lport->localport.port_name,
2620 ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn);
2623 * Create the admin queue
2626 ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
2629 goto out_free_queue;
2631 ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
2632 NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
2634 goto out_delete_hw_queue;
2636 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
2638 ret = nvmf_connect_admin_queue(&ctrl->ctrl);
2640 goto out_disconnect_admin_queue;
2642 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
2645 * Check controller capabilities
2647 * todo:- add code to check if ctrl attributes changed from
2648 * prior connection values
2651 ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
2653 dev_err(ctrl->ctrl.device,
2654 "prop_get NVME_REG_CAP failed\n");
2655 goto out_disconnect_admin_queue;
2659 min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);
2661 ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
2663 goto out_disconnect_admin_queue;
2665 ctrl->ctrl.max_hw_sectors =
2666 (ctrl->lport->ops->max_sgl_segments - 1) << (PAGE_SHIFT - 9);
2668 ret = nvme_init_identify(&ctrl->ctrl);
2670 goto out_disconnect_admin_queue;
2674 /* FC-NVME does not have other data in the capsule */
2675 if (ctrl->ctrl.icdoff) {
2676 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
2678 goto out_disconnect_admin_queue;
2681 /* FC-NVME supports normal SGL Data Block Descriptors */
2683 if (opts->queue_size > ctrl->ctrl.maxcmd) {
2684 /* warn if maxcmd is lower than queue_size */
2685 dev_warn(ctrl->ctrl.device,
2686 "queue_size %zu > ctrl maxcmd %u, reducing "
2688 opts->queue_size, ctrl->ctrl.maxcmd);
2689 opts->queue_size = ctrl->ctrl.maxcmd;
2692 if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
2693 /* warn if sqsize is lower than queue_size */
2694 dev_warn(ctrl->ctrl.device,
2695 "queue_size %zu > ctrl sqsize %u, clamping down\n",
2696 opts->queue_size, ctrl->ctrl.sqsize + 1);
2697 opts->queue_size = ctrl->ctrl.sqsize + 1;
2700 ret = nvme_fc_init_aen_ops(ctrl);
2702 goto out_term_aen_ops;
2705 * Create the io queues
2708 if (ctrl->ctrl.queue_count > 1) {
2709 if (!ctrl->ioq_live)
2710 ret = nvme_fc_create_io_queues(ctrl);
2712 ret = nvme_fc_recreate_io_queues(ctrl);
2714 goto out_term_aen_ops;
2717 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
2719 ctrl->ctrl.nr_reconnects = 0;
2722 nvme_start_ctrl(&ctrl->ctrl);
2724 return 0; /* Success */
2727 nvme_fc_term_aen_ops(ctrl);
2728 out_disconnect_admin_queue:
2729 /* send a Disconnect(association) LS to fc-nvme target */
2730 nvme_fc_xmt_disconnect_assoc(ctrl);
2731 out_delete_hw_queue:
2732 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
2734 nvme_fc_free_queue(&ctrl->queues[0]);
2735 ctrl->assoc_active = false;
2736 nvme_fc_ctlr_inactive_on_rport(ctrl);
2742 * This routine stops operation of the controller on the host side.
2743 * On the host os stack side: Admin and IO queues are stopped,
2744 * outstanding ios on them are terminated via FC ABTS.
2745 * On the link side: the association is terminated.
2748 nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
2750 unsigned long flags;
2752 if (!ctrl->assoc_active)
2754 ctrl->assoc_active = false;
2756 spin_lock_irqsave(&ctrl->lock, flags);
2757 ctrl->flags |= FCCTRL_TERMIO;
2759 spin_unlock_irqrestore(&ctrl->lock, flags);
2762 * If io queues are present, stop them and terminate all outstanding
2763 * ios on them. As FC allocates an FC exchange for each io, the
2764 * transport must contact the LLDD to terminate the exchange,
2765 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
2766 * to tell us what ios are busy and invoke a transport routine
2767 * to kill them with the LLDD. After terminating the exchange
2768 * the LLDD will call the transport's normal io done path, but it
2769 * will have an aborted status. The done path will return the
2770 * io requests back to the block layer as part of normal completions
2771 * (but with error status).
2773 if (ctrl->ctrl.queue_count > 1) {
2774 nvme_stop_queues(&ctrl->ctrl);
2775 blk_mq_tagset_busy_iter(&ctrl->tag_set,
2776 nvme_fc_terminate_exchange, &ctrl->ctrl);
2780 * Other transports, which don't have link-level contexts bound
2781 * to sqe's, would try to gracefully shut down the controller by
2782 * writing the registers for shutdown and polling (call
2783 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
2784 * just aborted and we will wait on those contexts, and given
2785 * there was no indication of how live the controller is on the
2786 * link, don't send more io to create more contexts for the
2787 * shutdown. Let the controller fail via keepalive failure if
2788 * it's still present.
2792 * clean up the admin queue. Same thing as above.
2793 * use blk_mq_tagset_busy_iter() and the transport routine to
2794 * terminate the exchanges.
2796 blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
2797 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
2798 nvme_fc_terminate_exchange, &ctrl->ctrl);
2800 /* kill the aens as they are a separate path */
2801 nvme_fc_abort_aen_ops(ctrl);
2803 /* wait for all io that had to be aborted */
2804 spin_lock_irq(&ctrl->lock);
2805 wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
2806 ctrl->flags &= ~FCCTRL_TERMIO;
2807 spin_unlock_irq(&ctrl->lock);
2809 nvme_fc_term_aen_ops(ctrl);
2812 * send a Disconnect(association) LS to fc-nvme target
2813 * Note: could have been sent at top of process, but
2814 * cleaner on link traffic if sent after the aborts complete.
2815 * Note: if association doesn't exist, association_id will be 0
2817 if (ctrl->association_id)
2818 nvme_fc_xmt_disconnect_assoc(ctrl);
2820 if (ctrl->ctrl.tagset) {
2821 nvme_fc_delete_hw_io_queues(ctrl);
2822 nvme_fc_free_io_queues(ctrl);
2825 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
2826 nvme_fc_free_queue(&ctrl->queues[0]);
2828 /* re-enable the admin_q so anything new can fast fail */
2829 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
2831 /* resume the io queues so that things will fast fail */
2832 nvme_start_queues(&ctrl->ctrl);
2834 nvme_fc_ctlr_inactive_on_rport(ctrl);
2838 nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
2840 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2842 cancel_work_sync(&ctrl->err_work);
2843 cancel_delayed_work_sync(&ctrl->connect_work);
2845 * kill the association on the link side. this will block
2846 * waiting for io to terminate
2848 nvme_fc_delete_association(ctrl);
2852 nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
2854 struct nvme_fc_rport *rport = ctrl->rport;
2855 struct nvme_fc_remote_port *portptr = &rport->remoteport;
2856 unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
2859 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
2862 if (portptr->port_state == FC_OBJSTATE_ONLINE)
2863 dev_info(ctrl->ctrl.device,
2864 "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
2865 ctrl->cnum, status);
2866 else if (time_after_eq(jiffies, rport->dev_loss_end))
2869 if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
2870 if (portptr->port_state == FC_OBJSTATE_ONLINE)
2871 dev_info(ctrl->ctrl.device,
2872 "NVME-FC{%d}: Reconnect attempt in %ld "
2874 ctrl->cnum, recon_delay / HZ);
2875 else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
2876 recon_delay = rport->dev_loss_end - jiffies;
2878 queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
2880 if (portptr->port_state == FC_OBJSTATE_ONLINE)
2881 dev_warn(ctrl->ctrl.device,
2882 "NVME-FC{%d}: Max reconnect attempts (%d) "
2884 ctrl->cnum, ctrl->ctrl.nr_reconnects);
2886 dev_warn(ctrl->ctrl.device,
2887 "NVME-FC{%d}: dev_loss_tmo (%d) expired "
2888 "while waiting for remoteport connectivity.\n",
2889 ctrl->cnum, portptr->dev_loss_tmo);
2890 WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
2895 __nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
2897 nvme_stop_keep_alive(&ctrl->ctrl);
2899 /* will block while waiting for io to terminate */
2900 nvme_fc_delete_association(ctrl);
2902 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
2903 !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
2904 dev_err(ctrl->ctrl.device,
2905 "NVME-FC{%d}: error_recovery: Couldn't change state "
2906 "to CONNECTING\n", ctrl->cnum);
2910 nvme_fc_reset_ctrl_work(struct work_struct *work)
2912 struct nvme_fc_ctrl *ctrl =
2913 container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
2916 __nvme_fc_terminate_io(ctrl);
2918 nvme_stop_ctrl(&ctrl->ctrl);
2920 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
2921 ret = nvme_fc_create_association(ctrl);
2926 nvme_fc_reconnect_or_delete(ctrl, ret);
2928 dev_info(ctrl->ctrl.device,
2929 "NVME-FC{%d}: controller reset complete\n",
2934 nvme_fc_connect_err_work(struct work_struct *work)
2936 struct nvme_fc_ctrl *ctrl =
2937 container_of(work, struct nvme_fc_ctrl, err_work);
2939 __nvme_fc_terminate_io(ctrl);
2941 atomic_set(&ctrl->err_work_active, 0);
2944 * Rescheduling the connection after recovering
2945 * from the io error is left to the reconnect work
2946 * item, which should have been stalled waiting on
2947 * the io whose error scheduled this work.
2951 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
2953 .module = THIS_MODULE,
2954 .flags = NVME_F_FABRICS,
2955 .reg_read32 = nvmf_reg_read32,
2956 .reg_read64 = nvmf_reg_read64,
2957 .reg_write32 = nvmf_reg_write32,
2958 .free_ctrl = nvme_fc_nvme_ctrl_freed,
2959 .submit_async_event = nvme_fc_submit_async_event,
2960 .delete_ctrl = nvme_fc_delete_ctrl,
2961 .get_address = nvmf_get_address,
2965 nvme_fc_connect_ctrl_work(struct work_struct *work)
2969 struct nvme_fc_ctrl *ctrl =
2970 container_of(to_delayed_work(work),
2971 struct nvme_fc_ctrl, connect_work);
2973 ret = nvme_fc_create_association(ctrl);
2975 nvme_fc_reconnect_or_delete(ctrl, ret);
2977 dev_info(ctrl->ctrl.device,
2978 "NVME-FC{%d}: controller connect complete\n",
2983 static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
2984 .queue_rq = nvme_fc_queue_rq,
2985 .complete = nvme_fc_complete_rq,
2986 .init_request = nvme_fc_init_request,
2987 .exit_request = nvme_fc_exit_request,
2988 .init_hctx = nvme_fc_init_admin_hctx,
2989 .timeout = nvme_fc_timeout,
2994 * Fails a controller request if it matches an existing controller
2995 * (association) with the same tuple:
2996 * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN>
2998 * The ports don't need to be compared as they are intrinsically
2999 * already matched by the port pointers supplied.
3002 nvme_fc_existing_controller(struct nvme_fc_rport *rport,
3003 struct nvmf_ctrl_options *opts)
3005 struct nvme_fc_ctrl *ctrl;
3006 unsigned long flags;
3009 spin_lock_irqsave(&rport->lock, flags);
3010 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
3011 found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
3015 spin_unlock_irqrestore(&rport->lock, flags);
3020 static struct nvme_ctrl *
3021 nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
3022 struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
3024 struct nvme_fc_ctrl *ctrl;
3025 unsigned long flags;
3028 if (!(rport->remoteport.port_role &
3029 (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
3034 if (!opts->duplicate_connect &&
3035 nvme_fc_existing_controller(rport, opts)) {
3040 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
3046 idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
3052 ctrl->ctrl.opts = opts;
3053 ctrl->ctrl.nr_reconnects = 0;
3055 ctrl->ctrl.numa_node = dev_to_node(lport->dev);
3057 ctrl->ctrl.numa_node = NUMA_NO_NODE;
3058 INIT_LIST_HEAD(&ctrl->ctrl_list);
3059 ctrl->lport = lport;
3060 ctrl->rport = rport;
3061 ctrl->dev = lport->dev;
3063 ctrl->ioq_live = false;
3064 ctrl->assoc_active = false;
3065 atomic_set(&ctrl->err_work_active, 0);
3066 init_waitqueue_head(&ctrl->ioabort_wait);
3068 get_device(ctrl->dev);
3069 kref_init(&ctrl->ref);
3071 INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
3072 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
3073 INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work);
3074 spin_lock_init(&ctrl->lock);
3076 /* io queue count */
3077 ctrl->ctrl.queue_count = min_t(unsigned int,
3079 lport->ops->max_hw_queues);
3080 ctrl->ctrl.queue_count++; /* +1 for admin queue */
3082 ctrl->ctrl.sqsize = opts->queue_size - 1;
3083 ctrl->ctrl.kato = opts->kato;
3084 ctrl->ctrl.cntlid = 0xffff;
3087 ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
3088 sizeof(struct nvme_fc_queue), GFP_KERNEL);
3092 nvme_fc_init_queue(ctrl, 0);
3094 memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
3095 ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
3096 ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
3097 ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
3098 ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
3099 ctrl->admin_tag_set.cmd_size =
3100 struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
3101 ctrl->lport->ops->fcprqst_priv_sz);
3102 ctrl->admin_tag_set.driver_data = ctrl;
3103 ctrl->admin_tag_set.nr_hw_queues = 1;
3104 ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
3105 ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
3107 ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
3109 goto out_free_queues;
3110 ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
3112 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
3113 if (IS_ERR(ctrl->ctrl.admin_q)) {
3114 ret = PTR_ERR(ctrl->ctrl.admin_q);
3115 goto out_free_admin_tag_set;
3119 * Would have been nice to init io queues tag set as well.
3120 * However, we require interaction from the controller
3121 * for max io queue count before we can do so.
3122 * Defer this to the connect path.
3125 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
3127 goto out_cleanup_admin_q;
3129 /* at this point, teardown path changes to ref counting on nvme ctrl */
3131 spin_lock_irqsave(&rport->lock, flags);
3132 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
3133 spin_unlock_irqrestore(&rport->lock, flags);
3135 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
3136 !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
3137 dev_err(ctrl->ctrl.device,
3138 "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
3142 nvme_get_ctrl(&ctrl->ctrl);
3144 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
3145 nvme_put_ctrl(&ctrl->ctrl);
3146 dev_err(ctrl->ctrl.device,
3147 "NVME-FC{%d}: failed to schedule initial connect\n",
3152 flush_delayed_work(&ctrl->connect_work);
3154 dev_info(ctrl->ctrl.device,
3155 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
3156 ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
3161 nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
3162 cancel_work_sync(&ctrl->ctrl.reset_work);
3163 cancel_work_sync(&ctrl->err_work);
3164 cancel_delayed_work_sync(&ctrl->connect_work);
3166 ctrl->ctrl.opts = NULL;
3168 /* initiate nvme ctrl ref counting teardown */
3169 nvme_uninit_ctrl(&ctrl->ctrl);
3171 /* Remove core ctrl ref. */
3172 nvme_put_ctrl(&ctrl->ctrl);
3174 /* as we're past the point where we transition to the ref
3175 * counting teardown path, if we return a bad pointer here,
3176 * the calling routine, thinking it's prior to the
3177 * transition, will do an rport put. Since the teardown
3178 * path also does a rport put, we do an extra get here
3179 * so that proper order/teardown happens.
3181 nvme_fc_rport_get(rport);
3183 return ERR_PTR(-EIO);
3185 out_cleanup_admin_q:
3186 blk_cleanup_queue(ctrl->ctrl.admin_q);
3187 out_free_admin_tag_set:
3188 blk_mq_free_tag_set(&ctrl->admin_tag_set);
3190 kfree(ctrl->queues);
3192 put_device(ctrl->dev);
3193 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
3197 /* exit via here doesn't follow the ctrl ref counting teardown path */
3198 return ERR_PTR(ret);
3202 struct nvmet_fc_traddr {
3208 __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
3212 if (match_u64(sstr, &token64))
3220 * This routine validates and extracts the WWNs from the TRADDR string.
3221 * As the kernel parsers need a 0x prefix to determine the number base,
3222 * always build the string to parse with a 0x prefix before parsing names.
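/*
 * For illustration only (the WWN values below are made up), the two
 * accepted traddr forms are:
 *   nn-0x20000090fa942779:pn-0x10000090fa942779
 *   nn-20000090fa942779:pn-10000090fa942779
 */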
3225 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
3227 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
3228 substring_t wwn = { name, &name[sizeof(name)-1] };
3229 int nnoffset, pnoffset;
3231 /* validate that the string is one of the two allowed formats */
3232 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
3233 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
3234 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
3235 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
3236 nnoffset = NVME_FC_TRADDR_OXNNLEN;
3237 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
3238 NVME_FC_TRADDR_OXNNLEN;
3239 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
3240 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
3241 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
3242 "pn-", NVME_FC_TRADDR_NNLEN))) {
3243 nnoffset = NVME_FC_TRADDR_NNLEN;
3244 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
3250 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
3252 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
3253 if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
3256 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
3257 if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
3263 pr_warn("%s: bad traddr string\n", __func__);
3267 static struct nvme_ctrl *
3268 nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
3270 struct nvme_fc_lport *lport;
3271 struct nvme_fc_rport *rport;
3272 struct nvme_ctrl *ctrl;
3273 struct nvmet_fc_traddr laddr = { 0L, 0L };
3274 struct nvmet_fc_traddr raddr = { 0L, 0L };
3275 unsigned long flags;
3278 ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
3279 if (ret || !raddr.nn || !raddr.pn)
3280 return ERR_PTR(-EINVAL);
3282 ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
3283 if (ret || !laddr.nn || !laddr.pn)
3284 return ERR_PTR(-EINVAL);
3286 /* find the host and remote ports to connect together */
3287 spin_lock_irqsave(&nvme_fc_lock, flags);
3288 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3289 if (lport->localport.node_name != laddr.nn ||
3290 lport->localport.port_name != laddr.pn)
3293 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3294 if (rport->remoteport.node_name != raddr.nn ||
3295 rport->remoteport.port_name != raddr.pn)
3298 /* if we fail to get a reference, fall through; we will error out */
3299 if (!nvme_fc_rport_get(rport))
3302 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3304 ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
3306 nvme_fc_rport_put(rport);
3310 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3312 pr_warn("%s: %s - %s combination not found\n",
3313 __func__, opts->traddr, opts->host_traddr);
3314 return ERR_PTR(-ENOENT);
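/*
 * Illustrative only: userspace typically reaches nvme_fc_create_ctrl()
 * above through the nvmf fabrics interface, e.g. with nvme-cli (the WWNs
 * and NQN below are placeholders):
 *
 *   nvme connect --transport=fc \
 *        --traddr=nn-0x20000090fa942779:pn-0x10000090fa942779 \
 *        --host-traddr=nn-0x20000090fa927590:pn-0x10000090fa927590 \
 *        --nqn=nqn.2016-06.io.example:subsystem1
 */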
3318 static struct nvmf_transport_ops nvme_fc_transport = {
3320 .module = THIS_MODULE,
3321 .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
3322 .allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
3323 .create_ctrl = nvme_fc_create_ctrl,
3326 /* Arbitrary successive failures max. With lots of subsystems could be high */
3327 #define DISCOVERY_MAX_FAIL 20
3329 static ssize_t nvme_fc_nvme_discovery_store(struct device *dev,
3330 struct device_attribute *attr, const char *buf, size_t count)
3332 unsigned long flags;
3333 LIST_HEAD(local_disc_list);
3334 struct nvme_fc_lport *lport;
3335 struct nvme_fc_rport *rport;
3338 spin_lock_irqsave(&nvme_fc_lock, flags);
3340 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3341 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3342 if (!nvme_fc_lport_get(lport))
3344 if (!nvme_fc_rport_get(rport)) {
3346 * This is a temporary condition. Upon restart
3347 * this rport will be gone from the list.
3349 * Revert the earlier lport get and retry. Anything
3350 * added to the list already will be skipped (as
3351 * they are no longer list_empty). Loops should
3352 * resume at rports that were not yet seen.
3354 nvme_fc_lport_put(lport);
3356 if (failcnt++ < DISCOVERY_MAX_FAIL)
3359 pr_err("nvme_discovery: too many reference "
3361 goto process_local_list;
3363 if (list_empty(&rport->disc_list))
3364 list_add_tail(&rport->disc_list,
3370 while (!list_empty(&local_disc_list)) {
3371 rport = list_first_entry(&local_disc_list,
3372 struct nvme_fc_rport, disc_list);
3373 list_del_init(&rport->disc_list);
3374 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3376 lport = rport->lport;
3377 /* signal discovery. Won't hurt if it repeats */
3378 nvme_fc_signal_discovery_scan(lport, rport);
3379 nvme_fc_rport_put(rport);
3380 nvme_fc_lport_put(lport);
3382 spin_lock_irqsave(&nvme_fc_lock, flags);
3384 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3388 static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);
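/*
 * Usage sketch, assuming the class and udev device registered below keep
 * their usual "fc" / "fc_udev_device" names (the attribute ignores the
 * written value and simply re-signals discovery for every known
 * lport/rport pair):
 *
 *   echo 1 > /sys/class/fc/fc_udev_device/nvme_discovery
 */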
3390 static struct attribute *nvme_fc_attrs[] = {
3391 &dev_attr_nvme_discovery.attr,
3395 static struct attribute_group nvme_fc_attr_group = {
3396 .attrs = nvme_fc_attrs,
3399 static const struct attribute_group *nvme_fc_attr_groups[] = {
3400 &nvme_fc_attr_group,
3404 static struct class fc_class = {
3406 .dev_groups = nvme_fc_attr_groups,
3407 .owner = THIS_MODULE,
3410 static int __init nvme_fc_init_module(void)
3414 nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
3420 * It is expected that in the future the kernel will combine
3421 * the FC-isms that are currently under scsi and now being
3422 * added to by NVME into a new standalone FC class. The SCSI
3423 * and NVME protocols and their devices would be under this
3426 * As we need something to post FC-specific udev events to,
3427 * specifically for nvme probe events, start by creating the
3428 * new device class. When the new standalone FC class is
3429 * put in place, this code will move to a more generic
3430 * location for the class.
3432 ret = class_register(&fc_class);
3434 pr_err("couldn't register class fc\n");
3435 goto out_destroy_wq;
3439 * Create a device for the FC-centric udev events
3441 fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL,
3443 if (IS_ERR(fc_udev_device)) {
3444 pr_err("couldn't create fc_udev device!\n");
3445 ret = PTR_ERR(fc_udev_device);
3446 goto out_destroy_class;
3449 ret = nvmf_register_transport(&nvme_fc_transport);
3451 goto out_destroy_device;
3456 device_destroy(&fc_class, MKDEV(0, 0));
3458 class_unregister(&fc_class);
3460 destroy_workqueue(nvme_fc_wq);
3466 nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
3468 struct nvme_fc_ctrl *ctrl;
3470 spin_lock(&rport->lock);
3471 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
3472 dev_warn(ctrl->ctrl.device,
3473 "NVME-FC{%d}: transport unloading: deleting ctrl\n",
3475 nvme_delete_ctrl(&ctrl->ctrl);
3477 spin_unlock(&rport->lock);
3481 nvme_fc_cleanup_for_unload(void)
3483 struct nvme_fc_lport *lport;
3484 struct nvme_fc_rport *rport;
3486 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3487 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3488 nvme_fc_delete_controllers(rport);
3493 static void __exit nvme_fc_exit_module(void)
3495 unsigned long flags;
3496 bool need_cleanup = false;
3498 spin_lock_irqsave(&nvme_fc_lock, flags);
3499 nvme_fc_waiting_to_unload = true;
3500 if (!list_empty(&nvme_fc_lport_list)) {
3501 need_cleanup = true;
3502 nvme_fc_cleanup_for_unload();
3504 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3506 pr_info("%s: waiting for ctlr deletes\n", __func__);
3507 wait_for_completion(&nvme_fc_unload_proceed);
3508 pr_info("%s: ctrl deletes complete\n", __func__);
3511 nvmf_unregister_transport(&nvme_fc_transport);
3513 ida_destroy(&nvme_fc_local_port_cnt);
3514 ida_destroy(&nvme_fc_ctrl_cnt);
3516 device_destroy(&fc_class, MKDEV(0, 0));
3517 class_unregister(&fc_class);
3518 destroy_workqueue(nvme_fc_wq);
3521 module_init(nvme_fc_init_module);
3522 module_exit(nvme_fc_exit_module);
3524 MODULE_LICENSE("GPL v2");