/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>

#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
/* *************************** Data Structures/Defines ****************** */


enum nvme_fc_queue_flags {
	NVME_FC_Q_CONNECTED = 0,
	NVME_FC_Q_LIVE,
};

#define NVME_FC_DEFAULT_DEV_LOSS_TMO	60	/* seconds */
struct nvme_fc_queue {
	struct nvme_fc_ctrl	*ctrl;
	struct device		*dev;
	struct blk_mq_hw_ctx	*hctx;
	void			*lldd_handle;
	size_t			cmnd_capsule_len;
	u32			qnum;
	u32			rqcnt;

	u64			connection_id;
	atomic_t		csn;

	unsigned long		flags;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
enum nvme_fcop_flags {
	FCOP_FLAGS_TERMIO	= (1 << 0),
	FCOP_FLAGS_AEN		= (1 << 1),
};
struct nvmefc_ls_req_op {
	struct nvmefc_ls_req	ls_req;

	struct nvme_fc_rport	*rport;
	struct nvme_fc_queue	*queue;
	u32			flags;

	int			ls_error;
	struct completion	ls_done;
	struct list_head	lsreq_list;	/* rport->ls_req_list */
	bool			req_queued;
};
enum nvme_fcpop_state {
	FCPOP_STATE_UNINIT	= 0,
	FCPOP_STATE_IDLE	= 1,
	FCPOP_STATE_ACTIVE	= 2,
	FCPOP_STATE_ABORTED	= 3,
	FCPOP_STATE_COMPLETE	= 4,
};
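
/*
 * Note on op state transitions as implemented below: ops start UNINIT,
 * move to IDLE once their IUs are DMA-mapped, are set ACTIVE when handed
 * to the LLDD, and are atomically exchanged to COMPLETE (normal done
 * path) or ABORTED (termination path) before being returned to IDLE
 * for reuse.
 */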
struct nvme_fc_fcp_op {
	struct nvme_request	nreq;		/*
						 * nvme/host/core.c
						 * requires this to be
						 * the 1st element in the
						 * private structure
						 * associated with the
						 * request.
						 */
	struct nvmefc_fcp_req	fcp_req;

	struct nvme_fc_ctrl	*ctrl;
	struct nvme_fc_queue	*queue;
	struct request		*rq;

	atomic_t		state;
	u32			flags;
	u32			rqno;
	u32			nents;

	struct nvme_fc_cmd_iu	cmd_iu;
	struct nvme_fc_ersp_iu	rsp_iu;
};
struct nvme_fc_lport {
	struct nvme_fc_local_port	localport;

	struct ida			endp_cnt;
	struct list_head		port_list;	/* nvme_fc_lport_list */
	struct list_head		endp_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_port_template	*ops;
	struct kref			ref;
	atomic_t			act_rport_cnt;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
struct nvme_fc_rport {
	struct nvme_fc_remote_port	remoteport;

	struct list_head		endp_list; /* for lport->endp_list */
	struct list_head		ctrl_list;
	struct list_head		ls_req_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_lport		*lport;
	spinlock_t			lock;
	struct kref			ref;
	atomic_t			act_ctrl_cnt;
	unsigned long			dev_loss_end;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
enum nvme_fcctrl_flags {
	FCCTRL_TERMIO		= (1 << 0),
};
struct nvme_fc_ctrl {
	spinlock_t		lock;
	struct nvme_fc_queue	*queues;

	struct device		*dev;
	struct nvme_fc_lport	*lport;
	struct nvme_fc_rport	*rport;
	u32			cnum;

	u64			association_id;

	struct list_head	ctrl_list;	/* rport->ctrl_list */

	struct blk_mq_tag_set	admin_tag_set;
	struct blk_mq_tag_set	tag_set;

	struct delayed_work	connect_work;

	struct kref		ref;
	u32			flags;
	u32			iocnt;
	wait_queue_head_t	ioabort_wait;

	struct nvme_fc_fcp_op	aen_ops[NVME_NR_AEN_COMMANDS];

	struct nvme_ctrl	ctrl;
};
static inline struct nvme_fc_ctrl *
to_fc_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

static inline struct nvme_fc_lport *
localport_to_lport(struct nvme_fc_local_port *portptr)
{
	return container_of(portptr, struct nvme_fc_lport, localport);
}

static inline struct nvme_fc_rport *
remoteport_to_rport(struct nvme_fc_remote_port *portptr)
{
	return container_of(portptr, struct nvme_fc_rport, remoteport);
}

static inline struct nvmefc_ls_req_op *
ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
{
	return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
}

static inline struct nvme_fc_fcp_op *
fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
{
	return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
}
/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvme_fc_lock);

static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);


/*
 * These items are short-term. They will eventually be moved into
 * a generic FC class. See comments in module init.
 */
static struct class *fc_class;
static struct device *fc_udev_device;
/* *********************** FC-NVME Port Management ************************ */

static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
			struct nvme_fc_queue *, unsigned int);
static void
nvme_fc_free_lport(struct kref *ref)
{
	struct nvme_fc_lport *lport =
		container_of(ref, struct nvme_fc_lport, ref);
	unsigned long flags;

	WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&lport->endp_list));

	/* remove from transport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&lport->port_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
	ida_destroy(&lport->endp_cnt);

	put_device(lport->dev);

	kfree(lport);
}
static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{
	kref_put(&lport->ref, nvme_fc_free_lport);
}

static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{
	return kref_get_unless_zero(&lport->ref);
}
static struct nvme_fc_lport *
nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *ops,
			struct device *dev)
{
	struct nvme_fc_lport *lport;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != pinfo->node_name ||
		    lport->localport.port_name != pinfo->port_name)
			continue;

		if (lport->dev != dev) {
			lport = ERR_PTR(-EXDEV);
			goto out_done;
		}

		if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
			lport = ERR_PTR(-EEXIST);
			goto out_done;
		}

		if (!nvme_fc_lport_get(lport)) {
			/*
			 * fails if ref cnt already 0. If so,
			 * act as if lport already deleted
			 */
			lport = NULL;
			goto out_done;
		}

		/* resume the lport */

		lport->ops = ops;
		lport->localport.port_role = pinfo->port_role;
		lport->localport.port_id = pinfo->port_id;
		lport->localport.port_state = FC_OBJSTATE_ONLINE;

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		return lport;
	}

	lport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return lport;
}
/**
 * nvme_fc_register_localport - transport entry point called by an
 *                              LLDD to register the existence of a
 *                              NVMe host FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a local port pointer. Upon success, the routine
 *             will allocate a nvme_fc_local_port structure and place its
 *             address in the local port pointer. Upon failure, local port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *template,
			struct device *dev,
			struct nvme_fc_local_port **portptr)
{
	struct nvme_fc_lport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->localport_delete || !template->remoteport_delete ||
	    !template->ls_req || !template->fcp_io ||
	    !template->ls_abort || !template->fcp_abort ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a localport that had been
	 * deregistered and in the process of waiting for all the
	 * references to fully be removed. If the references haven't
	 * expired, we can simply re-enable the localport. Remoteports
	 * and controller reconnections should resume naturally.
	 */
	newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);

	/* found an lport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_reghost_failed;

	/* found existing lport, which was resumed */
	} else if (newrec) {
		*portptr = &newrec->localport;
		return 0;
	}

	/* nothing found - allocate a new localport struct */

	newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_reghost_failed;
	}

	idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	INIT_LIST_HEAD(&newrec->port_list);
	INIT_LIST_HEAD(&newrec->endp_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_rport_cnt, 0);
	newrec->ops = template;
	newrec->dev = dev;
	ida_init(&newrec->endp_cnt);
	newrec->localport.private = &newrec[1];
	newrec->localport.node_name = pinfo->node_name;
	newrec->localport.port_name = pinfo->port_name;
	newrec->localport.port_role = pinfo->port_role;
	newrec->localport.port_id = pinfo->port_id;
	newrec->localport.port_state = FC_OBJSTATE_ONLINE;
	newrec->localport.port_num = idx;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (dev)
		dma_set_seg_boundary(dev, template->dma_boundary);

	*portptr = &newrec->localport;
	return 0;

out_ida_put:
	ida_simple_remove(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_reghost_failed:
	*portptr = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
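
/*
 * Illustrative sketch (not taken from a real LLDD) of how a driver
 * might call the registration entry point above; wwnn, wwpn, pdev and
 * lldd_fc_template are placeholder names:
 *
 *	struct nvme_fc_port_info pinfo = {
 *		.node_name = wwnn,
 *		.port_name = wwpn,
 *		.port_role = FC_PORT_ROLE_NVME_INITIATOR,
 *	};
 *	struct nvme_fc_local_port *localport;
 *	int ret = nvme_fc_register_localport(&pinfo, &lldd_fc_template,
 *					     &pdev->dev, &localport);
 */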
/**
 * nvme_fc_unregister_localport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVMe host FC port.
 * @portptr: pointer to the (registered) local port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(portptr);
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&nvme_fc_lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (atomic_read(&lport->act_rport_cnt) == 0)
		lport->ops->localport_delete(&lport->localport);

	nvme_fc_lport_put(lport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);
/*
 * TRADDR strings, per FC-NVME, are fixed format:
 *     "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
 * udev event will only differ by prefix of what field is
 * being set:
 *     "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
 *     19 + 43 + null_fudge = 64 characters
 */
#define FCNVME_TRADDR_LENGTH		64
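
/*
 * Worked example of the fixed format above (43 characters):
 *     "nn-0x20000090fa942779:pn-0x10000090fa942779"
 * i.e. "nn-0x" + 16 hex digits + ":pn-0x" + 16 hex digits.
 */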
static void
nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
		struct nvme_fc_rport *rport)
{
	char hostaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_HOST_TRADDR=...*/
	char tgtaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_TRADDR=...*/
	char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };

	if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
		return;

	snprintf(hostaddr, sizeof(hostaddr),
		"NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
		lport->localport.node_name, lport->localport.port_name);
	snprintf(tgtaddr, sizeof(tgtaddr),
		"NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
		rport->remoteport.node_name, rport->remoteport.port_name);
	kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
}
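
/*
 * Illustrative udev rule (userspace policy, not defined by this driver)
 * that could consume the nvmediscovery uevent emitted above and trigger
 * connection establishment via nvme-cli:
 *
 *   ACTION=="change", SUBSYSTEM=="fc", ENV{FC_EVENT}=="nvmediscovery", \
 *	RUN+="/usr/sbin/nvme connect-all --transport=fc \
 *		--host-traddr=$env{NVMEFC_HOST_TRADDR} \
 *		--traddr=$env{NVMEFC_TRADDR}"
 */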
static void
nvme_fc_free_rport(struct kref *ref)
{
	struct nvme_fc_rport *rport =
		container_of(ref, struct nvme_fc_rport, ref);
	struct nvme_fc_lport *lport =
		localport_to_lport(rport->remoteport.localport);
	unsigned long flags;

	WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&rport->ctrl_list));

	/* remove from lport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&rport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);

	kfree(rport);

	nvme_fc_lport_put(lport);
}
static void
nvme_fc_rport_put(struct nvme_fc_rport *rport)
{
	kref_put(&rport->ref, nvme_fc_free_rport);
}

static int
nvme_fc_rport_get(struct nvme_fc_rport *rport)
{
	return kref_get_unless_zero(&rport->ref);
}
static void
nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
{
	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_CONNECTING:
		/*
		 * As all reconnects were suppressed, schedule a
		 * connect.
		 */
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: connectivity re-established. "
			"Attempting reconnect\n", ctrl->cnum);

		queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will naturally occur after the reset completes.
		 */
		break;

	default:
		/* no action to take - let it delete */
		break;
	}
}
static struct nvme_fc_rport *
nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
				struct nvme_fc_port_info *pinfo)
{
	struct nvme_fc_rport *rport;
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(rport, &lport->endp_list, endp_list) {
		if (rport->remoteport.node_name != pinfo->node_name ||
		    rport->remoteport.port_name != pinfo->port_name)
			continue;

		if (!nvme_fc_rport_get(rport)) {
			rport = ERR_PTR(-ENOLCK);
			goto out_done;
		}

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		spin_lock_irqsave(&rport->lock, flags);

		/* has it been unregistered */
		if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
			/* means lldd called us twice */
			spin_unlock_irqrestore(&rport->lock, flags);
			nvme_fc_rport_put(rport);
			return ERR_PTR(-ESTALE);
		}

		rport->remoteport.port_role = pinfo->port_role;
		rport->remoteport.port_id = pinfo->port_id;
		rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
		rport->dev_loss_end = 0;

		/*
		 * kick off a reconnect attempt on all associations to the
		 * remote port. A successful reconnect will resume i/o.
		 */
		list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
			nvme_fc_resume_controller(ctrl);

		spin_unlock_irqrestore(&rport->lock, flags);

		return rport;
	}

	rport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return rport;
}
static inline void
__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
			struct nvme_fc_port_info *pinfo)
{
	if (pinfo->dev_loss_tmo)
		rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
	else
		rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
}
/**
 * nvme_fc_register_remoteport - transport entry point called by an
 *                              LLDD to register the existence of a NVMe
 *                              subsystem FC port on its fabric.
 * @localport: pointer to the (registered) local port that the remote
 *             subsystem port is connected to.
 * @pinfo:     pointer to information about the port to be registered
 * @portptr:   pointer to a remote port pointer. Upon success, the routine
 *             will allocate a nvme_fc_remote_port structure and place its
 *             address in the remote port pointer. Upon failure, remote port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
				struct nvme_fc_port_info *pinfo,
				struct nvme_fc_remote_port **portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(localport);
	struct nvme_fc_rport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!nvme_fc_lport_get(lport)) {
		ret = -ESHUTDOWN;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a remoteport that is waiting
	 * for a reconnect (within dev_loss_tmo) with the same WWN's.
	 * If so, transition to it and reconnect.
	 */
	newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);

	/* found an rport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_lport_put;

	/* found existing rport, which was resumed */
	} else if (newrec) {
		nvme_fc_lport_put(lport);
		__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
		nvme_fc_signal_discovery_scan(lport, newrec);
		*portptr = &newrec->remoteport;
		return 0;
	}

	/* nothing found - allocate a new remoteport struct */

	newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_lport_put;
	}

	idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_kfree_rport;
	}

	INIT_LIST_HEAD(&newrec->endp_list);
	INIT_LIST_HEAD(&newrec->ctrl_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_ctrl_cnt, 0);
	spin_lock_init(&newrec->lock);
	newrec->remoteport.localport = &lport->localport;
	newrec->dev = lport->dev;
	newrec->lport = lport;
	newrec->remoteport.private = &newrec[1];
	newrec->remoteport.port_role = pinfo->port_role;
	newrec->remoteport.node_name = pinfo->node_name;
	newrec->remoteport.port_name = pinfo->port_name;
	newrec->remoteport.port_id = pinfo->port_id;
	newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
	newrec->remoteport.port_num = idx;
	__nvme_fc_set_dev_loss_tmo(newrec, pinfo);

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->endp_list, &lport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	nvme_fc_signal_discovery_scan(lport, newrec);

	*portptr = &newrec->remoteport;
	return 0;

out_kfree_rport:
	kfree(newrec);
out_lport_put:
	nvme_fc_lport_put(lport);
out_reghost_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
static int
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
	struct nvmefc_ls_req_op *lsop;
	unsigned long flags;

restart:
	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
		if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
			lsop->flags |= FCOP_FLAGS_TERMIO;
			spin_unlock_irqrestore(&rport->lock, flags);
			rport->lport->ops->ls_abort(&rport->lport->localport,
						&rport->remoteport,
						&lsop->ls_req);
			goto restart;
		}
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}
static void
nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
{
	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: controller connectivity lost. Awaiting "
		"Reconnect", ctrl->cnum);

	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
		/*
		 * Schedule a controller reset. The reset will terminate the
		 * association and schedule the reconnect timer. Reconnects
		 * will be attempted until either the ctlr_loss_tmo
		 * (max_retries * connect_delay) expires or the remoteport's
		 * dev_loss_tmo expires.
		 */
		if (nvme_reset_ctrl(&ctrl->ctrl)) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Couldn't schedule reset.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		}
		break;

	case NVME_CTRL_CONNECTING:
		/*
		 * The association has already been terminated and the
		 * controller is attempting reconnects. No need to do anything
		 * further. Reconnects will be attempted until either the
		 * ctlr_loss_tmo (max_retries * connect_delay) expires or the
		 * remoteport's dev_loss_tmo expires.
		 */
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will kick in naturally after the association is
		 * terminated.
		 */
		break;

	case NVME_CTRL_DELETING:
	default:
		/* no action to take - let it delete */
		break;
	}
}
/**
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVMe subsystem FC port.
 * @portptr: pointer to the (registered) remote port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);

	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		/* if dev_loss_tmo==0, dev loss is immediate */
		if (!portptr->dev_loss_tmo) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: controller connectivity lost.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		} else
			nvme_fc_ctrl_connectivity_loss(ctrl);
	}

	spin_unlock_irqrestore(&rport->lock, flags);

	nvme_fc_abort_lsops(rport);

	if (atomic_read(&rport->act_ctrl_cnt) == 0)
		rport->lport->ops->remoteport_delete(portptr);

	/*
	 * release the reference; once all controllers go away (which
	 * should only occur after dev_loss_tmo expires), the rport can
	 * be torn down.
	 */
	nvme_fc_rport_put(rport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
/**
 * nvme_fc_rescan_remoteport - transport entry point called by an
 *                             LLDD to request a nvme device rescan.
 * @remoteport: pointer to the (registered) remote port that is to be
 *              rescanned.
 *
 * Returns: N/A
 */
void
nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);

	nvme_fc_signal_discovery_scan(rport->lport, rport);
}
EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);
int
nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
			u32 dev_loss_tmo)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}

	/* a dev_loss_tmo of 0 (immediate) is allowed to be set */
	rport->remoteport.dev_loss_tmo = dev_loss_tmo;

	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);
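
/*
 * Example: an LLDD that wants an unreachable target dropped immediately
 * rather than after the 60 second default could call
 *	nvme_fc_set_remoteport_devloss(remoteport, 0);
 * while the remoteport is still ONLINE; per the unregister path above,
 * a dev_loss_tmo of 0 means dev loss is immediate on connectivity
 * failure.
 */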
/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLD's will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (those that return just a dma address), we'll
 * noop them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */
static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}
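
/*
 * Example of the NULL-dev behavior above, as exercised by fcloop:
 * fc_dma_map_single(NULL, buf, len, DMA_TO_DEVICE) returns a dma
 * address of 0, fc_dma_mapping_error(NULL, 0) reports no error, and
 * fc_dma_map_sg(NULL, sg, nents, dir) takes the fc_map_sg() path, so
 * the LS and FCP code below runs unchanged without a dma-capable
 * device.
 */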
/* *********************** FC-NVME LS Handling **************************** */

static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);


static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);

	nvme_fc_rport_put(rport);
}
static int
__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ECONNREFUSED;

	if (!nvme_fc_rport_get(rport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->rport = rport;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);
	init_completion(&lsop->ls_done);

	lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_putrport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&rport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&rport->lock, flags);

	ret = rport->lport->ops->ls_req(&rport->lport->localport,
					&rport->remoteport, lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&rport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&rport->lock, flags);
	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
out_putrport:
	nvme_fc_rport_put(rport);

	return ret;
}
static void
nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	lsop->ls_error = status;
	complete(&lsop->ls_done);
}

static int
nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
	int ret;

	ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);

	if (!ret) {
		/*
		 * No timeout/not interruptible as we need the struct
		 * to exist until the lldd calls us back. Thus mandate
		 * wait until driver calls back. lldd responsible for
		 * the timeout action
		 */
		wait_for_completion(&lsop->ls_done);

		__nvme_fc_finish_ls_req(lsop);

		ret = lsop->ls_error;
	}

	if (ret)
		return ret;

	/* ACC or RJT payload ? */
	if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
		return -ENXIO;

	return 0;
}
static int
nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	/* don't wait for completion */

	return __nvme_fc_send_ls_req(rport, lsop, done);
}
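
/*
 * Usage note: the synchronous variant above (nvme_fc_send_ls_req) is
 * used by the connect paths below, which can sleep on ls_done; the
 * async variant is used on teardown (e.g. the Disconnect LS), where
 * the caller must not block waiting for an LS that may never be
 * answered.
 */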
/* Validation Error indexes into the string table below */
enum {
	VERR_NO_ERROR		= 0,
	VERR_LSACC		= 1,
	VERR_LSDESC_RQST	= 2,
	VERR_LSDESC_RQST_LEN	= 3,
	VERR_ASSOC_ID		= 4,
	VERR_ASSOC_ID_LEN	= 5,
	VERR_CONN_ID		= 6,
	VERR_CONN_ID_LEN	= 7,
	VERR_CR_ASSOC		= 8,
	VERR_CR_ASSOC_ACC_LEN	= 9,
	VERR_CR_CONN		= 10,
	VERR_CR_CONN_ACC_LEN	= 11,
	VERR_DISCONN		= 12,
	VERR_DISCONN_ACC_LEN	= 13,
};
static char *validation_errors[] = {
	"OK",
	"Not LS_ACC",
	"Not LSDESC_RQST",
	"Bad LSDESC_RQST Length",
	"Not Association ID",
	"Bad Association ID Length",
	"Not Connection ID",
	"Bad Connection ID Length",
	"Not CR_ASSOC Rqst",
	"Bad CR_ASSOC ACC Length",
	"Not CR_CONN Rqst",
	"Bad CR_CONN ACC Length",
	"Not Disconnect Rqst",
	"Bad Disconnect ACC Length",
};
static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
	struct fcnvme_ls_cr_assoc_acc *assoc_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
	if (!lsop) {
		ret = -ENOMEM;
		goto out_no_memory;
	}
	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];

	assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
	assoc_rqst->desc_list_len =
			cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
	assoc_rqst->assoc_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
	/* Linux supports only Dynamic controllers */
	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
	uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
	strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
		min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
	strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
		min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));

	lsop->queue = queue;
	lsreq->rqstaddr = assoc_rqst;
	lsreq->rqstlen = sizeof(*assoc_rqst);
	lsreq->rspaddr = assoc_acc;
	lsreq->rsplen = sizeof(*assoc_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (assoc_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)))
		fcret = VERR_CR_ASSOC_ACC_LEN;
	else if (assoc_acc->hdr.rqst.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (assoc_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
		fcret = VERR_CR_ASSOC;
	else if (assoc_acc->associd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		fcret = VERR_ASSOC_ID;
	else if (assoc_acc->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		fcret = VERR_ASSOC_ID_LEN;
	else if (assoc_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (assoc_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d connect failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		ctrl->association_id =
			be64_to_cpu(assoc_acc->associd.association_id);
		queue->connection_id =
			be64_to_cpu(assoc_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect admin queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}
static int
nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
			u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_conn_rqst *conn_rqst;
	struct fcnvme_ls_cr_conn_acc *conn_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
	if (!lsop) {
		ret = -ENOMEM;
		goto out_no_memory;
	}
	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];

	conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
	conn_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));

	conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	conn_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
	conn_rqst->connect_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
	conn_rqst->connect_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);

	lsop->queue = queue;
	lsreq->rqstaddr = conn_rqst;
	lsreq->rqstlen = sizeof(*conn_rqst);
	lsreq->rspaddr = conn_acc;
	lsreq->rsplen = sizeof(*conn_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (conn_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
		fcret = VERR_CR_CONN_ACC_LEN;
	else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (conn_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
		fcret = VERR_CR_CONN;
	else if (conn_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (conn_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d connect failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		queue->connection_id =
			be64_to_cpu(conn_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect command failed (%d).\n",
			queue->qnum, ret);
	return ret;
}
static void
nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	__nvme_fc_finish_ls_req(lsop);

	/* fc-nvme initiator doesn't care about success or failure of cmd */

	kfree(lsop);
}
/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association. Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association. E.g. things are torn
 * down, and the related FC-NVME Association ID and Connection IDs
 * become invalid.
 *
 * The behavior of the fc-nvme initiator is such that its
 * understanding of the association and connections will implicitly
 * be torn down. The action is implicit as it may be due to a loss of
 * connectivity with the fc-nvme target, so you may never get a
 * response even if you tried. As such, the action of this routine
 * is to asynchronously send the LS, ignore any results of the LS, and
 * continue on with terminating the association. If the fc-nvme target
 * is present and receives the LS, it too can tear down.
 */
static void
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{
	struct fcnvme_ls_disconnect_rqst *discon_rqst;
	struct fcnvme_ls_disconnect_acc *discon_acc;
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	int ret;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*discon_rqst) + sizeof(*discon_acc)),
			GFP_KERNEL);
	if (!lsop)
		/* couldn't send it... too bad */
		return;

	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];

	discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
	discon_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_disconn_cmd));

	discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	discon_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));

	discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);

	discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
						FCNVME_LSDESC_DISCONN_CMD);
	discon_rqst->discon_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_disconn_cmd));
	discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
	discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);

	lsreq->rqstaddr = discon_rqst;
	lsreq->rqstlen = sizeof(*discon_rqst);
	lsreq->rspaddr = discon_acc;
	lsreq->rsplen = sizeof(*discon_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
				nvme_fc_disconnect_assoc_done);
	if (ret)
		kfree(lsop);

	/* only meaningful part to terminating the association */
	ctrl->association_id = 0;
}
/* *********************** NVME Ctrl Routines **************************** */

static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);

static void
__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op)
{
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
				sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_UNINIT);
}
static void
nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);

	return __nvme_fc_exit_request(set->driver_data, op);
}
static void
__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
{
	unsigned long flags;
	int opstate;

	spin_lock_irqsave(&ctrl->lock, flags);
	opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
	if (opstate != FCPOP_STATE_ACTIVE)
		atomic_set(&op->state, opstate);
	else if (ctrl->flags & FCCTRL_TERMIO)
		ctrl->iocnt++;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (opstate != FCPOP_STATE_ACTIVE)
		return;

	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					op->queue->lldd_handle,
					&op->fcp_req);
}
static void
nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
	int i;

	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
		__nvme_fc_abort_op(ctrl, aen_op);
}
static inline void
__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op, int opstate)
{
	unsigned long flags;

	if (opstate == FCPOP_STATE_ABORTED) {
		spin_lock_irqsave(&ctrl->lock, flags);
		if (ctrl->flags & FCCTRL_TERMIO) {
			if (!--ctrl->iocnt)
				wake_up(&ctrl->ioabort_wait);
		}
		spin_unlock_irqrestore(&ctrl->lock, flags);
	}
}
static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
	struct request *rq = op->rq;
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	struct nvme_fc_queue *queue = op->queue;
	struct nvme_completion *cqe = &op->rsp_iu.cqe;
	struct nvme_command *sqe = &op->cmd_iu.sqe;
	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
	union nvme_result result;
	bool terminate_assoc = true;
	int opstate;

	/*
	 * WARNING:
	 * The current linux implementation of a nvme controller
	 * allocates a single tag set for all io queues and sizes
	 * the io queues to fully hold all possible tags. Thus, the
	 * implementation does not reference or care about the sqhd
	 * value as it never needs to use the sqhd/sqtail pointers
	 * for submission pacing.
	 *
	 * This affects the FC-NVME implementation in two ways:
	 * 1) As the value doesn't matter, we don't need to waste
	 *    cycles extracting it from ERSPs and stamping it in the
	 *    cases where the transport fabricates CQEs on successful
	 *    completions.
	 * 2) The FC-NVME implementation requires that delivery of
	 *    ERSP completions are to go back to the nvme layer in order
	 *    relative to the rsn, such that the sqhd value will always
	 *    be "in order" for the nvme layer. As the nvme layer in
	 *    linux doesn't care about sqhd, there's no need to return
	 *    them in order.
	 *
	 * Additionally:
	 * As the core nvme layer in linux currently does not look at
	 * every field in the cqe - in cases where the FC transport must
	 * fabricate a CQE, the following fields will not be set as they
	 * are not referenced:
	 *      cqe.sqid,  cqe.sqhd,  cqe.command_id
	 *
	 * Failure or error of an individual i/o, in a transport
	 * detected fashion unrelated to the nvme completion status,
	 * can potentially cause the initiator and target sides to get out
	 * of sync on SQ head/tail (aka outstanding io count allowed).
	 * Per FC-NVME spec, failure of an individual command requires
	 * the connection to be terminated, which in turn requires the
	 * association to be terminated.
	 */

	opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);

	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);

	if (opstate == FCPOP_STATE_ABORTED)
		status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
	else if (freq->status)
		status = cpu_to_le16(NVME_SC_INTERNAL << 1);

	/*
	 * For the linux implementation, if we have an unsuccessful
	 * status, the blk-mq layer can typically be called with the
	 * non-zero status and the content of the cqe isn't important.
	 */
	if (status)
		goto done;

	/*
	 * command completed successfully relative to the wire
	 * protocol. However, validate anything received and
	 * extract the status and result from the cqe (create it
	 * where necessary).
	 */

	switch (freq->rcv_rsplen) {

	case 0:
	case NVME_FC_SIZEOF_ZEROS_RSP:
		/*
		 * No response payload, or 12 bytes of payload (which
		 * should all be zeros), is considered successful and
		 * results in a CQE with no payload from the transport.
		 */
		if (freq->transferred_length !=
			be32_to_cpu(op->cmd_iu.data_len)) {
			status = cpu_to_le16(NVME_SC_INTERNAL << 1);
			goto done;
		}
		result.u64 = 0;
		break;

	case sizeof(struct nvme_fc_ersp_iu):
		/*
		 * The ERSP IU contains a full completion with CQE.
		 * Validate ERSP IU and look at cqe.
		 */
		if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
					(freq->rcv_rsplen / 4) ||
			     be32_to_cpu(op->rsp_iu.xfrd_len) !=
					freq->transferred_length ||
			     op->rsp_iu.status_code ||
			     sqe->common.command_id != cqe->command_id)) {
			status = cpu_to_le16(NVME_SC_INTERNAL << 1);
			goto done;
		}
		result = cqe->result;
		status = cqe->status;
		break;

	default:
		status = cpu_to_le16(NVME_SC_INTERNAL << 1);
		goto done;
	}

	terminate_assoc = false;

done:
	if (op->flags & FCOP_FLAGS_AEN) {
		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
		atomic_set(&op->state, FCPOP_STATE_IDLE);
		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */
		nvme_fc_ctrl_put(ctrl);
		goto check_error;
	}

	__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
	nvme_end_request(rq, status, result);

check_error:
	if (terminate_assoc)
		nvme_fc_error_recovery(ctrl, "transport detected io error");
}
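
/*
 * Example of the fabricated-CQE path above: a successful FCP op that
 * returns no ERSP (rcv_rsplen == 0) completes with NVME_SC_SUCCESS and
 * a zeroed result, provided transferred_length matches the CMD IU
 * data_len; any mismatch is surfaced as NVME_SC_INTERNAL, which in
 * turn triggers association termination.
 */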
static int
__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
		struct request *rq, u32 rqno)
{
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	int ret = 0;

	memset(op, 0, sizeof(*op));
	op->fcp_req.cmdaddr = &op->cmd_iu;
	op->fcp_req.cmdlen = sizeof(op->cmd_iu);
	op->fcp_req.rspaddr = &op->rsp_iu;
	op->fcp_req.rsplen = sizeof(op->rsp_iu);
	op->fcp_req.done = nvme_fc_fcpio_done;
	op->fcp_req.first_sgl = (struct scatterlist *)&op[1];
	op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
	op->ctrl = ctrl;
	op->queue = queue;
	op->rq = rq;
	op->rqno = rqno;

	cmdiu->scsi_id = NVME_CMD_SCSI_ID;
	cmdiu->fc_id = NVME_CMD_FC_ID;
	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));

	op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
				&op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - cmdiu dma mapping failed.\n");
		ret = -EFAULT;
		goto out_on_error;
	}

	op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
				&op->rsp_iu, sizeof(op->rsp_iu),
				DMA_FROM_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - rspiu dma mapping failed.\n");
		ret = -EFAULT;
	}

	atomic_set(&op->state, FCPOP_STATE_IDLE);
out_on_error:
	return ret;
}
static int
nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_fc_ctrl *ctrl = set->driver_data;
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];

	return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
}
static int
nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	struct nvme_fc_cmd_iu *cmdiu;
	struct nvme_command *sqe;
	void *private;
	int i, ret;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
						GFP_KERNEL);
		if (!private)
			return -ENOMEM;

		cmdiu = &aen_op->cmd_iu;
		sqe = &cmdiu->sqe;
		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
				aen_op, (struct request *)NULL,
				(NVME_AQ_BLK_MQ_DEPTH + i));
		if (ret) {
			kfree(private);
			return ret;
		}

		aen_op->flags = FCOP_FLAGS_AEN;
		aen_op->fcp_req.first_sgl = NULL; /* no sg list */
		aen_op->fcp_req.private = private;

		memset(sqe, 0, sizeof(*sqe));
		sqe->common.opcode = nvme_admin_async_event;
		/* Note: core layer may overwrite the sqe.command_id value */
		sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
	}
	return 0;
}
static void
nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	int i;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		if (!aen_op->fcp_req.private)
			continue;

		__nvme_fc_exit_request(ctrl, aen_op);

		kfree(aen_op->fcp_req.private);
		aen_op->fcp_req.private = NULL;
	}
}
static inline void
__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
		unsigned int qidx)
{
	struct nvme_fc_queue *queue = &ctrl->queues[qidx];

	hctx->driver_data = queue;
	queue->hctx = hctx;
}

static int
nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);

	return 0;
}

static int
nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx);

	return 0;
}
static void
nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
{
	struct nvme_fc_queue *queue;

	queue = &ctrl->queues[idx];
	memset(queue, 0, sizeof(*queue));
	queue->ctrl = ctrl;
	queue->qnum = idx;
	atomic_set(&queue->csn, 1);
	queue->dev = ctrl->dev;

	if (idx > 0)
		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command);

	/*
	 * Considered whether we should allocate buffers for all SQEs
	 * and CQEs and dma map them - mapping their respective entries
	 * into the request structures (kernel vm addr and dma address)
	 * thus the driver could use the buffers/mappings directly.
	 * It only makes sense if the LLDD would use them for its
	 * messaging api. It's very unlikely most adapter api's would use
	 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload
	 * structures were used instead.
	 */
}
/*
 * This routine terminates a queue at the transport level.
 * The transport has already ensured that all outstanding ios on
 * the queue have been terminated.
 * The transport will send a Disconnect LS request to terminate
 * the queue's connection. Termination of the admin queue will also
 * terminate the association at the target.
 */
static void
nvme_fc_free_queue(struct nvme_fc_queue *queue)
{
	if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
		return;

	clear_bit(NVME_FC_Q_LIVE, &queue->flags);
	/*
	 * Current implementation never disconnects a single queue.
	 * It always terminates a whole association. So there is never
	 * a disconnect(queue) LS sent to the target.
	 */

	queue->connection_id = 0;
	atomic_set(&queue->csn, 1);
}
static void
__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx)
{
	if (ctrl->lport->ops->delete_queue)
		ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
				queue->lldd_handle);
	queue->lldd_handle = NULL;
}

static void
nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_fc_free_queue(&ctrl->queues[i]);
}
static int
__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
{
	int ret;

	queue->lldd_handle = NULL;
	if (ctrl->lport->ops->create_queue)
		ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
				qidx, qsize, &queue->lldd_handle);
	else
		ret = 0;

	return ret;
}

static void
nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
	int i;

	for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
		__nvme_fc_delete_hw_queue(ctrl, queue, i);
}

static int
nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	struct nvme_fc_queue *queue = &ctrl->queues[1];
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
		ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
		if (ret)
			goto delete_queues;
	}

	return 0;

delete_queues:
	for (; i >= 0; i--)
		__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
	return ret;
}
static int
nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	int i, ret = 0;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
					(qsize / 5));
		if (ret)
			break;
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			break;

		set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
	}

	return ret;
}

static void
nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_fc_init_queue(ctrl, i);
}
static void
nvme_fc_ctrl_free(struct kref *ref)
{
	struct nvme_fc_ctrl *ctrl =
		container_of(ref, struct nvme_fc_ctrl, ref);
	unsigned long flags;

	if (ctrl->ctrl.tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}

	/* remove from rport list */
	spin_lock_irqsave(&ctrl->rport->lock, flags);
	list_del(&ctrl->ctrl_list);
	spin_unlock_irqrestore(&ctrl->rport->lock, flags);

	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);

	kfree(ctrl->queues);

	put_device(ctrl->dev);
	nvme_fc_rport_put(ctrl->rport);

	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
	if (ctrl->ctrl.opts)
		nvmf_free_options(ctrl->ctrl.opts);
	kfree(ctrl);
}

static void
nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvme_fc_ctrl_free);
}

static int
nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
{
	return kref_get_unless_zero(&ctrl->ref);
}
/*
 * All accesses from nvme core layer done - can now free the
 * controller. Called after last nvme_put_ctrl() call
 */
static void
nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

	WARN_ON(nctrl != &ctrl->ctrl);

	nvme_fc_ctrl_put(ctrl);
}
static void
nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
{
	/* only proceed if in LIVE state - e.g. on first error */
	if (ctrl->ctrl.state != NVME_CTRL_LIVE)
		return;

	dev_warn(ctrl->ctrl.device,
		"NVME-FC{%d}: transport association error detected: %s\n",
		ctrl->cnum, errmsg);
	dev_warn(ctrl->ctrl.device,
		"NVME-FC{%d}: resetting controller\n", ctrl->cnum);

	nvme_reset_ctrl(&ctrl->ctrl);
}
static enum blk_eh_timer_return
nvme_fc_timeout(struct request *rq, bool reserved)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;

	/*
	 * we can't individually ABTS an io without affecting the queue,
	 * thus killing the queue, and thus the association.
	 * So resolve by performing a controller reset, which will stop
	 * the host/io stack, terminate the association on the link,
	 * and recreate an association on the link.
	 */
	nvme_fc_error_recovery(ctrl, "io timeout error");

	/*
	 * the io abort has been initiated. Have the reset timer
	 * restarted and the abort completion will complete the io
	 * shortly. Avoids a synchronous wait while the abort finishes.
	 */
	return BLK_EH_RESET_TIMER;
}
static int
nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	enum dma_data_direction dir;
	int ret;

	freq->sg_cnt = 0;

	if (!blk_rq_payload_bytes(rq))
		return 0;

	freq->sg_table.sgl = freq->first_sgl;
	ret = sg_alloc_table_chained(&freq->sg_table,
			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
	if (ret)
		return -ENOMEM;

	op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
	WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
	dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
				op->nents, dir);
	if (unlikely(freq->sg_cnt <= 0)) {
		sg_free_table_chained(&freq->sg_table, true);
		freq->sg_cnt = 0;
		return -EFAULT;
	}

	/*
	 * TODO: blk_integrity_rq(rq)  for DIF / PI support
	 */
	return 0;
}
static void
nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{
	struct nvmefc_fcp_req *freq = &op->fcp_req;

	if (!freq->sg_cnt)
		return;

	fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
				((rq_data_dir(rq) == WRITE) ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE));

	nvme_cleanup_cmd(rq);

	sg_free_table_chained(&freq->sg_table, true);

	freq->sg_cnt = 0;
}
/*
 * In FC, the queue is a logical thing. At transport connect, the target
 * creates its "queue" and returns a handle that the host passes back to
 * the target whenever it posts something to the corresponding SQ. When an
 * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
 * command contained within the SQE, an io, and assigns a FC exchange
 * to it. The SQE and the associated SQ handle are sent in the initial
 * CMD IU sent on the exchange. All transfers relative to the io occur
 * as part of the exchange. The CQE is the last thing for the io,
 * which is transferred (explicitly or implicitly) with the RSP IU
 * sent on the exchange. After the CQE is received, the FC exchange is
 * terminated and the Exchange may be used on a different io.
 *
 * The transport to LLDD api has the transport making a request for a
 * new fcp io request to the LLDD. The LLDD then allocates a FC exchange
 * resource and transfers the command. The LLDD will then process all
 * steps to complete the io. Upon completion, the transport done routine
 * is called.
 *
 * So - while the operation is outstanding to the LLDD, there is a link
 * level FC exchange resource that is also outstanding. This must be
 * considered in all cleanup operations.
 */
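
/*
 * In the code below, that model maps as follows: nvme_fc_start_fcp_op()
 * formats the CMD IU and hands the exchange to the LLDD via fcp_io(),
 * nvme_fc_fcpio_done() runs when the RSP IU (or a transport error)
 * completes the exchange, and __nvme_fc_abort_op() asks the LLDD to
 * terminate the exchange for ios that must be cleaned up early.
 */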
static blk_status_t
nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
	struct nvme_fc_fcp_op *op, u32 data_len,
	enum nvmefc_fcp_datadir	io_dir)
{
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	struct nvme_command *sqe = &cmdiu->sqe;
	u32 csn;
	int ret, opstate;

	/*
	 * before attempting to send the io, check to see if we believe
	 * the target device is present
	 */
	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return BLK_STS_RESOURCE;

	if (!nvme_fc_ctrl_get(ctrl))
		return BLK_STS_IOERR;

	/* format the FC-NVME CMD IU and fcp_req */
	cmdiu->connection_id = cpu_to_be64(queue->connection_id);
	csn = atomic_inc_return(&queue->csn);
	cmdiu->csn = cpu_to_be32(csn);
	cmdiu->data_len = cpu_to_be32(data_len);
	switch (io_dir) {
	case NVMEFC_FCP_WRITE:
		cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
		break;
	case NVMEFC_FCP_READ:
		cmdiu->flags = FCNVME_CMD_FLAGS_READ;
		break;
	case NVMEFC_FCP_NODATA:
		cmdiu->flags = 0;
		break;
	}
	op->fcp_req.payload_length = data_len;
	op->fcp_req.io_dir = io_dir;
	op->fcp_req.transferred_length = 0;
	op->fcp_req.rcv_rsplen = 0;
	op->fcp_req.status = NVME_SC_SUCCESS;
	op->fcp_req.sqid = cpu_to_le16(queue->qnum);

	/*
	 * validate per fabric rules, set fields mandated by fabric spec
	 * as well as those by FC-NVME spec.
	 */
	WARN_ON_ONCE(sqe->common.metadata);
	sqe->common.flags |= NVME_CMD_SGL_METABUF;

	/*
	 * format SQE DPTR field per FC-NVME rules:
	 *    type=0x5     Transport SGL Data Block Descriptor
	 *    subtype=0xA  Transport-specific value
	 *    address=0
	 *    length=length of the data series
	 */
	sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
					NVME_SGL_FMT_TRANSPORT_A;
	sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
	sqe->rw.dptr.sgl.addr = 0;

	if (!(op->flags & FCOP_FLAGS_AEN)) {
		ret = nvme_fc_map_data(ctrl, op->rq, op);
		if (ret < 0) {
			nvme_cleanup_cmd(op->rq);
			nvme_fc_ctrl_put(ctrl);
			if (ret == -ENOMEM || ret == -EAGAIN)
				return BLK_STS_RESOURCE;
			return BLK_STS_IOERR;
		}
	}

	fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
				  sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_ACTIVE);

	if (!(op->flags & FCOP_FLAGS_AEN))
		blk_mq_start_request(op->rq);

	ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					queue->lldd_handle, &op->fcp_req);

	if (ret) {
		opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);

		if (!(op->flags & FCOP_FLAGS_AEN))
			nvme_fc_unmap_data(ctrl, op->rq, op);

		nvme_fc_ctrl_put(ctrl);

		if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
				ret != -EBUSY)
			return BLK_STS_IOERR;

		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}
static blk_status_t
nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_fc_queue *queue = hctx->driver_data;
	struct nvme_fc_ctrl *ctrl = queue->ctrl;
	struct request *rq = bd->rq;
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	struct nvme_command *sqe = &cmdiu->sqe;
	enum nvmefc_fcp_datadir	io_dir;
	bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
	u32 data_len;
	blk_status_t ret;

	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
	    !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);

	ret = nvme_setup_cmd(ns, rq, sqe);
	if (ret)
		return ret;

	data_len = blk_rq_payload_bytes(rq);
	if (data_len)
		io_dir = ((rq_data_dir(rq) == WRITE) ?
					NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
	else
		io_dir = NVMEFC_FCP_NODATA;

	return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
}
static struct blk_mq_tags *
nvme_fc_tagset(struct nvme_fc_queue *queue)
{
        if (queue->qnum == 0)
                return queue->ctrl->admin_tag_set.tags[queue->qnum];

        return queue->ctrl->tag_set.tags[queue->qnum - 1];
}

static int
nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
        struct nvme_fc_queue *queue = hctx->driver_data;
        struct nvme_fc_ctrl *ctrl = queue->ctrl;
        struct request *req;
        struct nvme_fc_fcp_op *op;

        req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag);
        if (!req)
                return 0;

        op = blk_mq_rq_to_pdu(req);

        if ((atomic_read(&op->state) == FCPOP_STATE_ACTIVE) &&
                 (ctrl->lport->ops->poll_queue))
                ctrl->lport->ops->poll_queue(&ctrl->lport->localport,
                                                 queue->lldd_handle);

        return ((atomic_read(&op->state) != FCPOP_STATE_ACTIVE));
}

static void
nvme_fc_submit_async_event(struct nvme_ctrl *arg)
{
        struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
        struct nvme_fc_fcp_op *aen_op;
        unsigned long flags;
        bool terminating = false;
        blk_status_t ret;

        spin_lock_irqsave(&ctrl->lock, flags);
        if (ctrl->flags & FCCTRL_TERMIO)
                terminating = true;
        spin_unlock_irqrestore(&ctrl->lock, flags);

        if (terminating)
                return;

        aen_op = &ctrl->aen_ops[0];

        ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
                                        NVMEFC_FCP_NODATA);
        if (ret)
                dev_err(ctrl->ctrl.device,
                        "failed async event work\n");
}

static void
nvme_fc_complete_rq(struct request *rq)
{
        struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
        struct nvme_fc_ctrl *ctrl = op->ctrl;

        atomic_set(&op->state, FCPOP_STATE_IDLE);

        nvme_fc_unmap_data(ctrl, rq, op);
        nvme_complete_rq(rq);
        nvme_fc_ctrl_put(ctrl);
}

/*
 * This routine is used by the transport when it needs to find active
 * io on a queue that is to be terminated. The transport uses
 * blk_mq_tagset_busy_iter() to find the busy requests, which then invoke
 * this routine to kill them one by one.
 *
 * As FC allocates FC exchange for each io, the transport must contact
 * the LLDD to terminate the exchange, thus releasing the FC exchange.
 * After terminating the exchange the LLDD will call the transport's
 * normal io done path for the request, but it will have an aborted
 * status. The done path will return the io request back to the block
 * layer with an error status.
 */
static void
nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
{
        struct nvme_ctrl *nctrl = data;
        struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
        struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);

        __nvme_fc_abort_op(ctrl, op);
}

static const struct blk_mq_ops nvme_fc_mq_ops = {
        .queue_rq       = nvme_fc_queue_rq,
        .complete       = nvme_fc_complete_rq,
        .init_request   = nvme_fc_init_request,
        .exit_request   = nvme_fc_exit_request,
        .init_hctx      = nvme_fc_init_hctx,
        .poll           = nvme_fc_poll,
        .timeout        = nvme_fc_timeout,
};

static int
nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
{
        struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
        unsigned int nr_io_queues;
        int ret;

        nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
                                ctrl->lport->ops->max_hw_queues);
        ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
        if (ret) {
                dev_info(ctrl->ctrl.device,
                        "set_queue_count failed: %d\n", ret);
                return ret;
        }

        ctrl->ctrl.queue_count = nr_io_queues + 1;
        if (!nr_io_queues)
                return 0;

        nvme_fc_init_io_queues(ctrl);

        memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
        ctrl->tag_set.ops = &nvme_fc_mq_ops;
        ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
        ctrl->tag_set.reserved_tags = 1; /* fabric connect */
        ctrl->tag_set.numa_node = NUMA_NO_NODE;
        ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
                                        (SG_CHUNK_SIZE *
                                                sizeof(struct scatterlist)) +
                                        ctrl->lport->ops->fcprqst_priv_sz;
        ctrl->tag_set.driver_data = ctrl;
        ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
        ctrl->tag_set.timeout = NVME_IO_TIMEOUT;

        ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
        if (ret)
                return ret;

        ctrl->ctrl.tagset = &ctrl->tag_set;

        ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
        if (IS_ERR(ctrl->ctrl.connect_q)) {
                ret = PTR_ERR(ctrl->ctrl.connect_q);
                goto out_free_tag_set;
        }

        ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
        if (ret)
                goto out_cleanup_blk_queue;

        ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
        if (ret)
                goto out_delete_hw_queues;

        ctrl->ioq_live = true;

        return 0;

out_delete_hw_queues:
        nvme_fc_delete_hw_io_queues(ctrl);
out_cleanup_blk_queue:
        blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
        blk_mq_free_tag_set(&ctrl->tag_set);
        nvme_fc_free_io_queues(ctrl);

        /* force put free routine to ignore io queues */
        ctrl->ctrl.tagset = NULL;

        return ret;
}

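/*
 * Illustrative note, not part of the driver: the cmd_size computed
 * above sizes each request's PDU so that blk_mq_rq_to_pdu() yields
 * memory laid out as if it were the following hypothetical struct,
 * with the LLDD's private area sized by ops->fcprqst_priv_sz:
 */
#if 0
struct example_fcp_pdu {
        struct nvme_fc_fcp_op   op;                     /* transport op */
        struct scatterlist      sgl[SG_CHUNK_SIZE];     /* data SG pool */
        u8                      lldd_priv[];            /* LLDD private */
};
#endif
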
static int
nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
{
        struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
        unsigned int nr_io_queues;
        int ret;

        nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
                                ctrl->lport->ops->max_hw_queues);
        ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
        if (ret) {
                dev_info(ctrl->ctrl.device,
                        "set_queue_count failed: %d\n", ret);
                return ret;
        }

        ctrl->ctrl.queue_count = nr_io_queues + 1;
        /* check for io queues existing */
        if (ctrl->ctrl.queue_count == 1)
                return 0;

        ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
        if (ret)
                goto out_free_io_queues;

        ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
        if (ret)
                goto out_delete_hw_queues;

        blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);

        return 0;

out_delete_hw_queues:
        nvme_fc_delete_hw_io_queues(ctrl);
out_free_io_queues:
        nvme_fc_free_io_queues(ctrl);
        return ret;
}

static void
nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
{
        struct nvme_fc_lport *lport = rport->lport;

        atomic_inc(&lport->act_rport_cnt);
}

static void
nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
{
        struct nvme_fc_lport *lport = rport->lport;
        u32 cnt;

        cnt = atomic_dec_return(&lport->act_rport_cnt);
        if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
                lport->ops->localport_delete(&lport->localport);
}

static int
nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
{
        struct nvme_fc_rport *rport = ctrl->rport;
        u32 cnt;

        if (ctrl->assoc_active)
                return 1;

        ctrl->assoc_active = true;
        cnt = atomic_inc_return(&rport->act_ctrl_cnt);
        if (cnt == 1)
                nvme_fc_rport_active_on_lport(rport);

        return 0;
}

static int
nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
{
        struct nvme_fc_rport *rport = ctrl->rport;
        struct nvme_fc_lport *lport = rport->lport;
        u32 cnt;

        /* ctrl->assoc_active=false will be set independently */

        cnt = atomic_dec_return(&rport->act_ctrl_cnt);
        if (cnt == 0) {
                if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
                        lport->ops->remoteport_delete(&rport->remoteport);
                nvme_fc_rport_inactive_on_lport(rport);
        }

        return 0;
}

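/*
 * Teardown ordering implied by the helpers above: the last active
 * controller on an rport drops the rport's count to zero, which (if
 * the remoteport was already marked FC_OBJSTATE_DELETED) invokes
 * remoteport_delete() and then decrements the lport's active rport
 * count, potentially triggering localport_delete() in turn.
 */
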
/*
 * This routine restarts the controller on the host side, and
 * on the link side, recreates the controller association.
 */
static int
nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
{
        struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
        int ret;
        bool changed;

        ++ctrl->ctrl.nr_reconnects;

        if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
                return -ENODEV;

        if (nvme_fc_ctlr_active_on_rport(ctrl))
                return -ENOTUNIQ;

        /*
         * Create the admin queue
         */

        ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
                                NVME_AQ_DEPTH);
        if (ret)
                goto out_free_queue;

        ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
                                NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
        if (ret)
                goto out_delete_hw_queue;

        blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

        ret = nvmf_connect_admin_queue(&ctrl->ctrl);
        if (ret)
                goto out_disconnect_admin_queue;

        set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);

        /*
         * Check controller capabilities
         *
         * todo:- add code to check if ctrl attributes changed from
         * prior connection values
         */

        ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
        if (ret) {
                dev_err(ctrl->ctrl.device,
                        "prop_get NVME_REG_CAP failed\n");
                goto out_disconnect_admin_queue;
        }

        ctrl->ctrl.sqsize =
                min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);

        ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
        if (ret)
                goto out_disconnect_admin_queue;

        ctrl->ctrl.max_hw_sectors =
                (ctrl->lport->ops->max_sgl_segments - 1) << (PAGE_SHIFT - 9);

        ret = nvme_init_identify(&ctrl->ctrl);
        if (ret)
                goto out_disconnect_admin_queue;

        /* sanity checks */

        /* FC-NVME does not have other data in the capsule */
        if (ctrl->ctrl.icdoff) {
                dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
                                ctrl->ctrl.icdoff);
                goto out_disconnect_admin_queue;
        }

        /* FC-NVME supports normal SGL Data Block Descriptors */

        if (opts->queue_size > ctrl->ctrl.maxcmd) {
                /* warn if maxcmd is lower than queue_size */
                dev_warn(ctrl->ctrl.device,
                        "queue_size %zu > ctrl maxcmd %u, reducing "
                        "to maxcmd\n",
                        opts->queue_size, ctrl->ctrl.maxcmd);
                opts->queue_size = ctrl->ctrl.maxcmd;
        }

        if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
                /* warn if sqsize is lower than queue_size */
                dev_warn(ctrl->ctrl.device,
                        "queue_size %zu > ctrl sqsize %u, clamping down\n",
                        opts->queue_size, ctrl->ctrl.sqsize + 1);
                opts->queue_size = ctrl->ctrl.sqsize + 1;
        }

        ret = nvme_fc_init_aen_ops(ctrl);
        if (ret)
                goto out_term_aen_ops;

        /*
         * Create the io queues
         */

        if (ctrl->ctrl.queue_count > 1) {
                if (!ctrl->ioq_live)
                        ret = nvme_fc_create_io_queues(ctrl);
                else
                        ret = nvme_fc_recreate_io_queues(ctrl);
                if (ret)
                        goto out_term_aen_ops;
        }

        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);

        ctrl->ctrl.nr_reconnects = 0;

        if (changed)
                nvme_start_ctrl(&ctrl->ctrl);

        return 0;       /* Success */

out_term_aen_ops:
        nvme_fc_term_aen_ops(ctrl);
out_disconnect_admin_queue:
        /* send a Disconnect(association) LS to fc-nvme target */
        nvme_fc_xmt_disconnect_assoc(ctrl);
out_delete_hw_queue:
        __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
out_free_queue:
        nvme_fc_free_queue(&ctrl->queues[0]);
        ctrl->assoc_active = false;
        nvme_fc_ctlr_inactive_on_rport(ctrl);

        return ret;
}

/*
 * This routine stops operation of the controller on the host side.
 * On the host OS stack side: Admin and IO queues are stopped,
 *   outstanding ios on them terminated via FC ABTS.
 * On the link side: the association is terminated.
 */
static void
nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
{
        unsigned long flags;

        if (!ctrl->assoc_active)
                return;
        ctrl->assoc_active = false;

        spin_lock_irqsave(&ctrl->lock, flags);
        ctrl->flags |= FCCTRL_TERMIO;
        ctrl->iocnt = 0;
        spin_unlock_irqrestore(&ctrl->lock, flags);

        /*
         * If io queues are present, stop them and terminate all outstanding
         * ios on them. As FC allocates FC exchange for each io, the
         * transport must contact the LLDD to terminate the exchange,
         * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
         * to tell us what ios are busy and invoke a transport routine
         * to kill them with the LLDD. After terminating the exchange
         * the LLDD will call the transport's normal io done path, but it
         * will have an aborted status. The done path will return the
         * io requests back to the block layer as part of normal completions
         * (but with error status).
         */
        if (ctrl->ctrl.queue_count > 1) {
                nvme_stop_queues(&ctrl->ctrl);
                blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                nvme_fc_terminate_exchange, &ctrl->ctrl);
        }

        /*
         * Other transports, which don't have link-level contexts bound
         * to sqe's, would try to gracefully shutdown the controller by
         * writing the registers for shutdown and polling (call
         * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
         * just aborted and we will wait on those contexts, and given
         * there was no indication of how live the controller is on the
         * link, don't send more io to create more contexts for the
         * shutdown. Let the controller fail via keepalive failure if
         * it's still present.
         */

        /*
         * clean up the admin queue. Same thing as above.
         * use blk_mq_tagset_busy_iter() and the transport routine to
         * terminate the exchanges.
         */
        blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
        blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                        nvme_fc_terminate_exchange, &ctrl->ctrl);

        /* kill the aens as they are a separate path */
        nvme_fc_abort_aen_ops(ctrl);

        /* wait for all io that had to be aborted */
        spin_lock_irq(&ctrl->lock);
        wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
        ctrl->flags &= ~FCCTRL_TERMIO;
        spin_unlock_irq(&ctrl->lock);

        nvme_fc_term_aen_ops(ctrl);

        /*
         * send a Disconnect(association) LS to fc-nvme target
         * Note: could have been sent at top of process, but
         * cleaner on link traffic if after the aborts complete.
         * Note: if association doesn't exist, association_id will be 0
         */
        if (ctrl->association_id)
                nvme_fc_xmt_disconnect_assoc(ctrl);

        if (ctrl->ctrl.tagset) {
                nvme_fc_delete_hw_io_queues(ctrl);
                nvme_fc_free_io_queues(ctrl);
        }

        __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
        nvme_fc_free_queue(&ctrl->queues[0]);

        /* re-enable the admin_q so anything new can fast fail */
        blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

        /* resume the io queues so that things will fast fail */
        nvme_start_queues(&ctrl->ctrl);

        nvme_fc_ctlr_inactive_on_rport(ctrl);
}

static void
nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
        struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

        cancel_delayed_work_sync(&ctrl->connect_work);
        /*
         * kill the association on the link side. This will block
         * waiting for io to terminate
         */
        nvme_fc_delete_association(ctrl);
}

static void
nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
{
        struct nvme_fc_rport *rport = ctrl->rport;
        struct nvme_fc_remote_port *portptr = &rport->remoteport;
        unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
        bool recon = true;

        if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
                return;

        if (portptr->port_state == FC_OBJSTATE_ONLINE)
                dev_info(ctrl->ctrl.device,
                        "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
                        ctrl->cnum, status);
        else if (time_after_eq(jiffies, rport->dev_loss_end))
                recon = false;

        if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
                if (portptr->port_state == FC_OBJSTATE_ONLINE)
                        dev_info(ctrl->ctrl.device,
                                "NVME-FC{%d}: Reconnect attempt in %ld "
                                "seconds\n",
                                ctrl->cnum, recon_delay / HZ);
                else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
                        recon_delay = rport->dev_loss_end - jiffies;

                queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
        } else {
                if (portptr->port_state == FC_OBJSTATE_ONLINE)
                        dev_warn(ctrl->ctrl.device,
                                "NVME-FC{%d}: Max reconnect attempts (%d) "
                                "reached.\n",
                                ctrl->cnum, ctrl->ctrl.nr_reconnects);
                else
                        dev_warn(ctrl->ctrl.device,
                                "NVME-FC{%d}: dev_loss_tmo (%d) expired "
                                "while waiting for remoteport connectivity.\n",
                                ctrl->cnum, portptr->dev_loss_tmo);
                WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
        }
}

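/*
 * Worked example for the delay clamp above (values hypothetical):
 * with reconnect_delay=10s and ~5s left until rport->dev_loss_end,
 * jiffies + recon_delay would land past dev_loss_end, so recon_delay
 * is shortened to the ~5s remainder and connect_work fires at
 * dev_loss_end rather than after it.
 */
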
static void
nvme_fc_reset_ctrl_work(struct work_struct *work)
{
        struct nvme_fc_ctrl *ctrl =
                container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
        int ret;

        nvme_stop_ctrl(&ctrl->ctrl);

        /* will block while waiting for io to terminate */
        nvme_fc_delete_association(ctrl);

        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
                dev_err(ctrl->ctrl.device,
                        "NVME-FC{%d}: error_recovery: Couldn't change state "
                        "to CONNECTING\n", ctrl->cnum);
                return;
        }

        if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
                ret = nvme_fc_create_association(ctrl);
        else
                ret = -ENOTCONN;

        if (ret)
                nvme_fc_reconnect_or_delete(ctrl, ret);
        else
                dev_info(ctrl->ctrl.device,
                        "NVME-FC{%d}: controller reset complete\n",
                        ctrl->cnum);
}

static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
        .name                   = "fc",
        .module                 = THIS_MODULE,
        .flags                  = NVME_F_FABRICS,
        .reg_read32             = nvmf_reg_read32,
        .reg_read64             = nvmf_reg_read64,
        .reg_write32            = nvmf_reg_write32,
        .free_ctrl              = nvme_fc_nvme_ctrl_freed,
        .submit_async_event     = nvme_fc_submit_async_event,
        .delete_ctrl            = nvme_fc_delete_ctrl,
        .get_address            = nvmf_get_address,
};

static void
nvme_fc_connect_ctrl_work(struct work_struct *work)
{
        int ret;

        struct nvme_fc_ctrl *ctrl =
                        container_of(to_delayed_work(work),
                                struct nvme_fc_ctrl, connect_work);

        ret = nvme_fc_create_association(ctrl);
        if (ret)
                nvme_fc_reconnect_or_delete(ctrl, ret);
        else
                dev_info(ctrl->ctrl.device,
                        "NVME-FC{%d}: controller connect complete\n",
                        ctrl->cnum);
}

static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
        .queue_rq       = nvme_fc_queue_rq,
        .complete       = nvme_fc_complete_rq,
        .init_request   = nvme_fc_init_request,
        .exit_request   = nvme_fc_exit_request,
        .init_hctx      = nvme_fc_init_admin_hctx,
        .timeout        = nvme_fc_timeout,
};

/*
 * Fails a controller request if it matches an existing controller
 * (association) with the same tuple:
 * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN>
 *
 * The ports don't need to be compared as they are intrinsically
 * already matched by the port pointers supplied.
 */
static bool
nvme_fc_existing_controller(struct nvme_fc_rport *rport,
                struct nvmf_ctrl_options *opts)
{
        struct nvme_fc_ctrl *ctrl;
        unsigned long flags;
        bool found = false;

        spin_lock_irqsave(&rport->lock, flags);
        list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
                found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
                if (found)
                        break;
        }
        spin_unlock_irqrestore(&rport->lock, flags);

        return found;
}

static struct nvme_ctrl *
nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
        struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
{
        struct nvme_fc_ctrl *ctrl;
        unsigned long flags;
        int ret, idx;

        if (!(rport->remoteport.port_role &
            (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
                ret = -EBADR;
                goto out_fail;
        }

        if (!opts->duplicate_connect &&
            nvme_fc_existing_controller(rport, opts)) {
                ret = -EALREADY;
                goto out_fail;
        }

        ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
        if (!ctrl) {
                ret = -ENOMEM;
                goto out_fail;
        }

        idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
        if (idx < 0) {
                ret = -ENOSPC;
                goto out_free_ctrl;
        }

        ctrl->ctrl.opts = opts;
        ctrl->ctrl.nr_reconnects = 0;
        INIT_LIST_HEAD(&ctrl->ctrl_list);
        ctrl->lport = lport;
        ctrl->rport = rport;
        ctrl->dev = lport->dev;
        ctrl->cnum = idx;
        ctrl->ioq_live = false;
        ctrl->assoc_active = false;
        init_waitqueue_head(&ctrl->ioabort_wait);

        get_device(ctrl->dev);
        kref_init(&ctrl->ref);

        INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
        INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
        spin_lock_init(&ctrl->lock);

        /* io queue count */
        ctrl->ctrl.queue_count = min_t(unsigned int,
                                opts->nr_io_queues,
                                lport->ops->max_hw_queues);
        ctrl->ctrl.queue_count++;       /* +1 for admin queue */

        ctrl->ctrl.sqsize = opts->queue_size - 1;
        ctrl->ctrl.kato = opts->kato;
        ctrl->ctrl.cntlid = 0xffff;

        ret = -ENOMEM;
        ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
                                sizeof(struct nvme_fc_queue), GFP_KERNEL);
        if (!ctrl->queues)
                goto out_free_ida;

        nvme_fc_init_queue(ctrl, 0);

        memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
        ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
        ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
        ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
        ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
        ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
                                        (SG_CHUNK_SIZE *
                                                sizeof(struct scatterlist)) +
                                        ctrl->lport->ops->fcprqst_priv_sz;
        ctrl->admin_tag_set.driver_data = ctrl;
        ctrl->admin_tag_set.nr_hw_queues = 1;
        ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
        ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

        ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
        if (ret)
                goto out_free_queues;
        ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

        ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
        if (IS_ERR(ctrl->ctrl.admin_q)) {
                ret = PTR_ERR(ctrl->ctrl.admin_q);
                goto out_free_admin_tag_set;
        }

        /*
         * Would have been nice to init io queues tag set as well.
         * However, we require interaction from the controller
         * for max io queue count before we can do so.
         * Defer this to the connect path.
         */

        ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
        if (ret)
                goto out_cleanup_admin_q;

        /* at this point, teardown path changes to ref counting on nvme ctrl */

        spin_lock_irqsave(&rport->lock, flags);
        list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
        spin_unlock_irqrestore(&rport->lock, flags);

        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
            !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
                dev_err(ctrl->ctrl.device,
                        "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
                goto fail_ctrl;
        }

        nvme_get_ctrl(&ctrl->ctrl);

        if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
                nvme_put_ctrl(&ctrl->ctrl);
                dev_err(ctrl->ctrl.device,
                        "NVME-FC{%d}: failed to schedule initial connect\n",
                        ctrl->cnum);
                goto fail_ctrl;
        }

        flush_delayed_work(&ctrl->connect_work);

        dev_info(ctrl->ctrl.device,
                "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
                ctrl->cnum, ctrl->ctrl.opts->subsysnqn);

        return &ctrl->ctrl;

fail_ctrl:
        nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
        cancel_work_sync(&ctrl->ctrl.reset_work);
        cancel_delayed_work_sync(&ctrl->connect_work);

        ctrl->ctrl.opts = NULL;

        /* initiate nvme ctrl ref counting teardown */
        nvme_uninit_ctrl(&ctrl->ctrl);

        /* Remove core ctrl ref. */
        nvme_put_ctrl(&ctrl->ctrl);

        /* as we're past the point where we transition to the ref
         * counting teardown path, if we return a bad pointer here,
         * the calling routine, thinking it's prior to the
         * transition, will do an rport put. Since the teardown
         * path also does a rport put, we do an extra get here
         * so proper order/teardown happens.
         */
        nvme_fc_rport_get(rport);

        return ERR_PTR(-EIO);

out_cleanup_admin_q:
        blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_admin_tag_set:
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_queues:
        kfree(ctrl->queues);
out_free_ida:
        put_device(ctrl->dev);
        ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
out_free_ctrl:
        kfree(ctrl);
out_fail:
        /* exit via here doesn't follow ctlr ref points */
        return ERR_PTR(ret);
}

struct nvmet_fc_traddr {
        u64     nn;
        u64     pn;
};

static int
__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
        u64 token64;

        if (match_u64(sstr, &token64))
                return -EINVAL;
        *val = token64;

        return 0;
}

/*
 * This routine validates and extracts the WWNs from the TRADDR string.
 * As kernel parsers need the 0x to determine number base, universally
 * build string to parse with 0x prefix before parsing name strings.
 */
static int
nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
{
        char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
        substring_t wwn = { name, &name[sizeof(name)-1] };
        int nnoffset, pnoffset;

        /* validate the string is one of the 2 allowed formats */
        if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
                        !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
                        !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
                                "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
                nnoffset = NVME_FC_TRADDR_OXNNLEN;
                pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
                                                NVME_FC_TRADDR_OXNNLEN;
        } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
                        !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
                        !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
                                "pn-", NVME_FC_TRADDR_NNLEN))) {
                nnoffset = NVME_FC_TRADDR_NNLEN;
                pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
        } else
                goto out_einval;

        name[0] = '0';
        name[1] = 'x';
        name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;

        memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
        if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
                goto out_einval;

        memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
        if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
                goto out_einval;

        return 0;

out_einval:
        pr_warn("%s: bad traddr string\n", __func__);
        return -EINVAL;
}

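/*
 * Example traddr strings accepted above (WWN values hypothetical):
 *
 *   "nn-0x20000090fa942779:pn-0x10000090fa942779"
 *       0x-prefixed form, NVME_FC_TRADDR_MAXLENGTH bytes
 *   "nn-20000090fa942779:pn-10000090fa942779"
 *       bare-hex form, NVME_FC_TRADDR_MINLENGTH bytes
 */
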
static struct nvme_ctrl *
nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
{
        struct nvme_fc_lport *lport;
        struct nvme_fc_rport *rport;
        struct nvme_ctrl *ctrl;
        struct nvmet_fc_traddr laddr = { 0L, 0L };
        struct nvmet_fc_traddr raddr = { 0L, 0L };
        unsigned long flags;
        int ret;

        ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
        if (ret || !raddr.nn || !raddr.pn)
                return ERR_PTR(-EINVAL);

        ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
        if (ret || !laddr.nn || !laddr.pn)
                return ERR_PTR(-EINVAL);

        /* find the host and remote ports to connect together */
        spin_lock_irqsave(&nvme_fc_lock, flags);
        list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
                if (lport->localport.node_name != laddr.nn ||
                    lport->localport.port_name != laddr.pn)
                        continue;

                list_for_each_entry(rport, &lport->endp_list, endp_list) {
                        if (rport->remoteport.node_name != raddr.nn ||
                            rport->remoteport.port_name != raddr.pn)
                                continue;

                        /* if fail to get reference fall through. Will error */
                        if (!nvme_fc_rport_get(rport))
                                break;

                        spin_unlock_irqrestore(&nvme_fc_lock, flags);

                        ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
                        if (IS_ERR(ctrl))
                                nvme_fc_rport_put(rport);
                        return ctrl;
                }
        }
        spin_unlock_irqrestore(&nvme_fc_lock, flags);

        pr_warn("%s: %s - %s combination not found\n",
                __func__, opts->traddr, opts->host_traddr);
        return ERR_PTR(-ENOENT);
}

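/*
 * Example of how this lookup is typically exercised from userspace
 * (WWNs hypothetical): nvme-cli writes the options to the fabrics
 * device, which resolves to this transport's create_ctrl:
 *
 *   nvme connect --transport=fc \
 *       --host-traddr="nn-0x20000090fa8e5f84:pn-0x10000090fa8e5f84" \
 *       --traddr="nn-0x20000090fa942779:pn-0x10000090fa942779" \
 *       --nqn=<subsystem nqn>
 */
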
static struct nvmf_transport_ops nvme_fc_transport = {
        .name           = "fc",
        .module         = THIS_MODULE,
        .required_opts  = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
        .allowed_opts   = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
        .create_ctrl    = nvme_fc_create_ctrl,
};

static int __init nvme_fc_init_module(void)
{
        int ret;

        /*
         * NOTE:
         * It is expected that in the future the kernel will combine
         * the FC-isms that are currently under scsi and now being
         * added to by NVME into a new standalone FC class. The SCSI
         * and NVME protocols and their devices would be under this
         * new FC class.
         *
         * As we need something to post FC-specific udev events to,
         * specifically for nvme probe events, start by creating the
         * new device class. When the new standalone FC class is
         * put in place, this code will move to a more generic
         * location for the class.
         */
        fc_class = class_create(THIS_MODULE, "fc");
        if (IS_ERR(fc_class)) {
                pr_err("couldn't register class fc\n");
                return PTR_ERR(fc_class);
        }

        /*
         * Create a device for the FC-centric udev events
         */
        fc_udev_device = device_create(fc_class, NULL, MKDEV(0, 0), NULL,
                                "fc_udev_device");
        if (IS_ERR(fc_udev_device)) {
                pr_err("couldn't create fc_udev device!\n");
                ret = PTR_ERR(fc_udev_device);
                goto out_destroy_class;
        }

        ret = nvmf_register_transport(&nvme_fc_transport);
        if (ret)
                goto out_destroy_device;

        return 0;

out_destroy_device:
        device_destroy(fc_class, MKDEV(0, 0));
out_destroy_class:
        class_destroy(fc_class);
        return ret;
}

static void __exit nvme_fc_exit_module(void)
{
        /* sanity check - all lports should be removed */
        if (!list_empty(&nvme_fc_lport_list))
                pr_warn("%s: localport list not empty\n", __func__);

        nvmf_unregister_transport(&nvme_fc_transport);

        ida_destroy(&nvme_fc_local_port_cnt);
        ida_destroy(&nvme_fc_ctrl_cnt);

        device_destroy(fc_class, MKDEV(0, 0));
        class_destroy(fc_class);
}

module_init(nvme_fc_init_module);
module_exit(nvme_fc_exit_module);

MODULE_LICENSE("GPL v2");