1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2016 Avago Technologies. All rights reserved.
5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/module.h>
7 #include <linux/parser.h>
8 #include <uapi/scsi/fc/fc_fs.h>
9 #include <uapi/scsi/fc/fc_els.h>
10 #include <linux/delay.h>
11 #include <linux/overflow.h>
15 #include <linux/nvme-fc-driver.h>
16 #include <linux/nvme-fc.h>
18 #include <scsi/scsi_transport_fc.h>
20 /* *************************** Data Structures/Defines ****************** */
23 enum nvme_fc_queue_flags {
24 NVME_FC_Q_CONNECTED = 0,
28 #define NVME_FC_DEFAULT_DEV_LOSS_TMO 60 /* seconds */
30 struct nvme_fc_queue {
31 struct nvme_fc_ctrl *ctrl;
33 struct blk_mq_hw_ctx *hctx;
35 size_t cmnd_capsule_len;
44 } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
46 enum nvme_fcop_flags {
47 FCOP_FLAGS_TERMIO = (1 << 0),
48 FCOP_FLAGS_AEN = (1 << 1),
51 struct nvmefc_ls_req_op {
52 struct nvmefc_ls_req ls_req;
54 struct nvme_fc_rport *rport;
55 struct nvme_fc_queue *queue;
60 struct completion ls_done;
61 struct list_head lsreq_list; /* rport->ls_req_list */
65 struct nvmefc_ls_rcv_op {
66 struct nvme_fc_rport *rport;
67 struct nvmefc_ls_rsp *lsrsp;
68 union nvmefc_ls_requests *rqstbuf;
69 union nvmefc_ls_responses *rspbuf;
73 struct list_head lsrcv_list; /* rport->ls_rcv_list */
74 } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
76 enum nvme_fcpop_state {
77 FCPOP_STATE_UNINIT = 0,
79 FCPOP_STATE_ACTIVE = 2,
80 FCPOP_STATE_ABORTED = 3,
81 FCPOP_STATE_COMPLETE = 4,
84 struct nvme_fc_fcp_op {
85 struct nvme_request nreq; /*
88 * the 1st element in the
93 struct nvmefc_fcp_req fcp_req;
95 struct nvme_fc_ctrl *ctrl;
96 struct nvme_fc_queue *queue;
104 struct nvme_fc_cmd_iu cmd_iu;
105 struct nvme_fc_ersp_iu rsp_iu;
108 struct nvme_fcp_op_w_sgl {
109 struct nvme_fc_fcp_op op;
110 struct scatterlist sgl[NVME_INLINE_SG_CNT];
114 struct nvme_fc_lport {
115 struct nvme_fc_local_port localport;
118 struct list_head port_list; /* nvme_fc_port_list */
119 struct list_head endp_list;
120 struct device *dev; /* physical device for dma */
121 struct nvme_fc_port_template *ops;
123 atomic_t act_rport_cnt;
124 } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
126 struct nvme_fc_rport {
127 struct nvme_fc_remote_port remoteport;
129 struct list_head endp_list; /* for lport->endp_list */
130 struct list_head ctrl_list;
131 struct list_head ls_req_list;
132 struct list_head ls_rcv_list;
133 struct list_head disc_list;
134 struct device *dev; /* physical device for dma */
135 struct nvme_fc_lport *lport;
138 atomic_t act_ctrl_cnt;
139 unsigned long dev_loss_end;
140 struct work_struct lsrcv_work;
141 } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
143 /* fc_ctrl flags values - specified as bit positions */
144 #define ASSOC_ACTIVE 0
145 #define FCCTRL_TERMIO 1
147 struct nvme_fc_ctrl {
149 struct nvme_fc_queue *queues;
151 struct nvme_fc_lport *lport;
152 struct nvme_fc_rport *rport;
156 atomic_t err_work_active;
158 struct nvmefc_ls_rcv_op *rcv_disconn;
160 struct list_head ctrl_list; /* rport->ctrl_list */
162 struct blk_mq_tag_set admin_tag_set;
163 struct blk_mq_tag_set tag_set;
165 struct delayed_work connect_work;
166 struct work_struct err_work;
171 wait_queue_head_t ioabort_wait;
173 struct nvme_fc_fcp_op aen_ops[NVME_NR_AEN_COMMANDS];
175 struct nvme_ctrl ctrl;
178 static inline struct nvme_fc_ctrl *
179 to_fc_ctrl(struct nvme_ctrl *ctrl)
181 return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
184 static inline struct nvme_fc_lport *
185 localport_to_lport(struct nvme_fc_local_port *portptr)
187 return container_of(portptr, struct nvme_fc_lport, localport);
190 static inline struct nvme_fc_rport *
191 remoteport_to_rport(struct nvme_fc_remote_port *portptr)
193 return container_of(portptr, struct nvme_fc_rport, remoteport);
196 static inline struct nvmefc_ls_req_op *
197 ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
199 return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
202 static inline struct nvme_fc_fcp_op *
203 fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
205 return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
210 /* *************************** Globals **************************** */
213 static DEFINE_SPINLOCK(nvme_fc_lock);
215 static LIST_HEAD(nvme_fc_lport_list);
216 static DEFINE_IDA(nvme_fc_local_port_cnt);
217 static DEFINE_IDA(nvme_fc_ctrl_cnt);
219 static struct workqueue_struct *nvme_fc_wq;
221 static bool nvme_fc_waiting_to_unload;
222 static DECLARE_COMPLETION(nvme_fc_unload_proceed);
225 * These items are short-term. They will eventually be moved into
226 * a generic FC class. See comments in module init.
228 static struct device *fc_udev_device;
230 static void nvme_fc_complete_rq(struct request *rq);
232 /* *********************** FC-NVME Port Management ************************ */
234 static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
235 struct nvme_fc_queue *, unsigned int);
237 static void nvme_fc_handle_ls_rqst_work(struct work_struct *work);
241 nvme_fc_free_lport(struct kref *ref)
243 struct nvme_fc_lport *lport =
244 container_of(ref, struct nvme_fc_lport, ref);
247 WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
248 WARN_ON(!list_empty(&lport->endp_list));
250 /* remove from transport list */
251 spin_lock_irqsave(&nvme_fc_lock, flags);
252 list_del(&lport->port_list);
253 if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
254 complete(&nvme_fc_unload_proceed);
255 spin_unlock_irqrestore(&nvme_fc_lock, flags);
257 ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
258 ida_destroy(&lport->endp_cnt);
260 put_device(lport->dev);
266 nvme_fc_lport_put(struct nvme_fc_lport *lport)
268 kref_put(&lport->ref, nvme_fc_free_lport);
272 nvme_fc_lport_get(struct nvme_fc_lport *lport)
274 return kref_get_unless_zero(&lport->ref);
278 static struct nvme_fc_lport *
279 nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
280 struct nvme_fc_port_template *ops,
283 struct nvme_fc_lport *lport;
286 spin_lock_irqsave(&nvme_fc_lock, flags);
288 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
289 if (lport->localport.node_name != pinfo->node_name ||
290 lport->localport.port_name != pinfo->port_name)
293 if (lport->dev != dev) {
294 lport = ERR_PTR(-EXDEV);
298 if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
299 lport = ERR_PTR(-EEXIST);
303 if (!nvme_fc_lport_get(lport)) {
305 * fails if ref cnt already 0. If so,
306 * act as if lport already deleted
312 /* resume the lport */
315 lport->localport.port_role = pinfo->port_role;
316 lport->localport.port_id = pinfo->port_id;
317 lport->localport.port_state = FC_OBJSTATE_ONLINE;
319 spin_unlock_irqrestore(&nvme_fc_lock, flags);
327 spin_unlock_irqrestore(&nvme_fc_lock, flags);
333 * nvme_fc_register_localport - transport entry point called by an
334 * LLDD to register the existence of a NVME
336 * @pinfo: pointer to information about the port to be registered
337 * @template: LLDD entrypoints and operational parameters for the port
338 * @dev: physical hardware device node port corresponds to. Will be
339 * used for DMA mappings
340 * @portptr: pointer to a local port pointer. Upon success, the routine
341 * will allocate a nvme_fc_local_port structure and place its
342 * address in the local port pointer. Upon failure, local port
343 * pointer will be set to 0.
346 * a completion status. Must be 0 upon success; a negative errno
347 * (ex: -ENXIO) upon failure.
350 nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
351 struct nvme_fc_port_template *template,
353 struct nvme_fc_local_port **portptr)
355 struct nvme_fc_lport *newrec;
359 if (!template->localport_delete || !template->remoteport_delete ||
360 !template->ls_req || !template->fcp_io ||
361 !template->ls_abort || !template->fcp_abort ||
362 !template->max_hw_queues || !template->max_sgl_segments ||
363 !template->max_dif_sgl_segments || !template->dma_boundary) {
365 goto out_reghost_failed;
369 * look to see if there is already a localport that had been
370 * deregistered and in the process of waiting for all the
371 * references to fully be removed. If the references haven't
372 * expired, we can simply re-enable the localport. Remoteports
373 * and controller reconnections should resume naturally.
375 newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);
377 /* found an lport, but something about its state is bad */
378 if (IS_ERR(newrec)) {
379 ret = PTR_ERR(newrec);
380 goto out_reghost_failed;
382 /* found existing lport, which was resumed */
384 *portptr = &newrec->localport;
388 /* nothing found - allocate a new localport struct */
390 newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
394 goto out_reghost_failed;
397 idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
403 if (!get_device(dev) && dev) {
408 INIT_LIST_HEAD(&newrec->port_list);
409 INIT_LIST_HEAD(&newrec->endp_list);
410 kref_init(&newrec->ref);
411 atomic_set(&newrec->act_rport_cnt, 0);
412 newrec->ops = template;
414 ida_init(&newrec->endp_cnt);
415 if (template->local_priv_sz)
416 newrec->localport.private = &newrec[1];
418 newrec->localport.private = NULL;
419 newrec->localport.node_name = pinfo->node_name;
420 newrec->localport.port_name = pinfo->port_name;
421 newrec->localport.port_role = pinfo->port_role;
422 newrec->localport.port_id = pinfo->port_id;
423 newrec->localport.port_state = FC_OBJSTATE_ONLINE;
424 newrec->localport.port_num = idx;
426 spin_lock_irqsave(&nvme_fc_lock, flags);
427 list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
428 spin_unlock_irqrestore(&nvme_fc_lock, flags);
431 dma_set_seg_boundary(dev, template->dma_boundary);
433 *portptr = &newrec->localport;
437 ida_simple_remove(&nvme_fc_local_port_cnt, idx);
445 EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
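/*
 * Illustrative sketch (not part of this file): an initiator LLDD would
 * typically register its FC port from its probe/attach path. The template
 * name "lldd_fcnvme_template" and the device pointer "hw_dev" below are
 * assumptions for the example only:
 *
 *	struct nvme_fc_port_info pinfo = {
 *		.node_name = wwnn,
 *		.port_name = wwpn,
 *		.port_role = FC_PORT_ROLE_NVME_INITIATOR,
 *	};
 *	struct nvme_fc_local_port *localport;
 *	int ret;
 *
 *	ret = nvme_fc_register_localport(&pinfo, &lldd_fcnvme_template,
 *					 hw_dev, &localport);
 *	if (ret)
 *		return ret;
 */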
448 * nvme_fc_unregister_localport - transport entry point called by an
449 * LLDD to deregister/remove a previously
450 registered NVME host FC port.
451 * @portptr: pointer to the (registered) local port that is to be deregistered.
454 * a completion status. Must be 0 upon success; a negative errno
455 * (ex: -ENXIO) upon failure.
458 nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
460 struct nvme_fc_lport *lport = localport_to_lport(portptr);
466 spin_lock_irqsave(&nvme_fc_lock, flags);
468 if (portptr->port_state != FC_OBJSTATE_ONLINE) {
469 spin_unlock_irqrestore(&nvme_fc_lock, flags);
472 portptr->port_state = FC_OBJSTATE_DELETED;
474 spin_unlock_irqrestore(&nvme_fc_lock, flags);
476 if (atomic_read(&lport->act_rport_cnt) == 0)
477 lport->ops->localport_delete(&lport->localport);
479 nvme_fc_lport_put(lport);
483 EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);
486 * TRADDR strings, per FC-NVME are fixed format:
487 * "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
488 * udev event will only differ by prefix of what field is
490 * "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
491 * 19 + 43 + null_fudge = 64 characters
493 #define FCNVME_TRADDR_LENGTH 64
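/*
 * Example of the resulting uevent environment (WWN values below are
 * illustrative only):
 *	FC_EVENT=nvmediscovery
 *	NVMEFC_HOST_TRADDR=nn-0x200000109b123456:pn-0x100000109b123456
 *	NVMEFC_TRADDR=nn-0x200100a098000012:pn-0x200200a098000012
 */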
496 nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
497 struct nvme_fc_rport *rport)
499 char hostaddr[FCNVME_TRADDR_LENGTH]; /* NVMEFC_HOST_TRADDR=...*/
500 char tgtaddr[FCNVME_TRADDR_LENGTH]; /* NVMEFC_TRADDR=...*/
501 char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };
503 if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
506 snprintf(hostaddr, sizeof(hostaddr),
507 "NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
508 lport->localport.node_name, lport->localport.port_name);
509 snprintf(tgtaddr, sizeof(tgtaddr),
510 "NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
511 rport->remoteport.node_name, rport->remoteport.port_name);
512 kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
516 nvme_fc_free_rport(struct kref *ref)
518 struct nvme_fc_rport *rport =
519 container_of(ref, struct nvme_fc_rport, ref);
520 struct nvme_fc_lport *lport =
521 localport_to_lport(rport->remoteport.localport);
524 WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
525 WARN_ON(!list_empty(&rport->ctrl_list));
527 /* remove from lport list */
528 spin_lock_irqsave(&nvme_fc_lock, flags);
529 list_del(&rport->endp_list);
530 spin_unlock_irqrestore(&nvme_fc_lock, flags);
532 WARN_ON(!list_empty(&rport->disc_list));
533 ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
537 nvme_fc_lport_put(lport);
541 nvme_fc_rport_put(struct nvme_fc_rport *rport)
543 kref_put(&rport->ref, nvme_fc_free_rport);
547 nvme_fc_rport_get(struct nvme_fc_rport *rport)
549 return kref_get_unless_zero(&rport->ref);
553 nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
555 switch (ctrl->ctrl.state) {
557 case NVME_CTRL_CONNECTING:
559 * As all reconnects were suppressed, schedule a
562 dev_info(ctrl->ctrl.device,
563 "NVME-FC{%d}: connectivity re-established. "
564 "Attempting reconnect\n", ctrl->cnum);
566 queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
569 case NVME_CTRL_RESETTING:
571 * Controller is already in the process of terminating the
572 * association. No need to do anything further. The reconnect
573 * step will naturally occur after the reset completes.
578 /* no action to take - let it delete */
583 static struct nvme_fc_rport *
584 nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
585 struct nvme_fc_port_info *pinfo)
587 struct nvme_fc_rport *rport;
588 struct nvme_fc_ctrl *ctrl;
591 spin_lock_irqsave(&nvme_fc_lock, flags);
593 list_for_each_entry(rport, &lport->endp_list, endp_list) {
594 if (rport->remoteport.node_name != pinfo->node_name ||
595 rport->remoteport.port_name != pinfo->port_name)
598 if (!nvme_fc_rport_get(rport)) {
599 rport = ERR_PTR(-ENOLCK);
603 spin_unlock_irqrestore(&nvme_fc_lock, flags);
605 spin_lock_irqsave(&rport->lock, flags);
607 /* has it been unregistered */
608 if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
609 /* means lldd called us twice */
610 spin_unlock_irqrestore(&rport->lock, flags);
611 nvme_fc_rport_put(rport);
612 return ERR_PTR(-ESTALE);
615 rport->remoteport.port_role = pinfo->port_role;
616 rport->remoteport.port_id = pinfo->port_id;
617 rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
618 rport->dev_loss_end = 0;
621 * kick off a reconnect attempt on all associations to the
622 * remote port. A successful reconnect will resume i/o.
624 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
625 nvme_fc_resume_controller(ctrl);
627 spin_unlock_irqrestore(&rport->lock, flags);
635 spin_unlock_irqrestore(&nvme_fc_lock, flags);
641 __nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
642 struct nvme_fc_port_info *pinfo)
644 if (pinfo->dev_loss_tmo)
645 rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
647 rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
651 * nvme_fc_register_remoteport - transport entry point called by an
652 * LLDD to register the existence of a NVME
653 * subsystem FC port on its fabric.
654 * @localport: pointer to the (registered) local port that the remote
655 * subsystem port is connected to.
656 * @pinfo: pointer to information about the port to be registered
657 * @portptr: pointer to a remote port pointer. Upon success, the routine
658 * will allocate a nvme_fc_remote_port structure and place its
659 * address in the remote port pointer. Upon failure, remote port
660 * pointer will be set to 0.
663 * a completion status. Must be 0 upon success; a negative errno
664 * (ex: -ENXIO) upon failure.
667 nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
668 struct nvme_fc_port_info *pinfo,
669 struct nvme_fc_remote_port **portptr)
671 struct nvme_fc_lport *lport = localport_to_lport(localport);
672 struct nvme_fc_rport *newrec;
676 if (!nvme_fc_lport_get(lport)) {
678 goto out_reghost_failed;
682 * look to see if there is already a remoteport that is waiting
683 * for a reconnect (within dev_loss_tmo) with the same WWN's.
684 * If so, transition to it and reconnect.
686 newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);
688 /* found an rport, but something about its state is bad */
689 if (IS_ERR(newrec)) {
690 ret = PTR_ERR(newrec);
693 /* found existing rport, which was resumed */
695 nvme_fc_lport_put(lport);
696 __nvme_fc_set_dev_loss_tmo(newrec, pinfo);
697 nvme_fc_signal_discovery_scan(lport, newrec);
698 *portptr = &newrec->remoteport;
702 /* nothing found - allocate a new remoteport struct */
704 newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
711 idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
714 goto out_kfree_rport;
717 INIT_LIST_HEAD(&newrec->endp_list);
718 INIT_LIST_HEAD(&newrec->ctrl_list);
719 INIT_LIST_HEAD(&newrec->ls_req_list);
720 INIT_LIST_HEAD(&newrec->disc_list);
721 kref_init(&newrec->ref);
722 atomic_set(&newrec->act_ctrl_cnt, 0);
723 spin_lock_init(&newrec->lock);
724 newrec->remoteport.localport = &lport->localport;
725 INIT_LIST_HEAD(&newrec->ls_rcv_list);
726 newrec->dev = lport->dev;
727 newrec->lport = lport;
728 if (lport->ops->remote_priv_sz)
729 newrec->remoteport.private = &newrec[1];
731 newrec->remoteport.private = NULL;
732 newrec->remoteport.port_role = pinfo->port_role;
733 newrec->remoteport.node_name = pinfo->node_name;
734 newrec->remoteport.port_name = pinfo->port_name;
735 newrec->remoteport.port_id = pinfo->port_id;
736 newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
737 newrec->remoteport.port_num = idx;
738 __nvme_fc_set_dev_loss_tmo(newrec, pinfo);
739 INIT_WORK(&newrec->lsrcv_work, nvme_fc_handle_ls_rqst_work);
741 spin_lock_irqsave(&nvme_fc_lock, flags);
742 list_add_tail(&newrec->endp_list, &lport->endp_list);
743 spin_unlock_irqrestore(&nvme_fc_lock, flags);
745 nvme_fc_signal_discovery_scan(lport, newrec);
747 *portptr = &newrec->remoteport;
753 nvme_fc_lport_put(lport);
758 EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
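/*
 * Illustrative sketch (not part of this file): when the LLDD discovers an
 * NVME-capable target port on the fabric, it registers it against the
 * localport it was seen on. "localport", the WWNs and "d_id" below are
 * assumptions for the example only:
 *
 *	struct nvme_fc_port_info rinfo = {
 *		.node_name = target_wwnn,
 *		.port_name = target_wwpn,
 *		.port_role = FC_PORT_ROLE_NVME_TARGET |
 *			     FC_PORT_ROLE_NVME_DISCOVERY,
 *		.port_id = d_id,
 *	};
 *	struct nvme_fc_remote_port *remoteport;
 *
 *	ret = nvme_fc_register_remoteport(localport, &rinfo, &remoteport);
 */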
761 nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
763 struct nvmefc_ls_req_op *lsop;
767 spin_lock_irqsave(&rport->lock, flags);
769 list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
770 if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
771 lsop->flags |= FCOP_FLAGS_TERMIO;
772 spin_unlock_irqrestore(&rport->lock, flags);
773 rport->lport->ops->ls_abort(&rport->lport->localport,
779 spin_unlock_irqrestore(&rport->lock, flags);
785 nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
787 dev_info(ctrl->ctrl.device,
788 "NVME-FC{%d}: controller connectivity lost. Awaiting "
789 "Reconnect", ctrl->cnum);
791 switch (ctrl->ctrl.state) {
795 * Schedule a controller reset. The reset will terminate the
796 * association and schedule the reconnect timer. Reconnects
797 * will be attempted until either the ctlr_loss_tmo
798 * (max_retries * connect_delay) expires or the remoteport's
799 * dev_loss_tmo expires.
801 if (nvme_reset_ctrl(&ctrl->ctrl)) {
802 dev_warn(ctrl->ctrl.device,
803 "NVME-FC{%d}: Couldn't schedule reset.\n",
805 nvme_delete_ctrl(&ctrl->ctrl);
809 case NVME_CTRL_CONNECTING:
811 * The association has already been terminated and the
812 * controller is attempting reconnects. No need to do anything
813 further. Reconnects will be attempted until either the
814 * ctlr_loss_tmo (max_retries * connect_delay) expires or the
815 * remoteport's dev_loss_tmo expires.
819 case NVME_CTRL_RESETTING:
821 * Controller is already in the process of terminating the
822 * association. No need to do anything further. The reconnect
823 * step will kick in naturally after the association is
828 case NVME_CTRL_DELETING:
829 case NVME_CTRL_DELETING_NOIO:
831 /* no action to take - let it delete */
837 * nvme_fc_unregister_remoteport - transport entry point called by an
838 * LLDD to deregister/remove a previously
839 registered NVME subsystem FC port.
840 * @portptr: pointer to the (registered) remote port that is to be
844 * a completion status. Must be 0 upon success; a negative errno
845 * (ex: -ENXIO) upon failure.
848 nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
850 struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
851 struct nvme_fc_ctrl *ctrl;
857 spin_lock_irqsave(&rport->lock, flags);
859 if (portptr->port_state != FC_OBJSTATE_ONLINE) {
860 spin_unlock_irqrestore(&rport->lock, flags);
863 portptr->port_state = FC_OBJSTATE_DELETED;
865 rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);
867 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
868 /* if dev_loss_tmo==0, dev loss is immediate */
869 if (!portptr->dev_loss_tmo) {
870 dev_warn(ctrl->ctrl.device,
871 "NVME-FC{%d}: controller connectivity lost.\n",
873 nvme_delete_ctrl(&ctrl->ctrl);
875 nvme_fc_ctrl_connectivity_loss(ctrl);
878 spin_unlock_irqrestore(&rport->lock, flags);
880 nvme_fc_abort_lsops(rport);
882 if (atomic_read(&rport->act_ctrl_cnt) == 0)
883 rport->lport->ops->remoteport_delete(portptr);
886 * release the reference; once all controllers go away (which
887 * should only occur after dev_loss_tmo expires), the rport can
888 * be torn down.
890 nvme_fc_rport_put(rport);
894 EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
897 * nvme_fc_rescan_remoteport - transport entry point called by an
898 * LLDD to request a nvme device rescan.
899 * @remoteport: pointer to the (registered) remote port that is to be
905 nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
907 struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);
909 nvme_fc_signal_discovery_scan(rport->lport, rport);
911 EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);
914 nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
917 struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
920 spin_lock_irqsave(&rport->lock, flags);
922 if (portptr->port_state != FC_OBJSTATE_ONLINE) {
923 spin_unlock_irqrestore(&rport->lock, flags);
927 /* a dev_loss_tmo of 0 (immediate) is allowed to be set */
928 rport->remoteport.dev_loss_tmo = dev_loss_tmo;
930 spin_unlock_irqrestore(&rport->lock, flags);
934 EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);
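/*
 * Illustrative sketch: an LLDD that wants controllers deleted immediately
 * on loss of connectivity (no dev_loss grace period) could, for example:
 *
 *	nvme_fc_set_remoteport_devloss(remoteport, 0);
 */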
937 /* *********************** FC-NVME DMA Handling **************************** */
940 * The fcloop device passes in a NULL device pointer. Real LLD's will
941 * pass in a valid device pointer. If NULL is passed to the dma mapping
942 * routines, depending on the platform, it may or may not succeed, and
946 * Wrap all the dma routines and check the dev pointer.
948 * For simple mappings (returning just a dma address), we'll noop them,
949 * returning a dma address of 0.
951 * On more complex mappings (dma_map_sg), a pseudo routine fills
952 * in the scatter list, setting all dma addresses to 0.
955 static inline dma_addr_t
956 fc_dma_map_single(struct device *dev, void *ptr, size_t size,
957 enum dma_data_direction dir)
959 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
963 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
965 return dev ? dma_mapping_error(dev, dma_addr) : 0;
969 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
970 enum dma_data_direction dir)
973 dma_unmap_single(dev, addr, size, dir);
977 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
978 enum dma_data_direction dir)
981 dma_sync_single_for_cpu(dev, addr, size, dir);
985 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
986 enum dma_data_direction dir)
989 dma_sync_single_for_device(dev, addr, size, dir);
992 /* pseudo dma_map_sg call */
994 fc_map_sg(struct scatterlist *sg, int nents)
996 struct scatterlist *s;
999 WARN_ON(nents == 0 || sg[0].length == 0);
1001 for_each_sg(sg, s, nents, i) {
1002 s->dma_address = 0L;
1003 #ifdef CONFIG_NEED_SG_DMA_LENGTH
1004 s->dma_length = s->length;
1011 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
1012 enum dma_data_direction dir)
1014 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
1018 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
1019 enum dma_data_direction dir)
1022 dma_unmap_sg(dev, sg, nents, dir);
1025 /* *********************** FC-NVME LS Handling **************************** */
1027 static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
1028 static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);
1030 static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
1033 __nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
1035 struct nvme_fc_rport *rport = lsop->rport;
1036 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
1037 unsigned long flags;
1039 spin_lock_irqsave(&rport->lock, flags);
1041 if (!lsop->req_queued) {
1042 spin_unlock_irqrestore(&rport->lock, flags);
1046 list_del(&lsop->lsreq_list);
1048 lsop->req_queued = false;
1050 spin_unlock_irqrestore(&rport->lock, flags);
1052 fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
1053 (lsreq->rqstlen + lsreq->rsplen),
1056 nvme_fc_rport_put(rport);
1060 __nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
1061 struct nvmefc_ls_req_op *lsop,
1062 void (*done)(struct nvmefc_ls_req *req, int status))
1064 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
1065 unsigned long flags;
1068 if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
1069 return -ECONNREFUSED;
1071 if (!nvme_fc_rport_get(rport))
1075 lsop->rport = rport;
1076 lsop->req_queued = false;
1077 INIT_LIST_HEAD(&lsop->lsreq_list);
1078 init_completion(&lsop->ls_done);
1080 lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
1081 lsreq->rqstlen + lsreq->rsplen,
1083 if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
1087 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
1089 spin_lock_irqsave(&rport->lock, flags);
1091 list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);
1093 lsop->req_queued = true;
1095 spin_unlock_irqrestore(&rport->lock, flags);
1097 ret = rport->lport->ops->ls_req(&rport->lport->localport,
1098 &rport->remoteport, lsreq);
1105 lsop->ls_error = ret;
1106 spin_lock_irqsave(&rport->lock, flags);
1107 lsop->req_queued = false;
1108 list_del(&lsop->lsreq_list);
1109 spin_unlock_irqrestore(&rport->lock, flags);
1110 fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
1111 (lsreq->rqstlen + lsreq->rsplen),
1114 nvme_fc_rport_put(rport);
1120 nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
1122 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
1124 lsop->ls_error = status;
1125 complete(&lsop->ls_done);
1129 nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
1131 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
1132 struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
1135 ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);
1139 * No timeout/not interruptible as we need the struct
1140 * to exist until the lldd calls us back. Thus mandate the
1141 * wait until the driver calls back; the lldd is responsible for
1142 * the timeout action.
1144 wait_for_completion(&lsop->ls_done);
1146 __nvme_fc_finish_ls_req(lsop);
1148 ret = lsop->ls_error;
1154 /* ACC or RJT payload ? */
1155 if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
1162 nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
1163 struct nvmefc_ls_req_op *lsop,
1164 void (*done)(struct nvmefc_ls_req *req, int status))
1166 /* don't wait for completion */
1168 return __nvme_fc_send_ls_req(rport, lsop, done);
1172 nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
1173 struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
1175 struct nvmefc_ls_req_op *lsop;
1176 struct nvmefc_ls_req *lsreq;
1177 struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
1178 struct fcnvme_ls_cr_assoc_acc *assoc_acc;
1179 unsigned long flags;
1182 lsop = kzalloc((sizeof(*lsop) +
1183 sizeof(*assoc_rqst) + sizeof(*assoc_acc) +
1184 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
1186 dev_info(ctrl->ctrl.device,
1187 "NVME-FC{%d}: send Create Association failed: ENOMEM\n",
1193 assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)&lsop[1];
1194 assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
1195 lsreq = &lsop->ls_req;
1196 if (ctrl->lport->ops->lsrqst_priv_sz)
1197 lsreq->private = &assoc_acc[1];
1199 lsreq->private = NULL;
1201 assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
1202 assoc_rqst->desc_list_len =
1203 cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
1205 assoc_rqst->assoc_cmd.desc_tag =
1206 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
1207 assoc_rqst->assoc_cmd.desc_len =
1209 sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
1211 assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
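/* sqsize is conveyed 0's-based, per NVMe convention, hence qsize - 1 */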
1212 assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
1213 /* Linux supports only Dynamic controllers */
1214 assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
1215 uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
1216 strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
1217 min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
1218 strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
1219 min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));
1221 lsop->queue = queue;
1222 lsreq->rqstaddr = assoc_rqst;
1223 lsreq->rqstlen = sizeof(*assoc_rqst);
1224 lsreq->rspaddr = assoc_acc;
1225 lsreq->rsplen = sizeof(*assoc_acc);
1226 lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
1228 ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
1230 goto out_free_buffer;
1232 /* process connect LS completion */
1234 /* validate the ACC response */
1235 if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
1237 else if (assoc_acc->hdr.desc_list_len !=
1239 sizeof(struct fcnvme_ls_cr_assoc_acc)))
1240 fcret = VERR_CR_ASSOC_ACC_LEN;
1241 else if (assoc_acc->hdr.rqst.desc_tag !=
1242 cpu_to_be32(FCNVME_LSDESC_RQST))
1243 fcret = VERR_LSDESC_RQST;
1244 else if (assoc_acc->hdr.rqst.desc_len !=
1245 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
1246 fcret = VERR_LSDESC_RQST_LEN;
1247 else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
1248 fcret = VERR_CR_ASSOC;
1249 else if (assoc_acc->associd.desc_tag !=
1250 cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1251 fcret = VERR_ASSOC_ID;
1252 else if (assoc_acc->associd.desc_len !=
1254 sizeof(struct fcnvme_lsdesc_assoc_id)))
1255 fcret = VERR_ASSOC_ID_LEN;
1256 else if (assoc_acc->connectid.desc_tag !=
1257 cpu_to_be32(FCNVME_LSDESC_CONN_ID))
1258 fcret = VERR_CONN_ID;
1259 else if (assoc_acc->connectid.desc_len !=
1260 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
1261 fcret = VERR_CONN_ID_LEN;
1266 "q %d Create Association LS failed: %s\n",
1267 queue->qnum, validation_errors[fcret]);
1269 spin_lock_irqsave(&ctrl->lock, flags);
1270 ctrl->association_id =
1271 be64_to_cpu(assoc_acc->associd.association_id);
1272 queue->connection_id =
1273 be64_to_cpu(assoc_acc->connectid.connection_id);
1274 set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1275 spin_unlock_irqrestore(&ctrl->lock, flags);
1283 "queue %d connect admin queue failed (%d).\n",
1289 nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
1290 u16 qsize, u16 ersp_ratio)
1292 struct nvmefc_ls_req_op *lsop;
1293 struct nvmefc_ls_req *lsreq;
1294 struct fcnvme_ls_cr_conn_rqst *conn_rqst;
1295 struct fcnvme_ls_cr_conn_acc *conn_acc;
1298 lsop = kzalloc((sizeof(*lsop) +
1299 sizeof(*conn_rqst) + sizeof(*conn_acc) +
1300 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
1302 dev_info(ctrl->ctrl.device,
1303 "NVME-FC{%d}: send Create Connection failed: ENOMEM\n",
1309 conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)&lsop[1];
1310 conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
1311 lsreq = &lsop->ls_req;
1312 if (ctrl->lport->ops->lsrqst_priv_sz)
1313 lsreq->private = (void *)&conn_acc[1];
1315 lsreq->private = NULL;
1317 conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
1318 conn_rqst->desc_list_len = cpu_to_be32(
1319 sizeof(struct fcnvme_lsdesc_assoc_id) +
1320 sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
1322 conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1323 conn_rqst->associd.desc_len =
1325 sizeof(struct fcnvme_lsdesc_assoc_id));
1326 conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
1327 conn_rqst->connect_cmd.desc_tag =
1328 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
1329 conn_rqst->connect_cmd.desc_len =
1331 sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
1332 conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
1333 conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
1334 conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);
1336 lsop->queue = queue;
1337 lsreq->rqstaddr = conn_rqst;
1338 lsreq->rqstlen = sizeof(*conn_rqst);
1339 lsreq->rspaddr = conn_acc;
1340 lsreq->rsplen = sizeof(*conn_acc);
1341 lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
1343 ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
1345 goto out_free_buffer;
1347 /* process connect LS completion */
1349 /* validate the ACC response */
1350 if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
1352 else if (conn_acc->hdr.desc_list_len !=
1353 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
1354 fcret = VERR_CR_CONN_ACC_LEN;
1355 else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
1356 fcret = VERR_LSDESC_RQST;
1357 else if (conn_acc->hdr.rqst.desc_len !=
1358 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
1359 fcret = VERR_LSDESC_RQST_LEN;
1360 else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
1361 fcret = VERR_CR_CONN;
1362 else if (conn_acc->connectid.desc_tag !=
1363 cpu_to_be32(FCNVME_LSDESC_CONN_ID))
1364 fcret = VERR_CONN_ID;
1365 else if (conn_acc->connectid.desc_len !=
1366 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
1367 fcret = VERR_CONN_ID_LEN;
1372 "q %d Create I/O Connection LS failed: %s\n",
1373 queue->qnum, validation_errors[fcret]);
1375 queue->connection_id =
1376 be64_to_cpu(conn_acc->connectid.connection_id);
1377 set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1385 "queue %d connect I/O queue failed (%d).\n",
1391 nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
1393 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
1395 __nvme_fc_finish_ls_req(lsop);
1397 /* fc-nvme initiator doesn't care about success or failure of cmd */
1403 * This routine sends a FC-NVME LS to disconnect (aka terminate)
1404 * the FC-NVME Association. Terminating the association also
1405 * terminates the FC-NVME connections (per queue, both admin and io
1406 * queues) that are part of the association. E.g. things are torn
1407 * down, and the related FC-NVME Association ID and Connection IDs
1410 * The behavior of the fc-nvme initiator is such that its
1411 * understanding of the association and connections will implicitly
1412 * be torn down. The action is implicit as it may be due to a loss of
1413 * connectivity with the fc-nvme target, so you may never get a
1414 * response even if you tried. As such, the action of this routine
1415 * is to asynchronously send the LS, ignore any results of the LS, and
1416 * continue on with terminating the association. If the fc-nvme target
1417 * is present and receives the LS, it too can tear down.
1420 nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
1422 struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
1423 struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
1424 struct nvmefc_ls_req_op *lsop;
1425 struct nvmefc_ls_req *lsreq;
1428 lsop = kzalloc((sizeof(*lsop) +
1429 sizeof(*discon_rqst) + sizeof(*discon_acc) +
1430 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
1432 dev_info(ctrl->ctrl.device,
1433 "NVME-FC{%d}: send Disconnect Association "
1439 discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
1440 discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
1441 lsreq = &lsop->ls_req;
1442 if (ctrl->lport->ops->lsrqst_priv_sz)
1443 lsreq->private = (void *)&discon_acc[1];
1445 lsreq->private = NULL;
1447 nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
1448 ctrl->association_id);
1450 ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
1451 nvme_fc_disconnect_assoc_done);
1457 nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
1459 struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private;
1460 struct nvme_fc_rport *rport = lsop->rport;
1461 struct nvme_fc_lport *lport = rport->lport;
1462 unsigned long flags;
1464 spin_lock_irqsave(&rport->lock, flags);
1465 list_del(&lsop->lsrcv_list);
1466 spin_unlock_irqrestore(&rport->lock, flags);
1468 fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma,
1469 sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
1470 fc_dma_unmap_single(lport->dev, lsop->rspdma,
1471 sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
1475 nvme_fc_rport_put(rport);
1479 nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop)
1481 struct nvme_fc_rport *rport = lsop->rport;
1482 struct nvme_fc_lport *lport = rport->lport;
1483 struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
1486 fc_dma_sync_single_for_device(lport->dev, lsop->rspdma,
1487 sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
1489 ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport,
1492 dev_warn(lport->dev,
1493 "LLDD rejected LS RSP xmt: LS %d status %d\n",
1495 nvme_fc_xmt_ls_rsp_done(lsop->lsrsp);
1500 static struct nvme_fc_ctrl *
1501 nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport,
1502 struct nvmefc_ls_rcv_op *lsop)
1504 struct fcnvme_ls_disconnect_assoc_rqst *rqst =
1505 &lsop->rqstbuf->rq_dis_assoc;
1506 struct nvme_fc_ctrl *ctrl, *ret = NULL;
1507 struct nvmefc_ls_rcv_op *oldls = NULL;
1508 u64 association_id = be64_to_cpu(rqst->associd.association_id);
1509 unsigned long flags;
1511 spin_lock_irqsave(&rport->lock, flags);
1513 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
1514 if (!nvme_fc_ctrl_get(ctrl))
1516 spin_lock(&ctrl->lock);
1517 if (association_id == ctrl->association_id) {
1518 oldls = ctrl->rcv_disconn;
1519 ctrl->rcv_disconn = lsop;
1522 spin_unlock(&ctrl->lock);
1524 /* leave the ctrl get reference */
1526 nvme_fc_ctrl_put(ctrl);
1529 spin_unlock_irqrestore(&rport->lock, flags);
1531 /* transmit a response for anything that was pending */
1533 dev_info(rport->lport->dev,
1534 "NVME-FC{%d}: Multiple Disconnect Association "
1535 "LS's received\n", ctrl->cnum);
1536 /* overwrite good response with bogus failure */
1537 oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
1538 sizeof(*oldls->rspbuf),
1541 FCNVME_RJT_EXP_NONE, 0);
1542 nvme_fc_xmt_ls_rsp(oldls);
1549 * returns true to mean LS handled and ls_rsp can be sent
1550 * returns false to defer ls_rsp xmt (will be done as part of
1551 * association termination)
1554 nvme_fc_ls_disconnect_assoc(struct nvmefc_ls_rcv_op *lsop)
1556 struct nvme_fc_rport *rport = lsop->rport;
1557 struct fcnvme_ls_disconnect_assoc_rqst *rqst =
1558 &lsop->rqstbuf->rq_dis_assoc;
1559 struct fcnvme_ls_disconnect_assoc_acc *acc =
1560 &lsop->rspbuf->rsp_dis_assoc;
1561 struct nvme_fc_ctrl *ctrl = NULL;
1564 memset(acc, 0, sizeof(*acc));
1566 ret = nvmefc_vldt_lsreq_discon_assoc(lsop->rqstdatalen, rqst);
1568 /* match an active association */
1569 ctrl = nvme_fc_match_disconn_ls(rport, lsop);
1571 ret = VERR_NO_ASSOC;
1575 dev_info(rport->lport->dev,
1576 "Disconnect LS failed: %s\n",
1577 validation_errors[ret]);
1578 lsop->lsrsp->rsplen = nvme_fc_format_rjt(acc,
1579 sizeof(*acc), rqst->w0.ls_cmd,
1580 (ret == VERR_NO_ASSOC) ?
1581 FCNVME_RJT_RC_INV_ASSOC :
1582 FCNVME_RJT_RC_LOGIC,
1583 FCNVME_RJT_EXP_NONE, 0);
1587 /* format an ACCept response */
1589 lsop->lsrsp->rsplen = sizeof(*acc);
1591 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1593 sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
1594 FCNVME_LS_DISCONNECT_ASSOC);
1597 * the transmit of the response will occur after the exchanges
1598 * for the association have been ABTS'd by
1599 * nvme_fc_delete_association().
1602 /* fail the association */
1603 nvme_fc_error_recovery(ctrl, "Disconnect Association LS received");
1605 /* release the reference taken by nvme_fc_match_disconn_ls() */
1606 nvme_fc_ctrl_put(ctrl);
1612 * Actual Processing routine for received FC-NVME LS Requests from the LLD
1613 * returns true if a response should be sent afterward, false if rsp will
1614 * be sent asynchronously.
1617 nvme_fc_handle_ls_rqst(struct nvmefc_ls_rcv_op *lsop)
1619 struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
1622 lsop->lsrsp->nvme_fc_private = lsop;
1623 lsop->lsrsp->rspbuf = lsop->rspbuf;
1624 lsop->lsrsp->rspdma = lsop->rspdma;
1625 lsop->lsrsp->done = nvme_fc_xmt_ls_rsp_done;
1626 /* Be preventative. Handlers will later set it to a valid length */
1627 lsop->lsrsp->rsplen = 0;
1631 * parse request input, execute the request, and format the
1634 switch (w0->ls_cmd) {
1635 case FCNVME_LS_DISCONNECT_ASSOC:
1636 ret = nvme_fc_ls_disconnect_assoc(lsop);
1638 case FCNVME_LS_DISCONNECT_CONN:
1639 lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
1640 sizeof(*lsop->rspbuf), w0->ls_cmd,
1641 FCNVME_RJT_RC_UNSUP, FCNVME_RJT_EXP_NONE, 0);
1643 case FCNVME_LS_CREATE_ASSOCIATION:
1644 case FCNVME_LS_CREATE_CONNECTION:
1645 lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
1646 sizeof(*lsop->rspbuf), w0->ls_cmd,
1647 FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0);
1650 lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
1651 sizeof(*lsop->rspbuf), w0->ls_cmd,
1652 FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
1660 nvme_fc_handle_ls_rqst_work(struct work_struct *work)
1662 struct nvme_fc_rport *rport =
1663 container_of(work, struct nvme_fc_rport, lsrcv_work);
1664 struct fcnvme_ls_rqst_w0 *w0;
1665 struct nvmefc_ls_rcv_op *lsop;
1666 unsigned long flags;
1671 spin_lock_irqsave(&rport->lock, flags);
1672 list_for_each_entry(lsop, &rport->ls_rcv_list, lsrcv_list) {
1676 lsop->handled = true;
1677 if (rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
1678 spin_unlock_irqrestore(&rport->lock, flags);
1679 sendrsp = nvme_fc_handle_ls_rqst(lsop);
1681 spin_unlock_irqrestore(&rport->lock, flags);
1682 w0 = &lsop->rqstbuf->w0;
1683 lsop->lsrsp->rsplen = nvme_fc_format_rjt(
1685 sizeof(*lsop->rspbuf),
1688 FCNVME_RJT_EXP_NONE, 0);
1691 nvme_fc_xmt_ls_rsp(lsop);
1694 spin_unlock_irqrestore(&rport->lock, flags);
1698 * nvme_fc_rcv_ls_req - transport entry point called by an LLDD
1699 * upon the reception of a NVME LS request.
1701 * The nvme-fc layer will copy payload to an internal structure for
1702 * processing. As such, upon completion of the routine, the LLDD may
1703 * immediately free/reuse the LS request buffer passed in the call.
1705 * If this routine returns error, the LLDD should abort the exchange.
1707 * @remoteport: pointer to the (registered) remote port that the LS
1708 * was received from. The remoteport is associated with
1709 * a specific localport.
1710 * @lsrsp: pointer to a nvmefc_ls_rsp response structure to be
1711 * used to reference the exchange corresponding to the LS
1712 * when issuing an ls response.
1713 * @lsreqbuf: pointer to the buffer containing the LS Request
1714 * @lsreqbuf_len: length, in bytes, of the received LS request
1717 nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *portptr,
1718 struct nvmefc_ls_rsp *lsrsp,
1719 void *lsreqbuf, u32 lsreqbuf_len)
1721 struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
1722 struct nvme_fc_lport *lport = rport->lport;
1723 struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
1724 struct nvmefc_ls_rcv_op *lsop;
1725 unsigned long flags;
1728 nvme_fc_rport_get(rport);
1730 /* validate there's a routine to transmit a response */
1731 if (!lport->ops->xmt_ls_rsp) {
1732 dev_info(lport->dev,
1733 "RCV %s LS failed: no LLDD xmt_ls_rsp\n",
1734 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
1735 nvmefc_ls_names[w0->ls_cmd] : "");
1740 if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
1741 dev_info(lport->dev,
1742 "RCV %s LS failed: payload too large\n",
1743 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
1744 nvmefc_ls_names[w0->ls_cmd] : "");
1749 lsop = kzalloc(sizeof(*lsop) +
1750 sizeof(union nvmefc_ls_requests) +
1751 sizeof(union nvmefc_ls_responses),
1754 dev_info(lport->dev,
1755 "RCV %s LS failed: No memory\n",
1756 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
1757 nvmefc_ls_names[w0->ls_cmd] : "");
1761 lsop->rqstbuf = (union nvmefc_ls_requests *)&lsop[1];
1762 lsop->rspbuf = (union nvmefc_ls_responses *)&lsop->rqstbuf[1];
1764 lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf,
1765 sizeof(*lsop->rspbuf),
1767 if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) {
1768 dev_info(lport->dev,
1769 "RCV %s LS failed: DMA mapping failure\n",
1770 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
1771 nvmefc_ls_names[w0->ls_cmd] : "");
1776 lsop->rport = rport;
1777 lsop->lsrsp = lsrsp;
1779 memcpy(lsop->rqstbuf, lsreqbuf, lsreqbuf_len);
1780 lsop->rqstdatalen = lsreqbuf_len;
1782 spin_lock_irqsave(&rport->lock, flags);
1783 if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) {
1784 spin_unlock_irqrestore(&rport->lock, flags);
1788 list_add_tail(&lsop->lsrcv_list, &rport->ls_rcv_list);
1789 spin_unlock_irqrestore(&rport->lock, flags);
1791 schedule_work(&rport->lsrcv_work);
1796 fc_dma_unmap_single(lport->dev, lsop->rspdma,
1797 sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
1801 nvme_fc_rport_put(rport);
1804 EXPORT_SYMBOL_GPL(nvme_fc_rcv_ls_req);
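/*
 * Illustrative sketch (not part of this file): from its unsolicited-LS
 * receive path an LLDD hands the payload to the transport, and may free
 * or reuse its own receive buffer as soon as the call returns. "rsp_ctx"
 * is an assumed per-exchange LLDD structure embedding a struct
 * nvmefc_ls_rsp, and "lldd_abort_exchange" is a hypothetical helper:
 *
 *	ret = nvme_fc_rcv_ls_req(remoteport, &rsp_ctx->ls_rsp,
 *				 rx_buf, rx_len);
 *	if (ret)
 *		lldd_abort_exchange(rsp_ctx);
 */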
1807 /* *********************** NVME Ctrl Routines **************************** */
1810 __nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
1811 struct nvme_fc_fcp_op *op)
1813 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
1814 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1815 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
1816 sizeof(op->cmd_iu), DMA_TO_DEVICE);
1818 atomic_set(&op->state, FCPOP_STATE_UNINIT);
1822 nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
1823 unsigned int hctx_idx)
1825 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1827 return __nvme_fc_exit_request(set->driver_data, op);
1831 __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
1833 unsigned long flags;
1836 spin_lock_irqsave(&ctrl->lock, flags);
1837 opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
1838 if (opstate != FCPOP_STATE_ACTIVE)
1839 atomic_set(&op->state, opstate);
1840 else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) {
1841 op->flags |= FCOP_FLAGS_TERMIO;
1844 spin_unlock_irqrestore(&ctrl->lock, flags);
1846 if (opstate != FCPOP_STATE_ACTIVE)
1849 ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
1850 &ctrl->rport->remoteport,
1851 op->queue->lldd_handle,
1858 nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
1860 struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
1863 /* ensure we've initialized the ops once */
1864 if (!(aen_op->flags & FCOP_FLAGS_AEN))
1867 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
1868 __nvme_fc_abort_op(ctrl, aen_op);
1872 __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
1873 struct nvme_fc_fcp_op *op, int opstate)
1875 unsigned long flags;
1877 if (opstate == FCPOP_STATE_ABORTED) {
1878 spin_lock_irqsave(&ctrl->lock, flags);
1879 if (test_bit(FCCTRL_TERMIO, &ctrl->flags) &&
1880 op->flags & FCOP_FLAGS_TERMIO) {
1882 wake_up(&ctrl->ioabort_wait);
1884 spin_unlock_irqrestore(&ctrl->lock, flags);
1889 nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1891 struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
1892 struct request *rq = op->rq;
1893 struct nvmefc_fcp_req *freq = &op->fcp_req;
1894 struct nvme_fc_ctrl *ctrl = op->ctrl;
1895 struct nvme_fc_queue *queue = op->queue;
1896 struct nvme_completion *cqe = &op->rsp_iu.cqe;
1897 struct nvme_command *sqe = &op->cmd_iu.sqe;
1898 __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
1899 union nvme_result result;
1900 bool terminate_assoc = true;
1905 * The current linux implementation of an nvme controller
1906 * allocates a single tag set for all io queues and sizes
1907 * the io queues to fully hold all possible tags. Thus, the
1908 * implementation does not reference or care about the sqhd
1909 * value as it never needs to use the sqhd/sqtail pointers
1910 * for submission pacing.
1912 * This affects the FC-NVME implementation in two ways:
1913 * 1) As the value doesn't matter, we don't need to waste
1914 * cycles extracting it from ERSPs and stamping it in the
1915 * cases where the transport fabricates CQEs on successful
1917 * 2) The FC-NVME implementation requires that delivery of
1918 * ERSP completions are to go back to the nvme layer in order
1919 * relative to the rsn, such that the sqhd value will always
1920 * be "in order" for the nvme layer. As the nvme layer in
1921 * linux doesn't care about sqhd, there's no need to return
1925 * As the core nvme layer in linux currently does not look at
1926 * every field in the cqe - in cases where the FC transport must
1927 * fabricate a CQE, the following fields will not be set as they
1928 * are not referenced:
1929 * cqe.sqid, cqe.sqhd, cqe.command_id
1931 * Failure or error of an individual i/o, in a transport
1932 * detected fashion unrelated to the nvme completion status,
1933 * potentially cause the initiator and target sides to get out
1934 * of sync on SQ head/tail (aka outstanding io count allowed).
1935 * Per FC-NVME spec, failure of an individual command requires
1936 * the connection to be terminated, which in turn requires the
1937 * association to be terminated.
1940 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
1942 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
1943 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1945 if (opstate == FCPOP_STATE_ABORTED)
1946 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
1947 else if (freq->status) {
1948 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
1949 dev_info(ctrl->ctrl.device,
1950 "NVME-FC{%d}: io failed due to lldd error %d\n",
1951 ctrl->cnum, freq->status);
1955 * For the linux implementation, if we have an unsuccessful
1956 * status, the blk-mq layer can typically be called with the
1957 * non-zero status and the content of the cqe isn't important.
1963 * command completed successfully relative to the wire
1964 * protocol. However, validate anything received and
1965 * extract the status and result from the cqe (create it
1969 switch (freq->rcv_rsplen) {
1972 case NVME_FC_SIZEOF_ZEROS_RSP:
1974 * No response payload, or 12 bytes of payload (which
1975 * should all be zeros), is considered successful, and
1976 * the transport fabricates the CQE with no payload.
1978 if (freq->transferred_length !=
1979 be32_to_cpu(op->cmd_iu.data_len)) {
1980 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
1981 dev_info(ctrl->ctrl.device,
1982 "NVME-FC{%d}: io failed due to bad transfer "
1983 "length: %d vs expected %d\n",
1984 ctrl->cnum, freq->transferred_length,
1985 be32_to_cpu(op->cmd_iu.data_len));
1991 case sizeof(struct nvme_fc_ersp_iu):
1993 * The ERSP IU contains a full completion with CQE.
1994 * Validate ERSP IU and look at cqe.
1996 if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
1997 (freq->rcv_rsplen / 4) ||
1998 be32_to_cpu(op->rsp_iu.xfrd_len) !=
1999 freq->transferred_length ||
2000 op->rsp_iu.ersp_result ||
2001 sqe->common.command_id != cqe->command_id)) {
2002 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
2003 dev_info(ctrl->ctrl.device,
2004 "NVME-FC{%d}: io failed due to bad NVMe_ERSP: "
2005 "iu len %d, xfr len %d vs %d, status code "
2006 "%d, cmdid %d vs %d\n",
2007 ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len),
2008 be32_to_cpu(op->rsp_iu.xfrd_len),
2009 freq->transferred_length,
2010 op->rsp_iu.ersp_result,
2011 sqe->common.command_id,
2015 result = cqe->result;
2016 status = cqe->status;
2020 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
2021 dev_info(ctrl->ctrl.device,
2022 "NVME-FC{%d}: io failed due to odd NVMe_xRSP iu "
2024 ctrl->cnum, freq->rcv_rsplen);
2028 terminate_assoc = false;
2031 if (op->flags & FCOP_FLAGS_AEN) {
2032 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
2033 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
2034 atomic_set(&op->state, FCPOP_STATE_IDLE);
2035 op->flags = FCOP_FLAGS_AEN; /* clear other flags */
2036 nvme_fc_ctrl_put(ctrl);
2040 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
2041 if (!nvme_try_complete_req(rq, status, result))
2042 nvme_fc_complete_rq(rq);
2045 if (terminate_assoc)
2046 nvme_fc_error_recovery(ctrl, "transport detected io error");
2050 __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
2051 struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
2052 struct request *rq, u32 rqno)
2054 struct nvme_fcp_op_w_sgl *op_w_sgl =
2055 container_of(op, typeof(*op_w_sgl), op);
2056 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2059 memset(op, 0, sizeof(*op));
2060 op->fcp_req.cmdaddr = &op->cmd_iu;
2061 op->fcp_req.cmdlen = sizeof(op->cmd_iu);
2062 op->fcp_req.rspaddr = &op->rsp_iu;
2063 op->fcp_req.rsplen = sizeof(op->rsp_iu);
2064 op->fcp_req.done = nvme_fc_fcpio_done;
2070 cmdiu->format_id = NVME_CMD_FORMAT_ID;
2071 cmdiu->fc_id = NVME_CMD_FC_ID;
2072 cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
2074 cmdiu->rsv_cat = fccmnd_set_cat_css(0,
2075 (NVME_CC_CSS_NVM >> NVME_CC_CSS_SHIFT));
2077 cmdiu->rsv_cat = fccmnd_set_cat_admin(0);
2079 op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
2080 &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
2081 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
2083 "FCP Op failed - cmdiu dma mapping failed.\n");
2088 op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
2089 &op->rsp_iu, sizeof(op->rsp_iu),
2091 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
2093 "FCP Op failed - rspiu dma mapping failed.\n");
2097 atomic_set(&op->state, FCPOP_STATE_IDLE);
2103 nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
2104 unsigned int hctx_idx, unsigned int numa_node)
2106 struct nvme_fc_ctrl *ctrl = set->driver_data;
2107 struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
2108 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
2109 struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
2112 res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
2115 op->op.fcp_req.first_sgl = op->sgl;
2116 op->op.fcp_req.private = &op->priv[0];
2117 nvme_req(rq)->ctrl = &ctrl->ctrl;
2122 nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
2124 struct nvme_fc_fcp_op *aen_op;
2125 struct nvme_fc_cmd_iu *cmdiu;
2126 struct nvme_command *sqe;
2127 void *private = NULL;
2130 aen_op = ctrl->aen_ops;
2131 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
2132 if (ctrl->lport->ops->fcprqst_priv_sz) {
2133 private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
2139 cmdiu = &aen_op->cmd_iu;
2141 ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
2142 aen_op, (struct request *)NULL,
2143 (NVME_AQ_BLK_MQ_DEPTH + i));
2149 aen_op->flags = FCOP_FLAGS_AEN;
2150 aen_op->fcp_req.private = private;
2152 memset(sqe, 0, sizeof(*sqe));
2153 sqe->common.opcode = nvme_admin_async_event;
2154 /* Note: core layer may overwrite the sqe.command_id value */
2155 sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
2161 nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
2163 struct nvme_fc_fcp_op *aen_op;
2166 cancel_work_sync(&ctrl->ctrl.async_event_work);
2167 aen_op = ctrl->aen_ops;
2168 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
2169 __nvme_fc_exit_request(ctrl, aen_op);
2171 kfree(aen_op->fcp_req.private);
2172 aen_op->fcp_req.private = NULL;
2177 __nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
2180 struct nvme_fc_queue *queue = &ctrl->queues[qidx];
2182 hctx->driver_data = queue;
2187 nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
2188 unsigned int hctx_idx)
2190 struct nvme_fc_ctrl *ctrl = data;
2192 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
2198 nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
2199 unsigned int hctx_idx)
2201 struct nvme_fc_ctrl *ctrl = data;
2203 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
2209 nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
2211 struct nvme_fc_queue *queue;
2213 queue = &ctrl->queues[idx];
2214 memset(queue, 0, sizeof(*queue));
2217 atomic_set(&queue->csn, 0);
2218 queue->dev = ctrl->dev;
2221 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
2223 queue->cmnd_capsule_len = sizeof(struct nvme_command);
2226 * Considered whether we should allocate buffers for all SQEs
2227 * and CQEs and dma map them - mapping their respective entries
2228 * into the request structures (kernel vm addr and dma address)
2229 * thus the driver could use the buffers/mappings directly.
2230 * It only makes sense if the LLDD would use them for its
2231 * messaging api. It's very unlikely most adapter api's would use
2232 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload
2233 * structures were used instead.
2238 * This routine terminates a queue at the transport level.
2239 * The transport has already ensured that all outstanding ios on
2240 * the queue have been terminated.
2241 * The transport will send a Disconnect LS request to terminate
2242 * the queue's connection. Termination of the admin queue will also
2243 * terminate the association at the target.
2246 nvme_fc_free_queue(struct nvme_fc_queue *queue)
2248 if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
2251 clear_bit(NVME_FC_Q_LIVE, &queue->flags);
2253 * Current implementation never disconnects a single queue.
2254 * It always terminates a whole association. So there is never
2255 * a disconnect(queue) LS sent to the target.
2258 queue->connection_id = 0;
2259 atomic_set(&queue->csn, 0);
2263 __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
2264 struct nvme_fc_queue *queue, unsigned int qidx)
2266 if (ctrl->lport->ops->delete_queue)
2267 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
2268 queue->lldd_handle);
2269 queue->lldd_handle = NULL;
2273 nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
2277 for (i = 1; i < ctrl->ctrl.queue_count; i++)
2278 nvme_fc_free_queue(&ctrl->queues[i]);
2282 __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
2283 struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
2287 queue->lldd_handle = NULL;
2288 if (ctrl->lport->ops->create_queue)
2289 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
2290 qidx, qsize, &queue->lldd_handle);
2296 nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
2298 struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
2301 for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
2302 __nvme_fc_delete_hw_queue(ctrl, queue, i);
2306 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
2308 struct nvme_fc_queue *queue = &ctrl->queues[1];
2311 for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
2312 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
2321 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
2326 nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
2330 for (i = 1; i < ctrl->ctrl.queue_count; i++) {
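/*
 * Two steps per queue: an FC-NVME Create Connection LS to the
 * target, then the NVMe-oF Connect command on that queue.
 */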
2331 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
2335 ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
2339 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
2346 nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
2350 for (i = 1; i < ctrl->ctrl.queue_count; i++)
2351 nvme_fc_init_queue(ctrl, i);
2355 nvme_fc_ctrl_free(struct kref *ref)
2357 struct nvme_fc_ctrl *ctrl =
2358 container_of(ref, struct nvme_fc_ctrl, ref);
2359 unsigned long flags;
2361 if (ctrl->ctrl.tagset) {
2362 blk_cleanup_queue(ctrl->ctrl.connect_q);
2363 blk_mq_free_tag_set(&ctrl->tag_set);
2366 /* remove from rport list */
2367 spin_lock_irqsave(&ctrl->rport->lock, flags);
2368 list_del(&ctrl->ctrl_list);
2369 spin_unlock_irqrestore(&ctrl->rport->lock, flags);
2371 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
2372 blk_cleanup_queue(ctrl->ctrl.admin_q);
2373 blk_cleanup_queue(ctrl->ctrl.fabrics_q);
2374 blk_mq_free_tag_set(&ctrl->admin_tag_set);
2376 kfree(ctrl->queues);
2378 put_device(ctrl->dev);
2379 nvme_fc_rport_put(ctrl->rport);
2381 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
2382 if (ctrl->ctrl.opts)
2383 nvmf_free_options(ctrl->ctrl.opts);
2388 nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
2390 kref_put(&ctrl->ref, nvme_fc_ctrl_free);
2394 nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
2396 return kref_get_unless_zero(&ctrl->ref);
2400 * All accesses from nvme core layer done - can now free the
2401 * controller. Called after last nvme_put_ctrl() call
2404 nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
2406 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2408 WARN_ON(nctrl != &ctrl->ctrl);
2410 nvme_fc_ctrl_put(ctrl);
2414 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
2419 * if an error (io timeout, etc) occurs while (re)connecting,
2420 * it's an error on creating the new association.
2421 * Start the error recovery thread if it hasn't already
2422 * been started. It is expected there could be multiple
2423 * ios hitting this path before things are cleaned up.
2425 if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
2426 active = atomic_xchg(&ctrl->err_work_active, 1);
2427 if (!active && !queue_work(nvme_fc_wq, &ctrl->err_work)) {
2428 atomic_set(&ctrl->err_work_active, 0);
2434 /* Otherwise, only proceed if in LIVE state - e.g. on first error */
2435 if (ctrl->ctrl.state != NVME_CTRL_LIVE)
2438 dev_warn(ctrl->ctrl.device,
2439 "NVME-FC{%d}: transport association event: %s\n",
2440 ctrl->cnum, errmsg);
2441 dev_warn(ctrl->ctrl.device,
2442 "NVME-FC{%d}: resetting controller\n", ctrl->cnum);
2444 nvme_reset_ctrl(&ctrl->ctrl);
2447 static enum blk_eh_timer_return
2448 nvme_fc_timeout(struct request *rq, bool reserved)
2450 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2451 struct nvme_fc_ctrl *ctrl = op->ctrl;
2452 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2453 struct nvme_command *sqe = &cmdiu->sqe;
2456 * Attempt to abort the offending command. Command completion
2457 * will detect the aborted io and will fail the connection.
2459 dev_info(ctrl->ctrl.device,
2460 "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: "
2462 ctrl->cnum, op->queue->qnum, sqe->common.opcode,
2463 sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11);
2464 if (__nvme_fc_abort_op(ctrl, op))
2465 nvme_fc_error_recovery(ctrl, "io timeout abort failed");
2468 * the io abort has been initiated. Restart the reset timer; the
2469 * abort completion will complete the io shortly. This avoids a
2470 * synchronous wait while the abort finishes.
2472 return BLK_EH_RESET_TIMER;
2476 nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2477 struct nvme_fc_fcp_op *op)
2479 struct nvmefc_fcp_req *freq = &op->fcp_req;
2484 if (!blk_rq_nr_phys_segments(rq))
2487 freq->sg_table.sgl = freq->first_sgl;
2488 ret = sg_alloc_table_chained(&freq->sg_table,
2489 blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
2490 NVME_INLINE_SG_CNT);
2494 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
2495 WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
2496 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
2497 op->nents, rq_dma_dir(rq));
2498 if (unlikely(freq->sg_cnt <= 0)) {
2499 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
2505 * TODO: blk_integrity_rq(rq) for DIF
2511 nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2512 struct nvme_fc_fcp_op *op)
2514 struct nvmefc_fcp_req *freq = &op->fcp_req;
2519 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
2522 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
2528 * In FC, the queue is a logical thing. At transport connect, the target
2529 * creates its "queue" and returns a handle that is to be given to the
2530 * target whenever it posts something to the corresponding SQ. When an
2531 * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
2532 * command contained within the SQE, an io, and assigns a FC exchange
2533 * to it. The SQE and the associated SQ handle are sent in the initial
2534 * CMD IU sent on the exchange. All transfers relative to the io occur
2535 * as part of the exchange. The CQE is the last thing for the io,
2536 * which is transferred (explicitly or implicitly) with the RSP IU
2537 * sent on the exchange. After the CQE is received, the FC exchange is
2538 * terminated and the exchange may be used for a different io.
2540 * The transport to LLDD api has the transport making a request for a
2541 * new fcp io request to the LLDD. The LLDD then allocates a FC exchange
2542 * resource and transfers the command. The LLDD will then process all
2543 * steps to complete the io. Upon completion, the transport done routine
2546 * So - while the operation is outstanding to the LLDD, there is a link
2547 * level FC exchange resource that is also outstanding. This must be
2548 * considered in all cleanup operations.
2551 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
2552 struct nvme_fc_fcp_op *op, u32 data_len,
2553 enum nvmefc_fcp_datadir io_dir)
2555 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2556 struct nvme_command *sqe = &cmdiu->sqe;
2560 * before attempting to send the io, check to see if we believe
2561 * the target device is present
2563 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
2564 return BLK_STS_RESOURCE;
2566 if (!nvme_fc_ctrl_get(ctrl))
2567 return BLK_STS_IOERR;
2569 /* format the FC-NVME CMD IU and fcp_req */
2570 cmdiu->connection_id = cpu_to_be64(queue->connection_id);
2571 cmdiu->data_len = cpu_to_be32(data_len);
2573 case NVMEFC_FCP_WRITE:
2574 cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
2576 case NVMEFC_FCP_READ:
2577 cmdiu->flags = FCNVME_CMD_FLAGS_READ;
2579 case NVMEFC_FCP_NODATA:
2583 op->fcp_req.payload_length = data_len;
2584 op->fcp_req.io_dir = io_dir;
2585 op->fcp_req.transferred_length = 0;
2586 op->fcp_req.rcv_rsplen = 0;
2587 op->fcp_req.status = NVME_SC_SUCCESS;
2588 op->fcp_req.sqid = cpu_to_le16(queue->qnum);
2591 * validate per fabric rules, set fields mandated by fabric spec
2592 * as well as those by FC-NVME spec.
2594 WARN_ON_ONCE(sqe->common.metadata);
2595 sqe->common.flags |= NVME_CMD_SGL_METABUF;
2598 * format SQE DPTR field per FC-NVME rules:
2599 * type=0x5 Transport SGL Data Block Descriptor
2600 * subtype=0xA Transport-specific value
2602 * length=length of the data series
2604 sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2605 NVME_SGL_FMT_TRANSPORT_A;
2606 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
2607 sqe->rw.dptr.sgl.addr = 0;
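/*
 * e.g. a 4096-byte transfer yields dptr.sgl = { addr 0, length 0x1000,
 * type/subtype byte 0x5A }; the data itself moves as FC-NVME IUs on
 * the exchange rather than via a host-memory SGL address.
 */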
2609 if (!(op->flags & FCOP_FLAGS_AEN)) {
2610 ret = nvme_fc_map_data(ctrl, op->rq, op);
2612 nvme_cleanup_cmd(op->rq);
2613 nvme_fc_ctrl_put(ctrl);
2614 if (ret == -ENOMEM || ret == -EAGAIN)
2615 return BLK_STS_RESOURCE;
2616 return BLK_STS_IOERR;
2620 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
2621 sizeof(op->cmd_iu), DMA_TO_DEVICE);
2623 atomic_set(&op->state, FCPOP_STATE_ACTIVE);
2625 if (!(op->flags & FCOP_FLAGS_AEN))
2626 blk_mq_start_request(op->rq);
2628 cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
2629 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
2630 &ctrl->rport->remoteport,
2631 queue->lldd_handle, &op->fcp_req);
2635 * If the LLDD fails to send the command, is there an issue with
2636 * the csn value? If the command that fails is the Connect,
2637 * no - as the connection won't be live. If it is a command
2638 * post-connect, it's possible a gap in csn may be created.
2639 * Does this matter? As Linux initiators don't send fused
2640 * commands, no. The gap would exist, but as there's nothing
2641 * that depends on csn order to be delivered on the target
2642 * side, it shouldn't hurt. It would be difficult for a
2643 * target to even detect the csn gap as it has no idea when the
2644 * cmd with the csn was supposed to arrive.
2646 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
2647 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
2649 if (!(op->flags & FCOP_FLAGS_AEN)) {
2650 nvme_fc_unmap_data(ctrl, op->rq, op);
2651 nvme_cleanup_cmd(op->rq);
2654 nvme_fc_ctrl_put(ctrl);
2656 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
2658 return BLK_STS_IOERR;
2660 return BLK_STS_RESOURCE;
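/*
 * Illustrative sketch only, never compiled: the rough shape of an LLDD
 * ->fcp_io() handler that the ops->fcp_io() call above lands in. The
 * example_* name is hypothetical; a real adapter driver would allocate
 * an FC exchange, transmit the CMD IU at fcpreq->cmddma/cmdlen, run the
 * data transfers, then fill fcpreq->status/transferred_length/rcv_rsplen
 * and invoke fcpreq->done() to hand the io back to this transport.
 */
#if 0
static int example_lldd_fcp_io(struct nvme_fc_local_port *localport,
		struct nvme_fc_remote_port *remoteport,
		void *hw_queue_handle, struct nvmefc_fcp_req *fcpreq)
{
	/* accept the io: allocate an exchange and post the FC-NVME CMD IU */
	return 0;
}
#endif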
2667 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
2668 const struct blk_mq_queue_data *bd)
2670 struct nvme_ns *ns = hctx->queue->queuedata;
2671 struct nvme_fc_queue *queue = hctx->driver_data;
2672 struct nvme_fc_ctrl *ctrl = queue->ctrl;
2673 struct request *rq = bd->rq;
2674 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2675 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2676 struct nvme_command *sqe = &cmdiu->sqe;
2677 enum nvmefc_fcp_datadir io_dir;
2678 bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
2682 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
2683 !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2684 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
2686 ret = nvme_setup_cmd(ns, rq, sqe);
2691 * nvme core doesn't quite treat the rq opaquely. Commands such
2692 * as WRITE ZEROES will return a non-zero rq payload_bytes yet
2693 * there is no actual payload to be transferred.
2694 * To get it right, key data transmission on there being 1 or
2695 * more physical segments in the sg list. If there is no
2696 * physical segments, there is no payload.
2698 if (blk_rq_nr_phys_segments(rq)) {
2699 data_len = blk_rq_payload_bytes(rq);
2700 io_dir = ((rq_data_dir(rq) == WRITE) ?
2701 NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
2704 io_dir = NVMEFC_FCP_NODATA;
2708 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
2712 nvme_fc_submit_async_event(struct nvme_ctrl *arg)
2714 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
2715 struct nvme_fc_fcp_op *aen_op;
2718 if (test_bit(FCCTRL_TERMIO, &ctrl->flags))
2721 aen_op = &ctrl->aen_ops[0];
2723 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
2726 dev_err(ctrl->ctrl.device,
2727 "failed async event work\n");
2731 nvme_fc_complete_rq(struct request *rq)
2733 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2734 struct nvme_fc_ctrl *ctrl = op->ctrl;
2736 atomic_set(&op->state, FCPOP_STATE_IDLE);
2737 op->flags &= ~FCOP_FLAGS_TERMIO;
2739 nvme_fc_unmap_data(ctrl, rq, op);
2740 nvme_complete_rq(rq);
2741 nvme_fc_ctrl_put(ctrl);
2745 * This routine is used by the transport when it needs to find active
2746 * io on a queue that is to be terminated. The transport uses
2747 * blk_mq_tagset_busy_iter() to find the busy requests and then invokes
2748 * this routine on each of them to kill them one by one.
2750 * As FC allocates FC exchange for each io, the transport must contact
2751 * the LLDD to terminate the exchange, thus releasing the FC exchange.
2752 * After terminating the exchange the LLDD will call the transport's
2753 * normal io done path for the request, but it will have an aborted
2754 * status. The done path will return the io request back to the block
2755 * layer with an error status.
2758 nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
2760 struct nvme_ctrl *nctrl = data;
2761 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2762 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
2764 __nvme_fc_abort_op(ctrl, op);
2769 static const struct blk_mq_ops nvme_fc_mq_ops = {
2770 .queue_rq = nvme_fc_queue_rq,
2771 .complete = nvme_fc_complete_rq,
2772 .init_request = nvme_fc_init_request,
2773 .exit_request = nvme_fc_exit_request,
2774 .init_hctx = nvme_fc_init_hctx,
2775 .timeout = nvme_fc_timeout,
2779 nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
2781 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2782 unsigned int nr_io_queues;
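/* cap at the user's request, the online CPU count, and the LLDD's max_hw_queues */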
2785 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2786 ctrl->lport->ops->max_hw_queues);
2787 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2789 dev_info(ctrl->ctrl.device,
2790 "set_queue_count failed: %d\n", ret);
2794 ctrl->ctrl.queue_count = nr_io_queues + 1;
2798 nvme_fc_init_io_queues(ctrl);
2800 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
2801 ctrl->tag_set.ops = &nvme_fc_mq_ops;
2802 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
2803 ctrl->tag_set.reserved_tags = 1; /* fabric connect */
2804 ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
2805 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
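/*
 * Per-request pdu: the fcp op with its inline sgl plus whatever private
 * scratch the LLDD asked for via fcprqst_priv_sz (struct_size() folds
 * the trailing flexible priv[] array into the size).
 */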
2806 ctrl->tag_set.cmd_size =
2807 struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
2808 ctrl->lport->ops->fcprqst_priv_sz);
2809 ctrl->tag_set.driver_data = ctrl;
2810 ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
2811 ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
2813 ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
2817 ctrl->ctrl.tagset = &ctrl->tag_set;
2819 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
2820 if (IS_ERR(ctrl->ctrl.connect_q)) {
2821 ret = PTR_ERR(ctrl->ctrl.connect_q);
2822 goto out_free_tag_set;
2825 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2827 goto out_cleanup_blk_queue;
2829 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2831 goto out_delete_hw_queues;
2833 ctrl->ioq_live = true;
2837 out_delete_hw_queues:
2838 nvme_fc_delete_hw_io_queues(ctrl);
2839 out_cleanup_blk_queue:
2840 blk_cleanup_queue(ctrl->ctrl.connect_q);
2842 blk_mq_free_tag_set(&ctrl->tag_set);
2843 nvme_fc_free_io_queues(ctrl);
2845 /* force put free routine to ignore io queues */
2846 ctrl->ctrl.tagset = NULL;
2852 nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
2854 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2855 u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1;
2856 unsigned int nr_io_queues;
2859 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2860 ctrl->lport->ops->max_hw_queues);
2861 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2863 dev_info(ctrl->ctrl.device,
2864 "set_queue_count failed: %d\n", ret);
2868 if (!nr_io_queues && prior_ioq_cnt) {
2869 dev_info(ctrl->ctrl.device,
2870 "Fail Reconnect: At least 1 io queue "
2871 "required (was %d)\n", prior_ioq_cnt);
2875 ctrl->ctrl.queue_count = nr_io_queues + 1;
2876 /* check for io queues existing */
2877 if (ctrl->ctrl.queue_count == 1)
2880 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2882 goto out_free_io_queues;
2884 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2886 goto out_delete_hw_queues;
2888 if (prior_ioq_cnt != nr_io_queues) {
2889 dev_info(ctrl->ctrl.device,
2890 "reconnect: revising io queue count from %d to %d\n",
2891 prior_ioq_cnt, nr_io_queues);
2892 nvme_wait_freeze(&ctrl->ctrl);
2893 blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
2894 nvme_unfreeze(&ctrl->ctrl);
2899 out_delete_hw_queues:
2900 nvme_fc_delete_hw_io_queues(ctrl);
2902 nvme_fc_free_io_queues(ctrl);
2907 nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
2909 struct nvme_fc_lport *lport = rport->lport;
2911 atomic_inc(&lport->act_rport_cnt);
2915 nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
2917 struct nvme_fc_lport *lport = rport->lport;
2920 cnt = atomic_dec_return(&lport->act_rport_cnt);
2921 if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
2922 lport->ops->localport_delete(&lport->localport);
2926 nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
2928 struct nvme_fc_rport *rport = ctrl->rport;
2931 if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags))
2934 cnt = atomic_inc_return(&rport->act_ctrl_cnt);
2936 nvme_fc_rport_active_on_lport(rport);
2942 nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
2944 struct nvme_fc_rport *rport = ctrl->rport;
2945 struct nvme_fc_lport *lport = rport->lport;
2948 /* clearing of ctrl->flags ASSOC_ACTIVE bit is in association delete */
2950 cnt = atomic_dec_return(&rport->act_ctrl_cnt);
2952 if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
2953 lport->ops->remoteport_delete(&rport->remoteport);
2954 nvme_fc_rport_inactive_on_lport(rport);
2961 * This routine restarts the controller on the host side, and
2962 * on the link side, recreates the controller association.
2965 nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
2967 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2968 struct nvmefc_ls_rcv_op *disls = NULL;
2969 unsigned long flags;
2973 ++ctrl->ctrl.nr_reconnects;
2975 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
2978 if (nvme_fc_ctlr_active_on_rport(ctrl))
2981 dev_info(ctrl->ctrl.device,
2982 "NVME-FC{%d}: create association : host wwpn 0x%016llx "
2983 " rport wwpn 0x%016llx: NQN \"%s\"\n",
2984 ctrl->cnum, ctrl->lport->localport.port_name,
2985 ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn);
2988 * Create the admin queue
2991 ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
2994 goto out_free_queue;
2996 ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
2997 NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
2999 goto out_delete_hw_queue;
3001 ret = nvmf_connect_admin_queue(&ctrl->ctrl);
3003 goto out_disconnect_admin_queue;
3005 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
3008 * Check controller capabilities
3010 * todo:- add code to check if ctrl attributes changed from
3011 * prior connection values
3014 ret = nvme_enable_ctrl(&ctrl->ctrl);
3016 goto out_disconnect_admin_queue;
3018 ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments;
3019 ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments <<
3022 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
3024 ret = nvme_init_identify(&ctrl->ctrl);
3026 goto out_disconnect_admin_queue;
3030 /* FC-NVME does not have other data in the capsule */
3031 if (ctrl->ctrl.icdoff) {
3032 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
3034 goto out_disconnect_admin_queue;
3037 /* FC-NVME supports normal SGL Data Block Descriptors */
3039 if (opts->queue_size > ctrl->ctrl.maxcmd) {
3040 /* warn if maxcmd is lower than queue_size */
3041 dev_warn(ctrl->ctrl.device,
3042 "queue_size %zu > ctrl maxcmd %u, reducing "
3044 opts->queue_size, ctrl->ctrl.maxcmd);
3045 opts->queue_size = ctrl->ctrl.maxcmd;
3048 if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
3049 /* warn if sqsize is lower than queue_size */
3050 dev_warn(ctrl->ctrl.device,
3051 "queue_size %zu > ctrl sqsize %u, reducing "
3053 opts->queue_size, ctrl->ctrl.sqsize + 1);
3054 opts->queue_size = ctrl->ctrl.sqsize + 1;
3057 ret = nvme_fc_init_aen_ops(ctrl);
3059 goto out_term_aen_ops;
3062 * Create the io queues
3065 if (ctrl->ctrl.queue_count > 1) {
3066 if (!ctrl->ioq_live)
3067 ret = nvme_fc_create_io_queues(ctrl);
3069 ret = nvme_fc_recreate_io_queues(ctrl);
3071 goto out_term_aen_ops;
3074 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
3076 ctrl->ctrl.nr_reconnects = 0;
3079 nvme_start_ctrl(&ctrl->ctrl);
3081 return 0; /* Success */
3084 nvme_fc_term_aen_ops(ctrl);
3085 out_disconnect_admin_queue:
3086 /* send a Disconnect(association) LS to fc-nvme target */
3087 nvme_fc_xmt_disconnect_assoc(ctrl);
3088 spin_lock_irqsave(&ctrl->lock, flags);
3089 ctrl->association_id = 0;
3090 disls = ctrl->rcv_disconn;
3091 ctrl->rcv_disconn = NULL;
3092 spin_unlock_irqrestore(&ctrl->lock, flags);
3094 nvme_fc_xmt_ls_rsp(disls);
3095 out_delete_hw_queue:
3096 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
3098 nvme_fc_free_queue(&ctrl->queues[0]);
3099 clear_bit(ASSOC_ACTIVE, &ctrl->flags);
3100 nvme_fc_ctlr_inactive_on_rport(ctrl);
3107 * This routine runs through all outstanding commands on the association
3108 * and aborts them. This routine is typically called by the
3109 * delete_association routine. It is also called due to an error during
3110 * reconnect. In that scenario, it is most likely a command that initializes
3111 * the controller, including fabric Connect commands on io queues, that
3112 * may have timed out or failed, thus the io must be killed for the connect
3113 * thread to see the error.
3116 __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
3119 * If io queues are present, stop them and terminate all outstanding
3120 * ios on them. As FC allocates FC exchange for each io, the
3121 * transport must contact the LLDD to terminate the exchange,
3122 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
3123 * to tell us what io's are busy and invoke a transport routine
3124 * to kill them with the LLDD. After terminating the exchange
3125 * the LLDD will call the transport's normal io done path, but it
3126 * will have an aborted status. The done path will return the
3127 * io requests back to the block layer as part of normal completions
3128 * (but with error status).
3130 if (ctrl->ctrl.queue_count > 1) {
3131 nvme_stop_queues(&ctrl->ctrl);
3132 blk_mq_tagset_busy_iter(&ctrl->tag_set,
3133 nvme_fc_terminate_exchange, &ctrl->ctrl);
3134 blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
3136 nvme_start_queues(&ctrl->ctrl);
3140 * Other transports, which don't have link-level contexts bound
3141 * to sqe's, would try to gracefully shutdown the controller by
3142 * writing the registers for shutdown and polling (call
3143 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
3144 * just aborted and we will wait on those contexts, and given
3145 * there was no indication of how live the controller is on the
3146 * link, don't send more io to create more contexts for the
3147 * shutdown. Let the controller fail via keepalive failure if
3148 * it's still present.
3152 * clean up the admin queue. Same thing as above.
3154 blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
3155 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
3156 nvme_fc_terminate_exchange, &ctrl->ctrl);
3157 blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
3161 * This routine stops operation of the controller on the host side.
3162 * On the host OS stack side: Admin and IO queues are stopped,
3163 * outstanding ios on them terminated via FC ABTS.
3164 * On the link side: the association is terminated.
3167 nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
3169 struct nvmefc_ls_rcv_op *disls = NULL;
3170 unsigned long flags;
3172 if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags))
3175 spin_lock_irqsave(&ctrl->lock, flags);
3176 set_bit(FCCTRL_TERMIO, &ctrl->flags);
3178 spin_unlock_irqrestore(&ctrl->lock, flags);
3180 __nvme_fc_abort_outstanding_ios(ctrl, false);
3182 /* kill the aens as they are a separate path */
3183 nvme_fc_abort_aen_ops(ctrl);
3185 /* wait for all io that had to be aborted */
3186 spin_lock_irq(&ctrl->lock);
3187 wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
3188 clear_bit(FCCTRL_TERMIO, &ctrl->flags);
3189 spin_unlock_irq(&ctrl->lock);
3191 nvme_fc_term_aen_ops(ctrl);
3194 * send a Disconnect(association) LS to fc-nvme target
3195 * Note: could have been sent at top of process, but
3196 * cleaner on link traffic if after the aborts complete.
3197 * Note: if association doesn't exist, association_id will be 0
3199 if (ctrl->association_id)
3200 nvme_fc_xmt_disconnect_assoc(ctrl);
3202 spin_lock_irqsave(&ctrl->lock, flags);
3203 ctrl->association_id = 0;
3204 disls = ctrl->rcv_disconn;
3205 ctrl->rcv_disconn = NULL;
3206 spin_unlock_irqrestore(&ctrl->lock, flags);
3209 * if a Disconnect Request was waiting for a response, send it
3210 * now that all ABTS's have been issued (and are complete).
3212 nvme_fc_xmt_ls_rsp(disls);
3214 if (ctrl->ctrl.tagset) {
3215 nvme_fc_delete_hw_io_queues(ctrl);
3216 nvme_fc_free_io_queues(ctrl);
3219 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
3220 nvme_fc_free_queue(&ctrl->queues[0]);
3222 /* re-enable the admin_q so anything new can fast fail */
3223 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
3225 /* resume the io queues so that things will fast fail */
3226 nvme_start_queues(&ctrl->ctrl);
3228 nvme_fc_ctlr_inactive_on_rport(ctrl);
3232 nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
3234 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
3236 cancel_work_sync(&ctrl->err_work);
3237 cancel_delayed_work_sync(&ctrl->connect_work);
3239 * kill the association on the link side. This will block
3240 * waiting for io to terminate
3242 nvme_fc_delete_association(ctrl);
3246 nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
3248 struct nvme_fc_rport *rport = ctrl->rport;
3249 struct nvme_fc_remote_port *portptr = &rport->remoteport;
3250 unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
3253 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
3256 if (portptr->port_state == FC_OBJSTATE_ONLINE)
3257 dev_info(ctrl->ctrl.device,
3258 "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
3259 ctrl->cnum, status);
3260 else if (time_after_eq(jiffies, rport->dev_loss_end))
3263 if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
3264 if (portptr->port_state == FC_OBJSTATE_ONLINE)
3265 dev_info(ctrl->ctrl.device,
3266 "NVME-FC{%d}: Reconnect attempt in %ld "
3268 ctrl->cnum, recon_delay / HZ);
3269 else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
3270 recon_delay = rport->dev_loss_end - jiffies;
3272 queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
3274 if (portptr->port_state == FC_OBJSTATE_ONLINE)
3275 dev_warn(ctrl->ctrl.device,
3276 "NVME-FC{%d}: Max reconnect attempts (%d) "
3278 ctrl->cnum, ctrl->ctrl.nr_reconnects);
3280 dev_warn(ctrl->ctrl.device,
3281 "NVME-FC{%d}: dev_loss_tmo (%d) expired "
3282 "while waiting for remoteport connectivity.\n",
3283 ctrl->cnum, min_t(int, portptr->dev_loss_tmo,
3284 (ctrl->ctrl.opts->max_reconnects *
3285 ctrl->ctrl.opts->reconnect_delay)));
3286 WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
3291 __nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
3294 * if state is CONNECTING - the error occurred as part of a
3295 * reconnect attempt. Abort any ios on the association and
3296 * let the create_association error paths resolve things.
3298 if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
3299 __nvme_fc_abort_outstanding_ios(ctrl, true);
3304 * For any other state, kill the association. As this routine
3305 * is a common io abort routine for resetting and such, after
3306 * the association is terminated, ensure that the state is set
3310 nvme_stop_keep_alive(&ctrl->ctrl);
3312 /* will block while waiting for io to terminate */
3313 nvme_fc_delete_association(ctrl);
3315 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
3316 !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
3317 dev_err(ctrl->ctrl.device,
3318 "NVME-FC{%d}: error_recovery: Couldn't change state "
3319 "to CONNECTING\n", ctrl->cnum);
3323 nvme_fc_reset_ctrl_work(struct work_struct *work)
3325 struct nvme_fc_ctrl *ctrl =
3326 container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
3329 __nvme_fc_terminate_io(ctrl);
3331 nvme_stop_ctrl(&ctrl->ctrl);
3333 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
3334 ret = nvme_fc_create_association(ctrl);
3339 nvme_fc_reconnect_or_delete(ctrl, ret);
3341 dev_info(ctrl->ctrl.device,
3342 "NVME-FC{%d}: controller reset complete\n",
3347 nvme_fc_connect_err_work(struct work_struct *work)
3349 struct nvme_fc_ctrl *ctrl =
3350 container_of(work, struct nvme_fc_ctrl, err_work);
3352 __nvme_fc_terminate_io(ctrl);
3354 atomic_set(&ctrl->err_work_active, 0);
3357 * Rescheduling the connection after recovering
3358 * from the io error is left to the reconnect work
3359 * item, which should already be stalled waiting on
3360 * the io whose error scheduled this work.
3364 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
3366 .module = THIS_MODULE,
3367 .flags = NVME_F_FABRICS,
3368 .reg_read32 = nvmf_reg_read32,
3369 .reg_read64 = nvmf_reg_read64,
3370 .reg_write32 = nvmf_reg_write32,
3371 .free_ctrl = nvme_fc_nvme_ctrl_freed,
3372 .submit_async_event = nvme_fc_submit_async_event,
3373 .delete_ctrl = nvme_fc_delete_ctrl,
3374 .get_address = nvmf_get_address,
3378 nvme_fc_connect_ctrl_work(struct work_struct *work)
3382 struct nvme_fc_ctrl *ctrl =
3383 container_of(to_delayed_work(work),
3384 struct nvme_fc_ctrl, connect_work);
3386 ret = nvme_fc_create_association(ctrl);
3388 nvme_fc_reconnect_or_delete(ctrl, ret);
3390 dev_info(ctrl->ctrl.device,
3391 "NVME-FC{%d}: controller connect complete\n",
3396 static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
3397 .queue_rq = nvme_fc_queue_rq,
3398 .complete = nvme_fc_complete_rq,
3399 .init_request = nvme_fc_init_request,
3400 .exit_request = nvme_fc_exit_request,
3401 .init_hctx = nvme_fc_init_admin_hctx,
3402 .timeout = nvme_fc_timeout,
3407 * Fails a controller request if it matches an existing controller
3408 * (association) with the same tuple:
3409 * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN>
3411 * The ports don't need to be compared as they are intrinsically
3412 * already matched by the port pointers supplied.
3415 nvme_fc_existing_controller(struct nvme_fc_rport *rport,
3416 struct nvmf_ctrl_options *opts)
3418 struct nvme_fc_ctrl *ctrl;
3419 unsigned long flags;
3422 spin_lock_irqsave(&rport->lock, flags);
3423 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
3424 found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
3428 spin_unlock_irqrestore(&rport->lock, flags);
3433 static struct nvme_ctrl *
3434 nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
3435 struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
3437 struct nvme_fc_ctrl *ctrl;
3438 unsigned long flags;
3441 if (!(rport->remoteport.port_role &
3442 (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
3447 if (!opts->duplicate_connect &&
3448 nvme_fc_existing_controller(rport, opts)) {
3453 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
3459 idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
3465 ctrl->ctrl.opts = opts;
3466 ctrl->ctrl.nr_reconnects = 0;
3468 ctrl->ctrl.numa_node = dev_to_node(lport->dev);
3470 ctrl->ctrl.numa_node = NUMA_NO_NODE;
3471 INIT_LIST_HEAD(&ctrl->ctrl_list);
3472 ctrl->lport = lport;
3473 ctrl->rport = rport;
3474 ctrl->dev = lport->dev;
3476 ctrl->ioq_live = false;
3477 atomic_set(&ctrl->err_work_active, 0);
3478 init_waitqueue_head(&ctrl->ioabort_wait);
3480 get_device(ctrl->dev);
3481 kref_init(&ctrl->ref);
3483 INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
3484 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
3485 INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work);
3486 spin_lock_init(&ctrl->lock);
3488 /* io queue count */
3489 ctrl->ctrl.queue_count = min_t(unsigned int,
3491 lport->ops->max_hw_queues);
3492 ctrl->ctrl.queue_count++; /* +1 for admin queue */
3494 ctrl->ctrl.sqsize = opts->queue_size - 1;
3495 ctrl->ctrl.kato = opts->kato;
3496 ctrl->ctrl.cntlid = 0xffff;
3499 ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
3500 sizeof(struct nvme_fc_queue), GFP_KERNEL);
3504 nvme_fc_init_queue(ctrl, 0);
3506 memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
3507 ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
3508 ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
3509 ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
3510 ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
3511 ctrl->admin_tag_set.cmd_size =
3512 struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
3513 ctrl->lport->ops->fcprqst_priv_sz);
3514 ctrl->admin_tag_set.driver_data = ctrl;
3515 ctrl->admin_tag_set.nr_hw_queues = 1;
3516 ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
3517 ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
3519 ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
3521 goto out_free_queues;
3522 ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
3524 ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
3525 if (IS_ERR(ctrl->ctrl.fabrics_q)) {
3526 ret = PTR_ERR(ctrl->ctrl.fabrics_q);
3527 goto out_free_admin_tag_set;
3530 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
3531 if (IS_ERR(ctrl->ctrl.admin_q)) {
3532 ret = PTR_ERR(ctrl->ctrl.admin_q);
3533 goto out_cleanup_fabrics_q;
3537 * Would have been nice to init io queues tag set as well.
3538 * However, we require interaction from the controller
3539 * for max io queue count before we can do so.
3540 * Defer this to the connect path.
3543 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
3545 goto out_cleanup_admin_q;
3547 /* at this point, teardown path changes to ref counting on nvme ctrl */
3549 spin_lock_irqsave(&rport->lock, flags);
3550 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
3551 spin_unlock_irqrestore(&rport->lock, flags);
3553 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
3554 !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
3555 dev_err(ctrl->ctrl.device,
3556 "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
3560 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
3561 dev_err(ctrl->ctrl.device,
3562 "NVME-FC{%d}: failed to schedule initial connect\n",
3567 flush_delayed_work(&ctrl->connect_work);
3569 dev_info(ctrl->ctrl.device,
3570 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
3571 ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
3576 nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
3577 cancel_work_sync(&ctrl->ctrl.reset_work);
3578 cancel_work_sync(&ctrl->err_work);
3579 cancel_delayed_work_sync(&ctrl->connect_work);
3581 ctrl->ctrl.opts = NULL;
3583 /* initiate nvme ctrl ref counting teardown */
3584 nvme_uninit_ctrl(&ctrl->ctrl);
3586 /* Remove core ctrl ref. */
3587 nvme_put_ctrl(&ctrl->ctrl);
3589 /* as we're past the point where we transition to the ref
3590 * counting teardown path, if we return a bad pointer here,
3591 * the calling routine, thinking it's prior to the
3592 * transition, will do an rport put. Since the teardown
3593 * path also does a rport put, we do an extra get here so
3594 * that proper order/teardown happens.
3596 nvme_fc_rport_get(rport);
3598 return ERR_PTR(-EIO);
3600 out_cleanup_admin_q:
3601 blk_cleanup_queue(ctrl->ctrl.admin_q);
3602 out_cleanup_fabrics_q:
3603 blk_cleanup_queue(ctrl->ctrl.fabrics_q);
3604 out_free_admin_tag_set:
3605 blk_mq_free_tag_set(&ctrl->admin_tag_set);
3607 kfree(ctrl->queues);
3609 put_device(ctrl->dev);
3610 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
3614 /* exit via here doesn't follow ctlr ref points */
3615 return ERR_PTR(ret);
3619 struct nvmet_fc_traddr {
3625 __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
3629 if (match_u64(sstr, &token64))
3637 * This routine validates and extracts the WWNs from the TRADDR string.
3638 * As kernel parsers need the 0x to determine the number base, universally
3639 * build the string to parse with a 0x prefix before parsing name strings.
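 *
 * Illustrative examples of the two accepted spellings (the WWNN/WWPN
 * values below are made-up placeholders):
 *   nn-0x200000109b123456:pn-0x100000109b123456
 *   nn-200000109b123456:pn-100000109b123456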
3642 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
3644 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
3645 substring_t wwn = { name, &name[sizeof(name)-1] };
3646 int nnoffset, pnoffset;
3648 /* validate if string is one of the 2 allowed formats */
3649 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
3650 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
3651 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
3652 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
3653 nnoffset = NVME_FC_TRADDR_OXNNLEN;
3654 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
3655 NVME_FC_TRADDR_OXNNLEN;
3656 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
3657 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
3658 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
3659 "pn-", NVME_FC_TRADDR_NNLEN))) {
3660 nnoffset = NVME_FC_TRADDR_NNLEN;
3661 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
3667 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
3669 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
3670 if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
3673 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
3674 if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
3680 pr_warn("%s: bad traddr string\n", __func__);
3684 static struct nvme_ctrl *
3685 nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
3687 struct nvme_fc_lport *lport;
3688 struct nvme_fc_rport *rport;
3689 struct nvme_ctrl *ctrl;
3690 struct nvmet_fc_traddr laddr = { 0L, 0L };
3691 struct nvmet_fc_traddr raddr = { 0L, 0L };
3692 unsigned long flags;
3695 ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
3696 if (ret || !raddr.nn || !raddr.pn)
3697 return ERR_PTR(-EINVAL);
3699 ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
3700 if (ret || !laddr.nn || !laddr.pn)
3701 return ERR_PTR(-EINVAL);
3703 /* find the host and remote ports to connect together */
3704 spin_lock_irqsave(&nvme_fc_lock, flags);
3705 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3706 if (lport->localport.node_name != laddr.nn ||
3707 lport->localport.port_name != laddr.pn ||
3708 lport->localport.port_state != FC_OBJSTATE_ONLINE)
3711 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3712 if (rport->remoteport.node_name != raddr.nn ||
3713 rport->remoteport.port_name != raddr.pn ||
3714 rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
3717 /* if we fail to get a reference, fall through. Will error */
3718 if (!nvme_fc_rport_get(rport))
3721 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3723 ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
3725 nvme_fc_rport_put(rport);
3729 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3731 pr_warn("%s: %s - %s combination not found\n",
3732 __func__, opts->traddr, opts->host_traddr);
3733 return ERR_PTR(-ENOENT);
3737 static struct nvmf_transport_ops nvme_fc_transport = {
3739 .module = THIS_MODULE,
3740 .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
3741 .allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
3742 .create_ctrl = nvme_fc_create_ctrl,
3745 /* Arbitrary successive failures max. With lots of subsystems could be high */
3746 #define DISCOVERY_MAX_FAIL 20
3748 static ssize_t nvme_fc_nvme_discovery_store(struct device *dev,
3749 struct device_attribute *attr, const char *buf, size_t count)
3751 unsigned long flags;
3752 LIST_HEAD(local_disc_list);
3753 struct nvme_fc_lport *lport;
3754 struct nvme_fc_rport *rport;
3757 spin_lock_irqsave(&nvme_fc_lock, flags);
3759 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3760 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3761 if (!nvme_fc_lport_get(lport))
3763 if (!nvme_fc_rport_get(rport)) {
3765 * This is a temporary condition. Upon restart
3766 * this rport will be gone from the list.
3768 * Revert the lport put and retry. Anything
3769 * added to the list already will be skipped (as
3770 * they are no longer list_empty). Loops should
3771 * resume at rports that were not yet seen.
3773 nvme_fc_lport_put(lport);
3775 if (failcnt++ < DISCOVERY_MAX_FAIL)
3778 pr_err("nvme_discovery: too many reference "
3780 goto process_local_list;
3782 if (list_empty(&rport->disc_list))
3783 list_add_tail(&rport->disc_list,
3789 while (!list_empty(&local_disc_list)) {
3790 rport = list_first_entry(&local_disc_list,
3791 struct nvme_fc_rport, disc_list);
3792 list_del_init(&rport->disc_list);
3793 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3795 lport = rport->lport;
3796 /* signal discovery. Won't hurt if it repeats */
3797 nvme_fc_signal_discovery_scan(lport, rport);
3798 nvme_fc_rport_put(rport);
3799 nvme_fc_lport_put(lport);
3801 spin_lock_irqsave(&nvme_fc_lock, flags);
3803 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3807 static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);
3809 static struct attribute *nvme_fc_attrs[] = {
3810 &dev_attr_nvme_discovery.attr,
3814 static struct attribute_group nvme_fc_attr_group = {
3815 .attrs = nvme_fc_attrs,
3818 static const struct attribute_group *nvme_fc_attr_groups[] = {
3819 &nvme_fc_attr_group,
3823 static struct class fc_class = {
3825 .dev_groups = nvme_fc_attr_groups,
3826 .owner = THIS_MODULE,
3829 static int __init nvme_fc_init_module(void)
3833 nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
3839 * It is expected that in the future the kernel will combine
3840 * the FC-isms that are currently under scsi and now being
3841 * added to by NVME into a new standalone FC class. The SCSI
3842 * and NVME protocols and their devices would be under this
3845 * As we need something to post FC-specific udev events to,
3846 * specifically for nvme probe events, start by creating the
3847 * new device class. When the new standalone FC class is
3848 * put in place, this code will move to a more generic
3849 * location for the class.
3851 ret = class_register(&fc_class);
3853 pr_err("couldn't register class fc\n");
3854 goto out_destroy_wq;
3858 * Create a device for the FC-centric udev events
3860 fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL,
3862 if (IS_ERR(fc_udev_device)) {
3863 pr_err("couldn't create fc_udev device!\n");
3864 ret = PTR_ERR(fc_udev_device);
3865 goto out_destroy_class;
3868 ret = nvmf_register_transport(&nvme_fc_transport);
3870 goto out_destroy_device;
3875 device_destroy(&fc_class, MKDEV(0, 0));
3877 class_unregister(&fc_class);
3879 destroy_workqueue(nvme_fc_wq);
3885 nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
3887 struct nvme_fc_ctrl *ctrl;
3889 spin_lock(&rport->lock);
3890 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
3891 dev_warn(ctrl->ctrl.device,
3892 "NVME-FC{%d}: transport unloading: deleting ctrl\n",
3894 nvme_delete_ctrl(&ctrl->ctrl);
3896 spin_unlock(&rport->lock);
3900 nvme_fc_cleanup_for_unload(void)
3902 struct nvme_fc_lport *lport;
3903 struct nvme_fc_rport *rport;
3905 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3906 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3907 nvme_fc_delete_controllers(rport);
3912 static void __exit nvme_fc_exit_module(void)
3914 unsigned long flags;
3915 bool need_cleanup = false;
3917 spin_lock_irqsave(&nvme_fc_lock, flags);
3918 nvme_fc_waiting_to_unload = true;
3919 if (!list_empty(&nvme_fc_lport_list)) {
3920 need_cleanup = true;
3921 nvme_fc_cleanup_for_unload();
3923 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3925 pr_info("%s: waiting for ctlr deletes\n", __func__);
3926 wait_for_completion(&nvme_fc_unload_proceed);
3927 pr_info("%s: ctrl deletes complete\n", __func__);
3930 nvmf_unregister_transport(&nvme_fc_transport);
3932 ida_destroy(&nvme_fc_local_port_cnt);
3933 ida_destroy(&nvme_fc_ctrl_cnt);
3935 device_destroy(&fc_class, MKDEV(0, 0));
3936 class_unregister(&fc_class);
3937 destroy_workqueue(nvme_fc_wq);
3940 module_init(nvme_fc_init_module);
3941 module_exit(nvme_fc_exit_module);
3943 MODULE_LICENSE("GPL v2");