drivers/nvme/host/fc.c
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>

#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


/* *************************** Data Structures/Defines ****************** */


enum nvme_fc_queue_flags {
        NVME_FC_Q_CONNECTED = 0,
        NVME_FC_Q_LIVE,
};

#define NVME_FC_DEFAULT_DEV_LOSS_TMO    60      /* seconds */

struct nvme_fc_queue {
        struct nvme_fc_ctrl     *ctrl;
        struct device           *dev;
        struct blk_mq_hw_ctx    *hctx;
        void                    *lldd_handle;
        size_t                  cmnd_capsule_len;
        u32                     qnum;
        u32                     rqcnt;
        u32                     seqno;

        u64                     connection_id;
        atomic_t                csn;

        unsigned long           flags;
} __aligned(sizeof(u64));       /* alignment for other things alloc'd with */

enum nvme_fcop_flags {
        FCOP_FLAGS_TERMIO       = (1 << 0),
        FCOP_FLAGS_AEN          = (1 << 1),
};

struct nvmefc_ls_req_op {
        struct nvmefc_ls_req    ls_req;

        struct nvme_fc_rport    *rport;
        struct nvme_fc_queue    *queue;
        struct request          *rq;
        u32                     flags;

        int                     ls_error;
        struct completion       ls_done;
        struct list_head        lsreq_list;     /* rport->ls_req_list */
        bool                    req_queued;
};

enum nvme_fcpop_state {
        FCPOP_STATE_UNINIT      = 0,
        FCPOP_STATE_IDLE        = 1,
        FCPOP_STATE_ACTIVE      = 2,
        FCPOP_STATE_ABORTED     = 3,
        FCPOP_STATE_COMPLETE    = 4,
};

struct nvme_fc_fcp_op {
        struct nvme_request     nreq;           /*
                                                 * nvme/host/core.c
                                                 * requires this to be
                                                 * the 1st element in the
                                                 * private structure
                                                 * associated with the
                                                 * request.
                                                 */
        struct nvmefc_fcp_req   fcp_req;

        struct nvme_fc_ctrl     *ctrl;
        struct nvme_fc_queue    *queue;
        struct request          *rq;

        atomic_t                state;
        u32                     flags;
        u32                     rqno;
        u32                     nents;

        struct nvme_fc_cmd_iu   cmd_iu;
        struct nvme_fc_ersp_iu  rsp_iu;
};

struct nvme_fc_lport {
        struct nvme_fc_local_port       localport;

        struct ida                      endp_cnt;
        struct list_head                port_list;      /* nvme_fc_port_list */
        struct list_head                endp_list;
        struct device                   *dev;   /* physical device for dma */
        struct nvme_fc_port_template    *ops;
        struct kref                     ref;
        atomic_t                        act_rport_cnt;
} __aligned(sizeof(u64));       /* alignment for other things alloc'd with */

struct nvme_fc_rport {
        struct nvme_fc_remote_port      remoteport;

        struct list_head                endp_list; /* for lport->endp_list */
        struct list_head                ctrl_list;
        struct list_head                ls_req_list;
        struct device                   *dev;   /* physical device for dma */
        struct nvme_fc_lport            *lport;
        spinlock_t                      lock;
        struct kref                     ref;
        atomic_t                        act_ctrl_cnt;
        unsigned long                   dev_loss_end;
} __aligned(sizeof(u64));       /* alignment for other things alloc'd with */

enum nvme_fcctrl_flags {
        FCCTRL_TERMIO           = (1 << 0),
};

struct nvme_fc_ctrl {
        spinlock_t              lock;
        struct nvme_fc_queue    *queues;
        struct device           *dev;
        struct nvme_fc_lport    *lport;
        struct nvme_fc_rport    *rport;
        u32                     cnum;

        bool                    ioq_live;
        bool                    assoc_active;
        u64                     association_id;

        struct list_head        ctrl_list;      /* rport->ctrl_list */

        struct blk_mq_tag_set   admin_tag_set;
        struct blk_mq_tag_set   tag_set;

        struct delayed_work     connect_work;

        struct kref             ref;
        u32                     flags;
        u32                     iocnt;
        wait_queue_head_t       ioabort_wait;

        struct nvme_fc_fcp_op   aen_ops[NVME_NR_AEN_COMMANDS];

        struct nvme_ctrl        ctrl;
};

static inline struct nvme_fc_ctrl *
to_fc_ctrl(struct nvme_ctrl *ctrl)
{
        return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

static inline struct nvme_fc_lport *
localport_to_lport(struct nvme_fc_local_port *portptr)
{
        return container_of(portptr, struct nvme_fc_lport, localport);
}

static inline struct nvme_fc_rport *
remoteport_to_rport(struct nvme_fc_remote_port *portptr)
{
        return container_of(portptr, struct nvme_fc_rport, remoteport);
}

static inline struct nvmefc_ls_req_op *
ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
{
        return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
}

static inline struct nvme_fc_fcp_op *
fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
{
        return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
}
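
/*
 * Illustrative sketch (not part of this file): the container_of helpers
 * above let the transport recover its private structures from the bare
 * pointers an LLDD hands back. An LLDD completion callback receives only
 * the struct nvmefc_fcp_req it was given, yet the transport can climb
 * back to its op, request, and controller. The callback name below is
 * hypothetical.
 *
 *	static void example_lldd_done(struct nvmefc_fcp_req *fcpreq)
 *	{
 *		struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(fcpreq);
 *		struct nvme_fc_ctrl *ctrl = op->ctrl;
 *		struct request *rq = op->rq;
 *		// ... complete rq against ctrl ...
 *	}
 */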



/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvme_fc_lock);

static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);



/*
 * These items are short-term. They will eventually be moved into
 * a generic FC class. See comments in module init.
 */
static struct class *fc_class;
static struct device *fc_udev_device;


/* *********************** FC-NVME Port Management ************************ */

static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
                        struct nvme_fc_queue *, unsigned int);

static void
nvme_fc_free_lport(struct kref *ref)
{
        struct nvme_fc_lport *lport =
                container_of(ref, struct nvme_fc_lport, ref);
        unsigned long flags;

        WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
        WARN_ON(!list_empty(&lport->endp_list));

        /* remove from transport list */
        spin_lock_irqsave(&nvme_fc_lock, flags);
        list_del(&lport->port_list);
        spin_unlock_irqrestore(&nvme_fc_lock, flags);

        ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
        ida_destroy(&lport->endp_cnt);

        put_device(lport->dev);

        kfree(lport);
}

static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{
        kref_put(&lport->ref, nvme_fc_free_lport);
}

static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{
        return kref_get_unless_zero(&lport->ref);
}


static struct nvme_fc_lport *
nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
                        struct nvme_fc_port_template *ops,
                        struct device *dev)
{
        struct nvme_fc_lport *lport;
        unsigned long flags;

        spin_lock_irqsave(&nvme_fc_lock, flags);

        list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
                if (lport->localport.node_name != pinfo->node_name ||
                    lport->localport.port_name != pinfo->port_name)
                        continue;

                if (lport->dev != dev) {
                        lport = ERR_PTR(-EXDEV);
                        goto out_done;
                }

                if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
                        lport = ERR_PTR(-EEXIST);
                        goto out_done;
                }

                if (!nvme_fc_lport_get(lport)) {
                        /*
                         * fails if ref cnt already 0. If so,
                         * act as if lport already deleted
                         */
                        lport = NULL;
                        goto out_done;
                }

                /* resume the lport */

                lport->ops = ops;
                lport->localport.port_role = pinfo->port_role;
                lport->localport.port_id = pinfo->port_id;
                lport->localport.port_state = FC_OBJSTATE_ONLINE;

                spin_unlock_irqrestore(&nvme_fc_lock, flags);

                return lport;
        }

        lport = NULL;

out_done:
        spin_unlock_irqrestore(&nvme_fc_lock, flags);

        return lport;
}

/**
 * nvme_fc_register_localport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              host FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a local port pointer. Upon success, the routine
 *             will allocate a nvme_fc_local_port structure and place its
 *             address in the local port pointer. Upon failure, local port
 *             pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
                        struct nvme_fc_port_template *template,
                        struct device *dev,
                        struct nvme_fc_local_port **portptr)
{
        struct nvme_fc_lport *newrec;
        unsigned long flags;
        int ret, idx;

        if (!template->localport_delete || !template->remoteport_delete ||
            !template->ls_req || !template->fcp_io ||
            !template->ls_abort || !template->fcp_abort ||
            !template->max_hw_queues || !template->max_sgl_segments ||
            !template->max_dif_sgl_segments || !template->dma_boundary) {
                ret = -EINVAL;
                goto out_reghost_failed;
        }

        /*
         * look to see if there is already a localport that had been
         * deregistered and in the process of waiting for all the
         * references to fully be removed.  If the references haven't
         * expired, we can simply re-enable the localport. Remoteports
         * and controller reconnections should resume naturally.
         */
        newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);

        /* found an lport, but something about its state is bad */
        if (IS_ERR(newrec)) {
                ret = PTR_ERR(newrec);
                goto out_reghost_failed;

        /* found existing lport, which was resumed */
        } else if (newrec) {
                *portptr = &newrec->localport;
                return 0;
        }

        /* nothing found - allocate a new localport struct */

        newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
                         GFP_KERNEL);
        if (!newrec) {
                ret = -ENOMEM;
                goto out_reghost_failed;
        }

        idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
        if (idx < 0) {
                ret = -ENOSPC;
                goto out_fail_kfree;
        }

        if (!get_device(dev) && dev) {
                ret = -ENODEV;
                goto out_ida_put;
        }

        INIT_LIST_HEAD(&newrec->port_list);
        INIT_LIST_HEAD(&newrec->endp_list);
        kref_init(&newrec->ref);
        atomic_set(&newrec->act_rport_cnt, 0);
        newrec->ops = template;
        newrec->dev = dev;
        ida_init(&newrec->endp_cnt);
        newrec->localport.private = &newrec[1];
        newrec->localport.node_name = pinfo->node_name;
        newrec->localport.port_name = pinfo->port_name;
        newrec->localport.port_role = pinfo->port_role;
        newrec->localport.port_id = pinfo->port_id;
        newrec->localport.port_state = FC_OBJSTATE_ONLINE;
        newrec->localport.port_num = idx;

        spin_lock_irqsave(&nvme_fc_lock, flags);
        list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
        spin_unlock_irqrestore(&nvme_fc_lock, flags);

        if (dev)
                dma_set_seg_boundary(dev, template->dma_boundary);

        *portptr = &newrec->localport;
        return 0;

out_ida_put:
        ida_simple_remove(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
        kfree(newrec);
out_reghost_failed:
        *portptr = NULL;

        return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
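
/*
 * Usage sketch (illustrative, assuming a hypothetical LLDD "examplefc"):
 * the LLDD fills in a port template with its entry points and limits,
 * then registers each physical FC port it discovers. All template
 * callbacks checked at the top of nvme_fc_register_localport() must be
 * non-NULL or registration fails with -EINVAL. Values below are
 * assumptions, not requirements.
 *
 *	static struct nvme_fc_port_template examplefc_template = {
 *		.localport_delete	= examplefc_localport_delete,
 *		.remoteport_delete	= examplefc_remoteport_delete,
 *		.ls_req			= examplefc_ls_req,
 *		.fcp_io			= examplefc_fcp_io,
 *		.ls_abort		= examplefc_ls_abort,
 *		.fcp_abort		= examplefc_fcp_abort,
 *		.max_hw_queues		= 4,
 *		.max_sgl_segments	= 256,
 *		.max_dif_sgl_segments	= 256,
 *		.dma_boundary		= 0xFFFFFFFF,
 *		.local_priv_sz		= sizeof(struct examplefc_lport_priv),
 *	};
 *
 *	struct nvme_fc_port_info pinfo = {
 *		.node_name = wwnn, .port_name = wwpn,
 *		.port_role = FC_PORT_ROLE_NVME_INITIATOR,
 *	};
 *	struct nvme_fc_local_port *localport;
 *	int ret = nvme_fc_register_localport(&pinfo, &examplefc_template,
 *					     &pdev->dev, &localport);
 */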

/**
 * nvme_fc_unregister_localport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVME host FC port.
 * @portptr:   pointer to the (registered) local port that is to be
 *             deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{
        struct nvme_fc_lport *lport = localport_to_lport(portptr);
        unsigned long flags;

        if (!portptr)
                return -EINVAL;

        spin_lock_irqsave(&nvme_fc_lock, flags);

        if (portptr->port_state != FC_OBJSTATE_ONLINE) {
                spin_unlock_irqrestore(&nvme_fc_lock, flags);
                return -EINVAL;
        }
        portptr->port_state = FC_OBJSTATE_DELETED;

        spin_unlock_irqrestore(&nvme_fc_lock, flags);

        if (atomic_read(&lport->act_rport_cnt) == 0)
                lport->ops->localport_delete(&lport->localport);

        nvme_fc_lport_put(lport);

        return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);

/*
 * TRADDR strings, per FC-NVME are fixed format:
 *   "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
 * udev event will only differ by prefix of what field is
 * being specified:
 *    "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
 *  19 + 43 + null_fudge = 64 characters
 */
#define FCNVME_TRADDR_LENGTH            64

static void
nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
                struct nvme_fc_rport *rport)
{
        char hostaddr[FCNVME_TRADDR_LENGTH];    /* NVMEFC_HOST_TRADDR=...*/
        char tgtaddr[FCNVME_TRADDR_LENGTH];     /* NVMEFC_TRADDR=...*/
        char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };

        if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
                return;

        snprintf(hostaddr, sizeof(hostaddr),
                "NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
                lport->localport.node_name, lport->localport.port_name);
        snprintf(tgtaddr, sizeof(tgtaddr),
                "NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
                rport->remoteport.node_name, rport->remoteport.port_name);
        kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
}
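
/*
 * For reference, a resulting uevent carries environment strings of the
 * form (values illustrative):
 *
 *   FC_EVENT=nvmediscovery
 *   NVMEFC_HOST_TRADDR=nn-0x20000090fa931234:pn-0x10000090fa931234
 *   NVMEFC_TRADDR=nn-0x201900a09890f5bf:pn-0x201a00a09890f5bf
 *
 * A udev rule can match on FC_EVENT=="nvmediscovery" to kick off an
 * nvme connect against the reported traddr/host_traddr pair.
 */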

static void
nvme_fc_free_rport(struct kref *ref)
{
        struct nvme_fc_rport *rport =
                container_of(ref, struct nvme_fc_rport, ref);
        struct nvme_fc_lport *lport =
                        localport_to_lport(rport->remoteport.localport);
        unsigned long flags;

        WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
        WARN_ON(!list_empty(&rport->ctrl_list));

        /* remove from lport list */
        spin_lock_irqsave(&nvme_fc_lock, flags);
        list_del(&rport->endp_list);
        spin_unlock_irqrestore(&nvme_fc_lock, flags);

        ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);

        kfree(rport);

        nvme_fc_lport_put(lport);
}

static void
nvme_fc_rport_put(struct nvme_fc_rport *rport)
{
        kref_put(&rport->ref, nvme_fc_free_rport);
}

static int
nvme_fc_rport_get(struct nvme_fc_rport *rport)
{
        return kref_get_unless_zero(&rport->ref);
}

static void
nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
{
        switch (ctrl->ctrl.state) {
        case NVME_CTRL_NEW:
        case NVME_CTRL_CONNECTING:
                /*
                 * As all reconnects were suppressed, schedule a
                 * connect.
                 */
                dev_info(ctrl->ctrl.device,
                        "NVME-FC{%d}: connectivity re-established. "
                        "Attempting reconnect\n", ctrl->cnum);

                queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
                break;

        case NVME_CTRL_RESETTING:
                /*
                 * Controller is already in the process of terminating the
                 * association. No need to do anything further. The reconnect
                 * step will naturally occur after the reset completes.
                 */
                break;

        default:
                /* no action to take - let it delete */
                break;
        }
}

static struct nvme_fc_rport *
nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
                                struct nvme_fc_port_info *pinfo)
{
        struct nvme_fc_rport *rport;
        struct nvme_fc_ctrl *ctrl;
        unsigned long flags;

        spin_lock_irqsave(&nvme_fc_lock, flags);

        list_for_each_entry(rport, &lport->endp_list, endp_list) {
                if (rport->remoteport.node_name != pinfo->node_name ||
                    rport->remoteport.port_name != pinfo->port_name)
                        continue;

                if (!nvme_fc_rport_get(rport)) {
                        rport = ERR_PTR(-ENOLCK);
                        goto out_done;
                }

                spin_unlock_irqrestore(&nvme_fc_lock, flags);

                spin_lock_irqsave(&rport->lock, flags);

                /* has it been unregistered */
                if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
                        /* means lldd called us twice */
                        spin_unlock_irqrestore(&rport->lock, flags);
                        nvme_fc_rport_put(rport);
                        return ERR_PTR(-ESTALE);
                }

                rport->remoteport.port_role = pinfo->port_role;
                rport->remoteport.port_id = pinfo->port_id;
                rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
                rport->dev_loss_end = 0;

                /*
                 * kick off a reconnect attempt on all associations to the
                 * remote port. A successful reconnect will resume i/o.
                 */
                list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
                        nvme_fc_resume_controller(ctrl);

                spin_unlock_irqrestore(&rport->lock, flags);

                return rport;
        }

        rport = NULL;

out_done:
        spin_unlock_irqrestore(&nvme_fc_lock, flags);

        return rport;
}

static inline void
__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
                        struct nvme_fc_port_info *pinfo)
{
        if (pinfo->dev_loss_tmo)
                rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
        else
                rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
}

/**
 * nvme_fc_register_remoteport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              subsystem FC port on its fabric.
 * @localport: pointer to the (registered) local port that the remote
 *             subsystem port is connected to.
 * @pinfo:     pointer to information about the port to be registered
 * @portptr:   pointer to a remote port pointer. Upon success, the routine
 *             will allocate a nvme_fc_remote_port structure and place its
 *             address in the remote port pointer. Upon failure, remote port
 *             pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
                                struct nvme_fc_port_info *pinfo,
                                struct nvme_fc_remote_port **portptr)
{
        struct nvme_fc_lport *lport = localport_to_lport(localport);
        struct nvme_fc_rport *newrec;
        unsigned long flags;
        int ret, idx;

        if (!nvme_fc_lport_get(lport)) {
                ret = -ESHUTDOWN;
                goto out_reghost_failed;
        }

        /*
         * look to see if there is already a remoteport that is waiting
         * for a reconnect (within dev_loss_tmo) with the same WWN's.
         * If so, transition to it and reconnect.
         */
        newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);

        /* found an rport, but something about its state is bad */
        if (IS_ERR(newrec)) {
                ret = PTR_ERR(newrec);
                goto out_lport_put;

        /* found existing rport, which was resumed */
        } else if (newrec) {
                nvme_fc_lport_put(lport);
                __nvme_fc_set_dev_loss_tmo(newrec, pinfo);
                nvme_fc_signal_discovery_scan(lport, newrec);
                *portptr = &newrec->remoteport;
                return 0;
        }

        /* nothing found - allocate a new remoteport struct */

        newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
                         GFP_KERNEL);
        if (!newrec) {
                ret = -ENOMEM;
                goto out_lport_put;
        }

        idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
        if (idx < 0) {
                ret = -ENOSPC;
                goto out_kfree_rport;
        }

        INIT_LIST_HEAD(&newrec->endp_list);
        INIT_LIST_HEAD(&newrec->ctrl_list);
        INIT_LIST_HEAD(&newrec->ls_req_list);
        kref_init(&newrec->ref);
        atomic_set(&newrec->act_ctrl_cnt, 0);
        spin_lock_init(&newrec->lock);
        newrec->remoteport.localport = &lport->localport;
        newrec->dev = lport->dev;
        newrec->lport = lport;
        newrec->remoteport.private = &newrec[1];
        newrec->remoteport.port_role = pinfo->port_role;
        newrec->remoteport.node_name = pinfo->node_name;
        newrec->remoteport.port_name = pinfo->port_name;
        newrec->remoteport.port_id = pinfo->port_id;
        newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
        newrec->remoteport.port_num = idx;
        __nvme_fc_set_dev_loss_tmo(newrec, pinfo);

        spin_lock_irqsave(&nvme_fc_lock, flags);
        list_add_tail(&newrec->endp_list, &lport->endp_list);
        spin_unlock_irqrestore(&nvme_fc_lock, flags);

        nvme_fc_signal_discovery_scan(lport, newrec);

        *portptr = &newrec->remoteport;
        return 0;

out_kfree_rport:
        kfree(newrec);
out_lport_put:
        nvme_fc_lport_put(lport);
out_reghost_failed:
        *portptr = NULL;
        return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
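
/*
 * Usage sketch (illustrative): after a fabric login to an NVMe-capable
 * target port, the LLDD registers the remote port against the local
 * port it was discovered on. Field values below are assumptions.
 *
 *	struct nvme_fc_port_info rpinfo = {
 *		.node_name	= target_wwnn,
 *		.port_name	= target_wwpn,
 *		.port_role	= FC_PORT_ROLE_NVME_TARGET |
 *				  FC_PORT_ROLE_NVME_DISCOVERY,
 *		.port_id	= did,
 *		.dev_loss_tmo	= 0,	// 0 selects the 60s default
 *	};
 *	struct nvme_fc_remote_port *remoteport;
 *	int ret = nvme_fc_register_remoteport(localport, &rpinfo,
 *					      &remoteport);
 *
 * If a prior registration for the same WWNN/WWPN is still within its
 * dev_loss_tmo window, the existing remoteport is resumed instead of a
 * new one being allocated, and suspended controllers reconnect.
 */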

static int
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
        struct nvmefc_ls_req_op *lsop;
        unsigned long flags;

restart:
        spin_lock_irqsave(&rport->lock, flags);

        list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
                if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
                        lsop->flags |= FCOP_FLAGS_TERMIO;
                        spin_unlock_irqrestore(&rport->lock, flags);
                        rport->lport->ops->ls_abort(&rport->lport->localport,
                                                &rport->remoteport,
                                                &lsop->ls_req);
                        goto restart;
                }
        }
        spin_unlock_irqrestore(&rport->lock, flags);

        return 0;
}

static void
nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
{
        dev_info(ctrl->ctrl.device,
                "NVME-FC{%d}: controller connectivity lost. Awaiting "
                "Reconnect\n", ctrl->cnum);

        switch (ctrl->ctrl.state) {
        case NVME_CTRL_NEW:
        case NVME_CTRL_LIVE:
                /*
                 * Schedule a controller reset. The reset will terminate the
                 * association and schedule the reconnect timer.  Reconnects
                 * will be attempted until either the ctlr_loss_tmo
                 * (max_retries * connect_delay) expires or the remoteport's
                 * dev_loss_tmo expires.
                 */
                if (nvme_reset_ctrl(&ctrl->ctrl)) {
                        dev_warn(ctrl->ctrl.device,
                                "NVME-FC{%d}: Couldn't schedule reset.\n",
                                ctrl->cnum);
                        nvme_delete_ctrl(&ctrl->ctrl);
                }
                break;

        case NVME_CTRL_CONNECTING:
                /*
                 * The association has already been terminated and the
                 * controller is attempting reconnects.  No need to do anything
                 * further.  Reconnects will be attempted until either the
                 * ctlr_loss_tmo (max_retries * connect_delay) expires or the
                 * remoteport's dev_loss_tmo expires.
                 */
                break;

        case NVME_CTRL_RESETTING:
                /*
                 * Controller is already in the process of terminating the
                 * association.  No need to do anything further. The reconnect
                 * step will kick in naturally after the association is
                 * terminated.
                 */
                break;

        case NVME_CTRL_DELETING:
        default:
                /* no action to take - let it delete */
                break;
        }
}

/**
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVME subsystem FC port.
 * @portptr:    pointer to the (registered) remote port that is to be
 *              deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{
        struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
        struct nvme_fc_ctrl *ctrl;
        unsigned long flags;

        if (!portptr)
                return -EINVAL;

        spin_lock_irqsave(&rport->lock, flags);

        if (portptr->port_state != FC_OBJSTATE_ONLINE) {
                spin_unlock_irqrestore(&rport->lock, flags);
                return -EINVAL;
        }
        portptr->port_state = FC_OBJSTATE_DELETED;

        rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);

        list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
                /* if dev_loss_tmo==0, dev loss is immediate */
                if (!portptr->dev_loss_tmo) {
                        dev_warn(ctrl->ctrl.device,
                                "NVME-FC{%d}: controller connectivity lost.\n",
                                ctrl->cnum);
                        nvme_delete_ctrl(&ctrl->ctrl);
                } else
                        nvme_fc_ctrl_connectivity_loss(ctrl);
        }

        spin_unlock_irqrestore(&rport->lock, flags);

        nvme_fc_abort_lsops(rport);

        if (atomic_read(&rport->act_ctrl_cnt) == 0)
                rport->lport->ops->remoteport_delete(portptr);

        /*
         * Release the reference. Once all controllers go away, which
         * should only occur after dev_loss_tmo expires, the rport can
         * be torn down.
         */
        nvme_fc_rport_put(rport);

        return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);

/**
 * nvme_fc_rescan_remoteport - transport entry point called by an
 *                              LLDD to request an nvme device rescan.
 * @remoteport: pointer to the (registered) remote port that is to be
 *              rescanned.
 *
 * Returns: N/A
 */
void
nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
{
        struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);

        nvme_fc_signal_discovery_scan(rport->lport, rport);
}
EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);

int
nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
                        u32 dev_loss_tmo)
{
        struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
        unsigned long flags;

        spin_lock_irqsave(&rport->lock, flags);

        if (portptr->port_state != FC_OBJSTATE_ONLINE) {
                spin_unlock_irqrestore(&rport->lock, flags);
                return -EINVAL;
        }

        /* a dev_loss_tmo of 0 (immediate) is allowed to be set */
        rport->remoteport.dev_loss_tmo = dev_loss_tmo;

        spin_unlock_irqrestore(&rport->lock, flags);

        return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);
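
/*
 * Usage sketch (illustrative): an LLDD might mirror a user's change of
 * the SCSI fc_host dev_loss_tmo onto the NVMe remoteport so both
 * protocols age out the port together. Setting 0 makes a subsequent
 * connectivity loss delete controllers immediately.
 *
 *	ret = nvme_fc_set_remoteport_devloss(remoteport, new_tmo);
 *	// ret == -EINVAL if the remoteport is no longer ONLINE
 */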


/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLDDs will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (those returning just a dma address), we'll noop
 * them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
                enum dma_data_direction dir)
{
        return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
        enum dma_data_direction dir)
{
        if (dev)
                dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
                enum dma_data_direction dir)
{
        if (dev)
                dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
                enum dma_data_direction dir)
{
        if (dev)
                dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
        struct scatterlist *s;
        int i;

        WARN_ON(nents == 0 || sg[0].length == 0);

        for_each_sg(sg, s, nents, i) {
                s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
                s->dma_length = s->length;
#endif
        }
        return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
{
        return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
{
        if (dev)
                dma_unmap_sg(dev, sg, nents, dir);
}

/* *********************** FC-NVME LS Handling **************************** */

static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);


static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
{
        struct nvme_fc_rport *rport = lsop->rport;
        struct nvmefc_ls_req *lsreq = &lsop->ls_req;
        unsigned long flags;

        spin_lock_irqsave(&rport->lock, flags);

        if (!lsop->req_queued) {
                spin_unlock_irqrestore(&rport->lock, flags);
                return;
        }

        list_del(&lsop->lsreq_list);

        lsop->req_queued = false;

        spin_unlock_irqrestore(&rport->lock, flags);

        fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
                                  (lsreq->rqstlen + lsreq->rsplen),
                                  DMA_BIDIRECTIONAL);

        nvme_fc_rport_put(rport);
}

static int
__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
                struct nvmefc_ls_req_op *lsop,
                void (*done)(struct nvmefc_ls_req *req, int status))
{
        struct nvmefc_ls_req *lsreq = &lsop->ls_req;
        unsigned long flags;
        int ret = 0;

        if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
                return -ECONNREFUSED;

        if (!nvme_fc_rport_get(rport))
                return -ESHUTDOWN;

        lsreq->done = done;
        lsop->rport = rport;
        lsop->req_queued = false;
        INIT_LIST_HEAD(&lsop->lsreq_list);
        init_completion(&lsop->ls_done);

        lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
                                  lsreq->rqstlen + lsreq->rsplen,
                                  DMA_BIDIRECTIONAL);
        if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
                ret = -EFAULT;
                goto out_putrport;
        }
        lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

        spin_lock_irqsave(&rport->lock, flags);

        list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);

        lsop->req_queued = true;

        spin_unlock_irqrestore(&rport->lock, flags);

        ret = rport->lport->ops->ls_req(&rport->lport->localport,
                                        &rport->remoteport, lsreq);
        if (ret)
                goto out_unlink;

        return 0;

out_unlink:
        lsop->ls_error = ret;
        spin_lock_irqsave(&rport->lock, flags);
        lsop->req_queued = false;
        list_del(&lsop->lsreq_list);
        spin_unlock_irqrestore(&rport->lock, flags);
        fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
                                  (lsreq->rqstlen + lsreq->rsplen),
                                  DMA_BIDIRECTIONAL);
out_putrport:
        nvme_fc_rport_put(rport);

        return ret;
}

static void
nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
{
        struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

        lsop->ls_error = status;
        complete(&lsop->ls_done);
}

static int
nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
{
        struct nvmefc_ls_req *lsreq = &lsop->ls_req;
        struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
        int ret;

        ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);

        if (!ret) {
                /*
                 * No timeout/not interruptible as we need the struct
                 * to exist until the lldd calls us back. Thus mandate
                 * waiting until the driver calls back; the lldd is
                 * responsible for any timeout action.
                 */
                wait_for_completion(&lsop->ls_done);

                __nvme_fc_finish_ls_req(lsop);

                ret = lsop->ls_error;
        }

        if (ret)
                return ret;

        /* ACC or RJT payload ? */
        if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
                return -ENXIO;

        return 0;
}

static int
nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
                struct nvmefc_ls_req_op *lsop,
                void (*done)(struct nvmefc_ls_req *req, int status))
{
        /* don't wait for completion */

        return __nvme_fc_send_ls_req(rport, lsop, done);
}
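
/*
 * Note on the two submission flavors above (a summary, not a new
 * mechanism): nvme_fc_send_ls_req() sleeps on lsop->ls_done until the
 * LLDD invokes the done callback, then validates the ACC/RJT payload
 * inline; it is used for Create Association/Connection during connect,
 * where the caller may block. nvme_fc_send_ls_req_async() returns as
 * soon as the LLDD accepts the request and leaves all completion
 * handling to the supplied done callback; Disconnect Association uses
 * it so teardown never waits on a target that may already be gone.
 */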

/* Validation Error indexes into the string table below */
enum {
        VERR_NO_ERROR           = 0,
        VERR_LSACC              = 1,
        VERR_LSDESC_RQST        = 2,
        VERR_LSDESC_RQST_LEN    = 3,
        VERR_ASSOC_ID           = 4,
        VERR_ASSOC_ID_LEN       = 5,
        VERR_CONN_ID            = 6,
        VERR_CONN_ID_LEN        = 7,
        VERR_CR_ASSOC           = 8,
        VERR_CR_ASSOC_ACC_LEN   = 9,
        VERR_CR_CONN            = 10,
        VERR_CR_CONN_ACC_LEN    = 11,
        VERR_DISCONN            = 12,
        VERR_DISCONN_ACC_LEN    = 13,
};

static char *validation_errors[] = {
        "OK",
        "Not LS_ACC",
        "Not LSDESC_RQST",
        "Bad LSDESC_RQST Length",
        "Not Association ID",
        "Bad Association ID Length",
        "Not Connection ID",
        "Bad Connection ID Length",
        "Not CR_ASSOC Rqst",
        "Bad CR_ASSOC ACC Length",
        "Not CR_CONN Rqst",
        "Bad CR_CONN ACC Length",
        "Not Disconnect Rqst",
        "Bad Disconnect ACC Length",
};

static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
        struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
{
        struct nvmefc_ls_req_op *lsop;
        struct nvmefc_ls_req *lsreq;
        struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
        struct fcnvme_ls_cr_assoc_acc *assoc_acc;
        int ret, fcret = 0;

        lsop = kzalloc((sizeof(*lsop) +
                         ctrl->lport->ops->lsrqst_priv_sz +
                         sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
        if (!lsop) {
                ret = -ENOMEM;
                goto out_no_memory;
        }
        lsreq = &lsop->ls_req;

        lsreq->private = (void *)&lsop[1];
        assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
                        (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
        assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];

        assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
        assoc_rqst->desc_list_len =
                        cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

        assoc_rqst->assoc_cmd.desc_tag =
                        cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
        assoc_rqst->assoc_cmd.desc_len =
                        fcnvme_lsdesc_len(
                                sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

        assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
        assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
        /* Linux supports only Dynamic controllers */
        assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
        uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
        strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
                min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
        strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
                min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));

        lsop->queue = queue;
        lsreq->rqstaddr = assoc_rqst;
        lsreq->rqstlen = sizeof(*assoc_rqst);
        lsreq->rspaddr = assoc_acc;
        lsreq->rsplen = sizeof(*assoc_acc);
        lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

        ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
        if (ret)
                goto out_free_buffer;

        /* process connect LS completion */

        /* validate the ACC response */
        if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
                fcret = VERR_LSACC;
        else if (assoc_acc->hdr.desc_list_len !=
                        fcnvme_lsdesc_len(
                                sizeof(struct fcnvme_ls_cr_assoc_acc)))
                fcret = VERR_CR_ASSOC_ACC_LEN;
        else if (assoc_acc->hdr.rqst.desc_tag !=
                        cpu_to_be32(FCNVME_LSDESC_RQST))
                fcret = VERR_LSDESC_RQST;
        else if (assoc_acc->hdr.rqst.desc_len !=
                        fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
                fcret = VERR_LSDESC_RQST_LEN;
        else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
                fcret = VERR_CR_ASSOC;
        else if (assoc_acc->associd.desc_tag !=
                        cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
                fcret = VERR_ASSOC_ID;
        else if (assoc_acc->associd.desc_len !=
                        fcnvme_lsdesc_len(
                                sizeof(struct fcnvme_lsdesc_assoc_id)))
                fcret = VERR_ASSOC_ID_LEN;
        else if (assoc_acc->connectid.desc_tag !=
                        cpu_to_be32(FCNVME_LSDESC_CONN_ID))
                fcret = VERR_CONN_ID;
        else if (assoc_acc->connectid.desc_len !=
                        fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
                fcret = VERR_CONN_ID_LEN;

        if (fcret) {
                ret = -EBADF;
                dev_err(ctrl->dev,
                        "q %d connect failed: %s\n",
                        queue->qnum, validation_errors[fcret]);
        } else {
                ctrl->association_id =
                        be64_to_cpu(assoc_acc->associd.association_id);
                queue->connection_id =
                        be64_to_cpu(assoc_acc->connectid.connection_id);
                set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
        }

out_free_buffer:
        kfree(lsop);
out_no_memory:
        if (ret)
                dev_err(ctrl->dev,
                        "queue %d connect admin queue failed (%d).\n",
                        queue->qnum, ret);
        return ret;
}
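
/*
 * Layout note (derived from the allocation above): the single kzalloc
 * in the connect routines packs four regions back to back:
 *
 *   +------------------+--------------------+-----------+-----------+
 *   | nvmefc_ls_req_op | LLDD LS private    | LS rqst   | LS acc    |
 *   | (lsop)           | (lsrqst_priv_sz)   | payload   | payload   |
 *   +------------------+--------------------+-----------+-----------+
 *
 * lsreq->private points at &lsop[1], and the request/response payloads
 * follow the LLDD private area, which is why the rqst pointer is
 * computed as lsreq->private + lsrqst_priv_sz.
 */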
1283
1284 static int
1285 nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
1286                         u16 qsize, u16 ersp_ratio)
1287 {
1288         struct nvmefc_ls_req_op *lsop;
1289         struct nvmefc_ls_req *lsreq;
1290         struct fcnvme_ls_cr_conn_rqst *conn_rqst;
1291         struct fcnvme_ls_cr_conn_acc *conn_acc;
1292         int ret, fcret = 0;
1293
1294         lsop = kzalloc((sizeof(*lsop) +
1295                          ctrl->lport->ops->lsrqst_priv_sz +
1296                          sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
1297         if (!lsop) {
1298                 ret = -ENOMEM;
1299                 goto out_no_memory;
1300         }
1301         lsreq = &lsop->ls_req;
1302
1303         lsreq->private = (void *)&lsop[1];
1304         conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
1305                         (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
1306         conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
1307
1308         conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
1309         conn_rqst->desc_list_len = cpu_to_be32(
1310                                 sizeof(struct fcnvme_lsdesc_assoc_id) +
1311                                 sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
1312
1313         conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1314         conn_rqst->associd.desc_len =
1315                         fcnvme_lsdesc_len(
1316                                 sizeof(struct fcnvme_lsdesc_assoc_id));
1317         conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
1318         conn_rqst->connect_cmd.desc_tag =
1319                         cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
1320         conn_rqst->connect_cmd.desc_len =
1321                         fcnvme_lsdesc_len(
1322                                 sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
1323         conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
1324         conn_rqst->connect_cmd.qid  = cpu_to_be16(queue->qnum);
1325         conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);
1326
1327         lsop->queue = queue;
1328         lsreq->rqstaddr = conn_rqst;
1329         lsreq->rqstlen = sizeof(*conn_rqst);
1330         lsreq->rspaddr = conn_acc;
1331         lsreq->rsplen = sizeof(*conn_acc);
1332         lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
1333
1334         ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
1335         if (ret)
1336                 goto out_free_buffer;
1337
1338         /* process connect LS completion */
1339
1340         /* validate the ACC response */
1341         if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
1342                 fcret = VERR_LSACC;
1343         else if (conn_acc->hdr.desc_list_len !=
1344                         fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
1345                 fcret = VERR_CR_CONN_ACC_LEN;
1346         else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
1347                 fcret = VERR_LSDESC_RQST;
1348         else if (conn_acc->hdr.rqst.desc_len !=
1349                         fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
1350                 fcret = VERR_LSDESC_RQST_LEN;
1351         else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
1352                 fcret = VERR_CR_CONN;
1353         else if (conn_acc->connectid.desc_tag !=
1354                         cpu_to_be32(FCNVME_LSDESC_CONN_ID))
1355                 fcret = VERR_CONN_ID;
1356         else if (conn_acc->connectid.desc_len !=
1357                         fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
1358                 fcret = VERR_CONN_ID_LEN;
1359
1360         if (fcret) {
1361                 ret = -EBADF;
1362                 dev_err(ctrl->dev,
1363                         "q %d connect failed: %s\n",
1364                         queue->qnum, validation_errors[fcret]);
1365         } else {
1366                 queue->connection_id =
1367                         be64_to_cpu(conn_acc->connectid.connection_id);
1368                 set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1369         }
1370
1371 out_free_buffer:
1372         kfree(lsop);
1373 out_no_memory:
1374         if (ret)
1375                 dev_err(ctrl->dev,
1376                         "queue %d connect command failed (%d).\n",
1377                         queue->qnum, ret);
1378         return ret;
1379 }
1380
1381 static void
1382 nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
1383 {
1384         struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
1385
1386         __nvme_fc_finish_ls_req(lsop);
1387
1388         /* fc-nvme iniator doesn't care about success or failure of cmd */
1389
1390         kfree(lsop);
1391 }
1392
1393 /*
1394  * This routine sends a FC-NVME LS to disconnect (aka terminate)
1395  * the FC-NVME Association.  Terminating the association also
1396  * terminates the FC-NVME connections (per queue, both admin and io
1397  * queues) that are part of the association. E.g. things are torn
1398  * down, and the related FC-NVME Association ID and Connection IDs
1399  * become invalid.
1400  *
1401  * The behavior of the fc-nvme initiator is such that it's
1402  * understanding of the association and connections will implicitly
1403  * be torn down. The action is implicit as it may be due to a loss of
1404  * connectivity with the fc-nvme target, so you may never get a
1405  * response even if you tried.  As such, the action of this routine
1406  * is to asynchronously send the LS, ignore any results of the LS, and
1407  * continue on with terminating the association. If the fc-nvme target
1408  * is present and receives the LS, it too can tear down.
1409  */
1410 static void
1411 nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
1412 {
1413         struct fcnvme_ls_disconnect_rqst *discon_rqst;
1414         struct fcnvme_ls_disconnect_acc *discon_acc;
1415         struct nvmefc_ls_req_op *lsop;
1416         struct nvmefc_ls_req *lsreq;
1417         int ret;
1418
1419         lsop = kzalloc((sizeof(*lsop) +
1420                          ctrl->lport->ops->lsrqst_priv_sz +
1421                          sizeof(*discon_rqst) + sizeof(*discon_acc)),
1422                         GFP_KERNEL);
1423         if (!lsop)
1424                 /* couldn't sent it... too bad */
1425                 return;
1426
1427         lsreq = &lsop->ls_req;
1428
1429         lsreq->private = (void *)&lsop[1];
1430         discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
1431                         (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
1432         discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];
1433
1434         discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
1435         discon_rqst->desc_list_len = cpu_to_be32(
1436                                 sizeof(struct fcnvme_lsdesc_assoc_id) +
1437                                 sizeof(struct fcnvme_lsdesc_disconn_cmd));
1438
1439         discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1440         discon_rqst->associd.desc_len =
1441                         fcnvme_lsdesc_len(
1442                                 sizeof(struct fcnvme_lsdesc_assoc_id));
1443
1444         discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
1445
1446         discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
1447                                                 FCNVME_LSDESC_DISCONN_CMD);
1448         discon_rqst->discon_cmd.desc_len =
1449                         fcnvme_lsdesc_len(
1450                                 sizeof(struct fcnvme_lsdesc_disconn_cmd));
1451         discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
1452         discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);
1453
1454         lsreq->rqstaddr = discon_rqst;
1455         lsreq->rqstlen = sizeof(*discon_rqst);
1456         lsreq->rspaddr = discon_acc;
1457         lsreq->rsplen = sizeof(*discon_acc);
1458         lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
1459
1460         ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
1461                                 nvme_fc_disconnect_assoc_done);
1462         if (ret)
1463                 kfree(lsop);
1464
1465         /* only meaningful part to terminating the association */
1466         ctrl->association_id = 0;
1467 }
1468
1469
1470 /* *********************** NVME Ctrl Routines **************************** */
1471
1472 static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
1473
1474 static void
1475 __nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
1476                 struct nvme_fc_fcp_op *op)
1477 {
1478         fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
1479                                 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1480         fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
1481                                 sizeof(op->cmd_iu), DMA_TO_DEVICE);
1482
1483         atomic_set(&op->state, FCPOP_STATE_UNINIT);
1484 }
1485
1486 static void
1487 nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
1488                 unsigned int hctx_idx)
1489 {
1490         struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1491
1492         __nvme_fc_exit_request(set->driver_data, op);
1493 }
1494
1495 static int
1496 __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
1497 {
1498         unsigned long flags;
1499         int opstate;
1500
1501         spin_lock_irqsave(&ctrl->lock, flags);
1502         opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
1503         if (opstate != FCPOP_STATE_ACTIVE)
1504                 atomic_set(&op->state, opstate);
1505         else if (ctrl->flags & FCCTRL_TERMIO)
1506                 ctrl->iocnt++;
1507         spin_unlock_irqrestore(&ctrl->lock, flags);
1508
1509         if (opstate != FCPOP_STATE_ACTIVE)
1510                 return -ECANCELED;
1511
1512         ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
1513                                         &ctrl->rport->remoteport,
1514                                         op->queue->lldd_handle,
1515                                         &op->fcp_req);
1516
1517         return 0;
1518 }
1519
1520 static void
1521 nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
1522 {
1523         struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
1524         int i;
1525
1526         for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
1527                 __nvme_fc_abort_op(ctrl, aen_op);
1528 }
1529
1530 static inline void
1531 __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
1532                 struct nvme_fc_fcp_op *op, int opstate)
1533 {
1534         unsigned long flags;
1535
1536         if (opstate == FCPOP_STATE_ABORTED) {
1537                 spin_lock_irqsave(&ctrl->lock, flags);
1538                 if (ctrl->flags & FCCTRL_TERMIO) {
1539                         if (!--ctrl->iocnt)
1540                                 wake_up(&ctrl->ioabort_wait);
1541                 }
1542                 spin_unlock_irqrestore(&ctrl->lock, flags);
1543         }
1544 }
1545
1546 static void
1547 nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1548 {
1549         struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
1550         struct request *rq = op->rq;
1551         struct nvmefc_fcp_req *freq = &op->fcp_req;
1552         struct nvme_fc_ctrl *ctrl = op->ctrl;
1553         struct nvme_fc_queue *queue = op->queue;
1554         struct nvme_completion *cqe = &op->rsp_iu.cqe;
1555         struct nvme_command *sqe = &op->cmd_iu.sqe;
1556         __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
1557         union nvme_result result;
1558         bool terminate_assoc = true;
1559         int opstate;
1560
1561         /*
1562          * WARNING:
1563          * The current linux implementation of a nvme controller
1564          * allocates a single tag set for all io queues and sizes
1565          * the io queues to fully hold all possible tags. Thus, the
1566          * implementation does not reference or care about the sqhd
1567          * value as it never needs to use the sqhd/sqtail pointers
1568          * for submission pacing.
1569          *
1570          * This affects the FC-NVME implementation in two ways:
1571          * 1) As the value doesn't matter, we don't need to waste
1572          *    cycles extracting it from ERSPs and stamping it in the
1573          *    cases where the transport fabricates CQEs on successful
1574          *    completions.
1575          * 2) The FC-NVME implementation requires that ERSP
1576          *    completions be delivered back to the nvme layer in order
1577          *    relative to the rsn, such that the sqhd value will always
1578          *    be "in order" for the nvme layer. As the nvme layer in
1579          *    linux doesn't care about sqhd, there's no need to return
1580          *    them in order.
1581          *
1582          * Additionally:
1583          * As the core nvme layer in linux currently does not look at
1584          * every field in the cqe - in cases where the FC transport must
1585          * fabricate a CQE, the following fields will not be set as they
1586          * are not referenced:
1587          *      cqe.sqid,  cqe.sqhd,  cqe.command_id
1588          *
1589          * Failure or error of an individual i/o, detected by the
1590          * transport in a fashion unrelated to the nvme completion status,
1591          * can potentially cause the initiator and target sides to get
1592          * out of sync on SQ head/tail (aka allowed outstanding io count).
1593          * Per FC-NVME spec, failure of an individual command requires
1594          * the connection to be terminated, which in turn requires the
1595          * association to be terminated.
1596          */
1597
1598         opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
1599
1600         fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
1601                                 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1602
1603         if (opstate == FCPOP_STATE_ABORTED)
1604                 status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
1605         else if (freq->status)
1606                 status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1607
1608         /*
1609          * For the linux implementation, if we have an unsuccessful
1610          * status, the blk-mq layer can typically be called with the
1611          * non-zero status and the content of the cqe isn't important.
1612          */
1613         if (status)
1614                 goto done;
1615
1616         /*
1617          * command completed successfully relative to the wire
1618          * protocol. However, validate anything received and
1619          * extract the status and result from the cqe (create it
1620          * where necessary).
1621          */
1622
1623         switch (freq->rcv_rsplen) {
1624
1625         case 0:
1626         case NVME_FC_SIZEOF_ZEROS_RSP:
1627                 /*
1628                  * No response payload, or 12 bytes of payload (which
1629                  * should all be zeros), is considered successful; the
1630                  * transport fabricates a CQE with no payload.
1631                  */
1632                 if (freq->transferred_length !=
1633                         be32_to_cpu(op->cmd_iu.data_len)) {
1634                         status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1635                         goto done;
1636                 }
1637                 result.u64 = 0;
1638                 break;
1639
1640         case sizeof(struct nvme_fc_ersp_iu):
1641                 /*
1642                  * The ERSP IU contains a full completion with CQE.
1643                  * Validate ERSP IU and look at cqe.
1644                  */
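                /*
                 * A worked example of the length check below, assuming the
                 * standard 32-byte FC-NVME ERSP IU: iu_len is carried in
                 * 32-bit words, so the IU arrives with rcv_rsplen == 32
                 * and iu_len == 8 == rcv_rsplen / 4.
                 */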
1645                 if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
1646                                         (freq->rcv_rsplen / 4) ||
1647                              be32_to_cpu(op->rsp_iu.xfrd_len) !=
1648                                         freq->transferred_length ||
1649                              op->rsp_iu.status_code ||
1650                              sqe->common.command_id != cqe->command_id)) {
1651                         status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1652                         goto done;
1653                 }
1654                 result = cqe->result;
1655                 status = cqe->status;
1656                 break;
1657
1658         default:
1659                 status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1660                 goto done;
1661         }
1662
1663         terminate_assoc = false;
1664
1665 done:
1666         if (op->flags & FCOP_FLAGS_AEN) {
1667                 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
1668                 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
1669                 atomic_set(&op->state, FCPOP_STATE_IDLE);
1670                 op->flags = FCOP_FLAGS_AEN;     /* clear other flags */
1671                 nvme_fc_ctrl_put(ctrl);
1672                 goto check_error;
1673         }
1674
1675         __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
1676         nvme_end_request(rq, status, result);
1677
1678 check_error:
1679         if (terminate_assoc)
1680                 nvme_fc_error_recovery(ctrl, "transport detected io error");
1681 }
1682
1683 static int
1684 __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
1685                 struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
1686                 struct request *rq, u32 rqno)
1687 {
1688         struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1689         int ret = 0;
1690
1691         memset(op, 0, sizeof(*op));
1692         op->fcp_req.cmdaddr = &op->cmd_iu;
1693         op->fcp_req.cmdlen = sizeof(op->cmd_iu);
1694         op->fcp_req.rspaddr = &op->rsp_iu;
1695         op->fcp_req.rsplen = sizeof(op->rsp_iu);
1696         op->fcp_req.done = nvme_fc_fcpio_done;
1697         op->fcp_req.first_sgl = (struct scatterlist *)&op[1];
1698         op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
1699         op->ctrl = ctrl;
1700         op->queue = queue;
1701         op->rq = rq;
1702         op->rqno = rqno;
1703
1704         cmdiu->scsi_id = NVME_CMD_SCSI_ID;
1705         cmdiu->fc_id = NVME_CMD_FC_ID;
1706         cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
1707
1708         op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
1709                                 &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
1710         if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
1711                 dev_err(ctrl->dev,
1712                         "FCP Op failed - cmdiu dma mapping failed.\n");
1713                 ret = -EFAULT;
1714                 goto out_on_error;
1715         }
1716
1717         op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
1718                                 &op->rsp_iu, sizeof(op->rsp_iu),
1719                                 DMA_FROM_DEVICE);
1720         if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
1721                 dev_err(ctrl->dev,
1722                         "FCP Op failed - rspiu dma mapping failed.\n");
1723                 ret = -EFAULT;
1724         }
1725
1726         atomic_set(&op->state, FCPOP_STATE_IDLE);
1727 out_on_error:
1728         return ret;
1729 }
1730
1731 static int
1732 nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
1733                 unsigned int hctx_idx, unsigned int numa_node)
1734 {
1735         struct nvme_fc_ctrl *ctrl = set->driver_data;
1736         struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1737         int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
1738         struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
1739
1740         return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
1741 }
1742
1743 static int
1744 nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
1745 {
1746         struct nvme_fc_fcp_op *aen_op;
1747         struct nvme_fc_cmd_iu *cmdiu;
1748         struct nvme_command *sqe;
1749         void *private;
1750         int i, ret;
1751
1752         aen_op = ctrl->aen_ops;
1753         for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
1754                 private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
1755                                                 GFP_KERNEL);
1756                 if (!private)
1757                         return -ENOMEM;
1758
1759                 cmdiu = &aen_op->cmd_iu;
1760                 sqe = &cmdiu->sqe;
1761                 ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
1762                                 aen_op, (struct request *)NULL,
1763                                 (NVME_AQ_BLK_MQ_DEPTH + i));
1764                 if (ret) {
1765                         kfree(private);
1766                         return ret;
1767                 }
1768
1769                 aen_op->flags = FCOP_FLAGS_AEN;
1770                 aen_op->fcp_req.first_sgl = NULL; /* no sg list */
1771                 aen_op->fcp_req.private = private;
1772
1773                 memset(sqe, 0, sizeof(*sqe));
1774                 sqe->common.opcode = nvme_admin_async_event;
1775                 /* Note: core layer may overwrite the sqe.command_id value */
1776                 sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
1777         }
1778         return 0;
1779 }
1780
1781 static void
1782 nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
1783 {
1784         struct nvme_fc_fcp_op *aen_op;
1785         int i;
1786
1787         aen_op = ctrl->aen_ops;
1788         for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
1789                 if (!aen_op->fcp_req.private)
1790                         continue;
1791
1792                 __nvme_fc_exit_request(ctrl, aen_op);
1793
1794                 kfree(aen_op->fcp_req.private);
1795                 aen_op->fcp_req.private = NULL;
1796         }
1797 }
1798
1799 static inline void
1800 __nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
1801                 unsigned int qidx)
1802 {
1803         struct nvme_fc_queue *queue = &ctrl->queues[qidx];
1804
1805         hctx->driver_data = queue;
1806         queue->hctx = hctx;
1807 }
1808
1809 static int
1810 nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1811                 unsigned int hctx_idx)
1812 {
1813         struct nvme_fc_ctrl *ctrl = data;
1814
1815         __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
1816
1817         return 0;
1818 }
1819
1820 static int
1821 nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1822                 unsigned int hctx_idx)
1823 {
1824         struct nvme_fc_ctrl *ctrl = data;
1825
1826         __nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
1827
1828         return 0;
1829 }
1830
1831 static void
1832 nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
1833 {
1834         struct nvme_fc_queue *queue;
1835
1836         queue = &ctrl->queues[idx];
1837         memset(queue, 0, sizeof(*queue));
1838         queue->ctrl = ctrl;
1839         queue->qnum = idx;
1840         atomic_set(&queue->csn, 1);
1841         queue->dev = ctrl->dev;
1842
1843         if (idx > 0)
1844                 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
1845         else
1846                 queue->cmnd_capsule_len = sizeof(struct nvme_command);
1847
1848         /*
1849          * We considered whether to allocate buffers for all SQEs
1850          * and CQEs and dma map them - mapping their respective entries
1851          * into the request structures (kernel vm addr and dma address)
1852          * so that the driver could use the buffers/mappings directly.
1853          * This only makes sense if the LLDD would use them for its
1854          * messaging api. It's very unlikely most adapter apis would use
1855          * a native NVME sqe/cqe. It would be more reasonable if FC-NVME
1856          * IU payload structures were used instead.
1857          */
1858 }
1859
1860 /*
1861  * This routine terminates a queue at the transport level.
1862  * The transport has already ensured that all outstanding ios on
1863  * the queue have been terminated.
1864  * The transport will send a Disconnect LS request to terminate
1865  * the queue's connection. Termination of the admin queue will also
1866  * terminate the association at the target.
1867  */
1868 static void
1869 nvme_fc_free_queue(struct nvme_fc_queue *queue)
1870 {
1871         if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
1872                 return;
1873
1874         clear_bit(NVME_FC_Q_LIVE, &queue->flags);
1875         /*
1876          * Current implementation never disconnects a single queue.
1877          * It always terminates a whole association. So there is never
1878          * a disconnect(queue) LS sent to the target.
1879          */
1880
1881         queue->connection_id = 0;
1882         atomic_set(&queue->csn, 1);
1883 }
1884
1885 static void
1886 __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
1887         struct nvme_fc_queue *queue, unsigned int qidx)
1888 {
1889         if (ctrl->lport->ops->delete_queue)
1890                 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
1891                                 queue->lldd_handle);
1892         queue->lldd_handle = NULL;
1893 }
1894
1895 static void
1896 nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
1897 {
1898         int i;
1899
1900         for (i = 1; i < ctrl->ctrl.queue_count; i++)
1901                 nvme_fc_free_queue(&ctrl->queues[i]);
1902 }
1903
1904 static int
1905 __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
1906         struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
1907 {
1908         int ret = 0;
1909
1910         queue->lldd_handle = NULL;
1911         if (ctrl->lport->ops->create_queue)
1912                 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
1913                                 qidx, qsize, &queue->lldd_handle);
1914
1915         return ret;
1916 }
1917
1918 static void
1919 nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
1920 {
1921         struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
1922         int i;
1923
1924         for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
1925                 __nvme_fc_delete_hw_queue(ctrl, queue, i);
1926 }
1927
1928 static int
1929 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
1930 {
1931         struct nvme_fc_queue *queue = &ctrl->queues[1];
1932         int i, ret;
1933
1934         for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
1935                 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
1936                 if (ret)
1937                         goto delete_queues;
1938         }
1939
1940         return 0;
1941
1942 delete_queues:
1943         for (; i >= 0; i--)
1944                 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
1945         return ret;
1946 }
1947
1948 static int
1949 nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
1950 {
1951         int i, ret = 0;
1952
1953         for (i = 1; i < ctrl->ctrl.queue_count; i++) {
1954                 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
1955                                         (qsize / 5));
1956                 if (ret)
1957                         break;
1958                 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
1959                 if (ret)
1960                         break;
1961
1962                 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
1963         }
1964
1965         return ret;
1966 }
1967
1968 static void
1969 nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
1970 {
1971         int i;
1972
1973         for (i = 1; i < ctrl->ctrl.queue_count; i++)
1974                 nvme_fc_init_queue(ctrl, i);
1975 }
1976
1977 static void
1978 nvme_fc_ctrl_free(struct kref *ref)
1979 {
1980         struct nvme_fc_ctrl *ctrl =
1981                 container_of(ref, struct nvme_fc_ctrl, ref);
1982         unsigned long flags;
1983
1984         if (ctrl->ctrl.tagset) {
1985                 blk_cleanup_queue(ctrl->ctrl.connect_q);
1986                 blk_mq_free_tag_set(&ctrl->tag_set);
1987         }
1988
1989         /* remove from rport list */
1990         spin_lock_irqsave(&ctrl->rport->lock, flags);
1991         list_del(&ctrl->ctrl_list);
1992         spin_unlock_irqrestore(&ctrl->rport->lock, flags);
1993
1994         blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
1995         blk_cleanup_queue(ctrl->ctrl.admin_q);
1996         blk_mq_free_tag_set(&ctrl->admin_tag_set);
1997
1998         kfree(ctrl->queues);
1999
2000         put_device(ctrl->dev);
2001         nvme_fc_rport_put(ctrl->rport);
2002
2003         ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
2004         if (ctrl->ctrl.opts)
2005                 nvmf_free_options(ctrl->ctrl.opts);
2006         kfree(ctrl);
2007 }
2008
2009 static void
2010 nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
2011 {
2012         kref_put(&ctrl->ref, nvme_fc_ctrl_free);
2013 }
2014
2015 static int
2016 nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
2017 {
2018         return kref_get_unless_zero(&ctrl->ref);
2019 }
2020
2021 /*
2022  * All accesses from nvme core layer done - can now free the
2023  * controller. Called after last nvme_put_ctrl() call
2024  */
2025 static void
2026 nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
2027 {
2028         struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2029
2030         WARN_ON(nctrl != &ctrl->ctrl);
2031
2032         nvme_fc_ctrl_put(ctrl);
2033 }
2034
2035 static void
2036 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
2037 {
2038         /* only proceed if in LIVE state - e.g. on first error */
2039         if (ctrl->ctrl.state != NVME_CTRL_LIVE)
2040                 return;
2041
2042         dev_warn(ctrl->ctrl.device,
2043                 "NVME-FC{%d}: transport association error detected: %s\n",
2044                 ctrl->cnum, errmsg);
2045         dev_warn(ctrl->ctrl.device,
2046                 "NVME-FC{%d}: resetting controller\n", ctrl->cnum);
2047
2048         nvme_reset_ctrl(&ctrl->ctrl);
2049 }
2050
2051 static enum blk_eh_timer_return
2052 nvme_fc_timeout(struct request *rq, bool reserved)
2053 {
2054         struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2055         struct nvme_fc_ctrl *ctrl = op->ctrl;
2056
2057         /*
2058          * we can't individually ABTS an io without affecting the queue,
2059          * thus killing the queue, and thus the association.
2060          * So resolve by performing a controller reset, which will stop
2061          * the host/io stack, terminate the association on the link,
2062          * and recreate an association on the link.
2063          */
2064         nvme_fc_error_recovery(ctrl, "io timeout error");
2065
2066         /*
2067          * the io abort has been initiated. Have the reset timer
2068          * restarted and the abort completion will complete the io
2069          * shortly. Avoids a synchronous wait while the abort finishes.
2070          */
2071         return BLK_EH_RESET_TIMER;
2072 }
2073
2074 static int
2075 nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2076                 struct nvme_fc_fcp_op *op)
2077 {
2078         struct nvmefc_fcp_req *freq = &op->fcp_req;
2079         enum dma_data_direction dir;
2080         int ret;
2081
2082         freq->sg_cnt = 0;
2083
2084         if (!blk_rq_payload_bytes(rq))
2085                 return 0;
2086
2087         freq->sg_table.sgl = freq->first_sgl;
2088         ret = sg_alloc_table_chained(&freq->sg_table,
2089                         blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
2090         if (ret)
2091                 return -ENOMEM;
2092
2093         op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
2094         WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
2095         dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
2096         freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
2097                                 op->nents, dir);
2098         if (unlikely(freq->sg_cnt <= 0)) {
2099                 sg_free_table_chained(&freq->sg_table, true);
2100                 freq->sg_cnt = 0;
2101                 return -EFAULT;
2102         }
2103
2104         /*
2105          * TODO: blk_integrity_rq(rq)  for DIF
2106          */
2107         return 0;
2108 }
2109
2110 static void
2111 nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2112                 struct nvme_fc_fcp_op *op)
2113 {
2114         struct nvmefc_fcp_req *freq = &op->fcp_req;
2115
2116         if (!freq->sg_cnt)
2117                 return;
2118
2119         fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
2120                                 ((rq_data_dir(rq) == WRITE) ?
2121                                         DMA_TO_DEVICE : DMA_FROM_DEVICE));
2122
2123         nvme_cleanup_cmd(rq);
2124
2125         sg_free_table_chained(&freq->sg_table, true);
2126
2127         freq->sg_cnt = 0;
2128 }
2129
2130 /*
2131  * In FC, the queue is a logical thing. At transport connect, the target
2132  * creates its "queue" and returns a handle that the host gives back
2133  * to the target whenever it posts something to the corresponding SQ.
2134  * When an SQE is sent on a SQ, FC effectively considers the SQE, or
2135  * rather the command contained within the SQE, an io, and assigns a
2136  * FC exchange to it. The SQE and the associated SQ handle are sent in
2137  * the initial CMD IU sent on the exchange. All transfers relative to
2138  * the io occur as part of the exchange.  The CQE is the last thing
2139  * for the io, which is transferred (explicitly or implicitly) with
2140  * the RSP IU sent on the exchange. After the CQE is received, the FC
2141  * exchange is terminated and the exchange may be used for a different io.
2142  *
2143  * The transport to LLDD api has the transport making a request for a
2144  * new fcp io request to the LLDD. The LLDD then allocates a FC exchange
2145  * resource and transfers the command. The LLDD will then process all
2146  * steps to complete the io. Upon completion, the transport done routine
2147  * is called.
2148  *
2149  * So - while the operation is outstanding to the LLDD, there is a link
2150  * level FC exchange resource that is also outstanding. This must be
2151  * considered in all cleanup operations.
2152  */
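/*
 * A minimal sketch of the call flow described above, using only entry
 * points that appear in this file (the ordering shown is illustrative):
 *
 *   nvme_fc_start_fcp_op()
 *     -> ctrl->lport->ops->fcp_io()   // LLDD allocates the exchange and
 *                                     // transfers the CMD IU and data
 *     ... LLDD processes all steps to complete the io ...
 *     -> op->fcp_req.done()           // i.e. nvme_fc_fcpio_done(), after
 *                                     // which the exchange is released
 */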
2153 static blk_status_t
2154 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
2155         struct nvme_fc_fcp_op *op, u32 data_len,
2156         enum nvmefc_fcp_datadir io_dir)
2157 {
2158         struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2159         struct nvme_command *sqe = &cmdiu->sqe;
2160         u32 csn;
2161         int ret, opstate;
2162
2163         /*
2164          * before attempting to send the io, check to see if we believe
2165          * the target device is present
2166          */
2167         if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
2168                 return BLK_STS_RESOURCE;
2169
2170         if (!nvme_fc_ctrl_get(ctrl))
2171                 return BLK_STS_IOERR;
2172
2173         /* format the FC-NVME CMD IU and fcp_req */
2174         cmdiu->connection_id = cpu_to_be64(queue->connection_id);
2175         csn = atomic_inc_return(&queue->csn);
2176         cmdiu->csn = cpu_to_be32(csn);
2177         cmdiu->data_len = cpu_to_be32(data_len);
2178         switch (io_dir) {
2179         case NVMEFC_FCP_WRITE:
2180                 cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
2181                 break;
2182         case NVMEFC_FCP_READ:
2183                 cmdiu->flags = FCNVME_CMD_FLAGS_READ;
2184                 break;
2185         case NVMEFC_FCP_NODATA:
2186                 cmdiu->flags = 0;
2187                 break;
2188         }
2189         op->fcp_req.payload_length = data_len;
2190         op->fcp_req.io_dir = io_dir;
2191         op->fcp_req.transferred_length = 0;
2192         op->fcp_req.rcv_rsplen = 0;
2193         op->fcp_req.status = NVME_SC_SUCCESS;
2194         op->fcp_req.sqid = cpu_to_le16(queue->qnum);
2195
2196         /*
2197          * validate per fabric rules, set fields mandated by fabric spec
2198          * as well as those by FC-NVME spec.
2199          */
2200         WARN_ON_ONCE(sqe->common.metadata);
2201         sqe->common.flags |= NVME_CMD_SGL_METABUF;
2202
2203         /*
2204          * format SQE DPTR field per FC-NVME rules:
2205          *    type=0x5     Transport SGL Data Block Descriptor
2206          *    subtype=0xA  Transport-specific value
2207          *    address=0
2208          *    length=length of the data series
2209          */
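        /*
         * Worked example, assuming the standard definitions of the two
         * constants (NVME_TRANSPORT_SGL_DATA_DESC == 0x5,
         * NVME_SGL_FMT_TRANSPORT_A == 0xA): the type byte set below is
         * (0x5 << 4) | 0xA == 0x5A.
         */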
2210         sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2211                                         NVME_SGL_FMT_TRANSPORT_A;
2212         sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
2213         sqe->rw.dptr.sgl.addr = 0;
2214
2215         if (!(op->flags & FCOP_FLAGS_AEN)) {
2216                 ret = nvme_fc_map_data(ctrl, op->rq, op);
2217                 if (ret < 0) {
2218                         nvme_cleanup_cmd(op->rq);
2219                         nvme_fc_ctrl_put(ctrl);
2220                         if (ret == -ENOMEM || ret == -EAGAIN)
2221                                 return BLK_STS_RESOURCE;
2222                         return BLK_STS_IOERR;
2223                 }
2224         }
2225
2226         fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
2227                                   sizeof(op->cmd_iu), DMA_TO_DEVICE);
2228
2229         atomic_set(&op->state, FCPOP_STATE_ACTIVE);
2230
2231         if (!(op->flags & FCOP_FLAGS_AEN))
2232                 blk_mq_start_request(op->rq);
2233
2234         ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
2235                                         &ctrl->rport->remoteport,
2236                                         queue->lldd_handle, &op->fcp_req);
2237
2238         if (ret) {
2239                 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
2240                 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
2241
2242                 if (!(op->flags & FCOP_FLAGS_AEN))
2243                         nvme_fc_unmap_data(ctrl, op->rq, op);
2244
2245                 nvme_fc_ctrl_put(ctrl);
2246
2247                 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
2248                                 ret != -EBUSY)
2249                         return BLK_STS_IOERR;
2250
2251                 return BLK_STS_RESOURCE;
2252         }
2253
2254         return BLK_STS_OK;
2255 }
2256
2257 static blk_status_t
2258 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
2259                         const struct blk_mq_queue_data *bd)
2260 {
2261         struct nvme_ns *ns = hctx->queue->queuedata;
2262         struct nvme_fc_queue *queue = hctx->driver_data;
2263         struct nvme_fc_ctrl *ctrl = queue->ctrl;
2264         struct request *rq = bd->rq;
2265         struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2266         struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2267         struct nvme_command *sqe = &cmdiu->sqe;
2268         enum nvmefc_fcp_datadir io_dir;
2269         bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
2270         u32 data_len;
2271         blk_status_t ret;
2272
2273         if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
2274             !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2275                 return nvmf_fail_nonready_command(rq);
2276
2277         ret = nvme_setup_cmd(ns, rq, sqe);
2278         if (ret)
2279                 return ret;
2280
2281         data_len = blk_rq_payload_bytes(rq);
2282         if (data_len)
2283                 io_dir = ((rq_data_dir(rq) == WRITE) ?
2284                                         NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
2285         else
2286                 io_dir = NVMEFC_FCP_NODATA;
2287
2288         return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
2289 }
2290
2291 static struct blk_mq_tags *
2292 nvme_fc_tagset(struct nvme_fc_queue *queue)
2293 {
2294         if (queue->qnum == 0)
2295                 return queue->ctrl->admin_tag_set.tags[queue->qnum];
2296
2297         return queue->ctrl->tag_set.tags[queue->qnum - 1];
2298 }
2299
2300 static int
2301 nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
2302 {
2304         struct nvme_fc_queue *queue = hctx->driver_data;
2305         struct nvme_fc_ctrl *ctrl = queue->ctrl;
2306         struct request *req;
2307         struct nvme_fc_fcp_op *op;
2308
2309         req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag);
2310         if (!req)
2311                 return 0;
2312
2313         op = blk_mq_rq_to_pdu(req);
2314
2315         if ((atomic_read(&op->state) == FCPOP_STATE_ACTIVE) &&
2316                  (ctrl->lport->ops->poll_queue))
2317                 ctrl->lport->ops->poll_queue(&ctrl->lport->localport,
2318                                                  queue->lldd_handle);
2319
2320         return ((atomic_read(&op->state) != FCPOP_STATE_ACTIVE));
2321 }
2322
2323 static void
2324 nvme_fc_submit_async_event(struct nvme_ctrl *arg)
2325 {
2326         struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
2327         struct nvme_fc_fcp_op *aen_op;
2328         unsigned long flags;
2329         bool terminating = false;
2330         blk_status_t ret;
2331
2332         spin_lock_irqsave(&ctrl->lock, flags);
2333         if (ctrl->flags & FCCTRL_TERMIO)
2334                 terminating = true;
2335         spin_unlock_irqrestore(&ctrl->lock, flags);
2336
2337         if (terminating)
2338                 return;
2339
2340         aen_op = &ctrl->aen_ops[0];
2341
2342         ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
2343                                         NVMEFC_FCP_NODATA);
2344         if (ret)
2345                 dev_err(ctrl->ctrl.device,
2346                         "failed async event work\n");
2347 }
2348
2349 static void
2350 nvme_fc_complete_rq(struct request *rq)
2351 {
2352         struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2353         struct nvme_fc_ctrl *ctrl = op->ctrl;
2354
2355         atomic_set(&op->state, FCPOP_STATE_IDLE);
2356
2357         nvme_fc_unmap_data(ctrl, rq, op);
2358         nvme_complete_rq(rq);
2359         nvme_fc_ctrl_put(ctrl);
2360 }
2361
2362 /*
2363  * This routine is used by the transport when it needs to find active
2364  * io on a queue that is to be terminated. The transport uses
2365  * blk_mq_tagset_busy_iter() to find the busy requests; the iterator
2366  * then invokes this routine on each of them to kill them one by one.
2367  *
2368  * As FC allocates FC exchange for each io, the transport must contact
2369  * the LLDD to terminate the exchange, thus releasing the FC exchange.
2370  * After terminating the exchange the LLDD will call the transport's
2371  * normal io done path for the request, but it will have an aborted
2372  * status. The done path will return the io request back to the block
2373  * layer with an error status.
2374  */
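/*
 * Typical usage, as in nvme_fc_delete_association() below:
 *
 *   blk_mq_tagset_busy_iter(&ctrl->tag_set,
 *                           nvme_fc_terminate_exchange, &ctrl->ctrl);
 */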
2375 static void
2376 nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
2377 {
2378         struct nvme_ctrl *nctrl = data;
2379         struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2380         struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
2381
2382         __nvme_fc_abort_op(ctrl, op);
2383 }
2384
2385
2386 static const struct blk_mq_ops nvme_fc_mq_ops = {
2387         .queue_rq       = nvme_fc_queue_rq,
2388         .complete       = nvme_fc_complete_rq,
2389         .init_request   = nvme_fc_init_request,
2390         .exit_request   = nvme_fc_exit_request,
2391         .init_hctx      = nvme_fc_init_hctx,
2392         .poll           = nvme_fc_poll,
2393         .timeout        = nvme_fc_timeout,
2394 };
2395
2396 static int
2397 nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
2398 {
2399         struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2400         unsigned int nr_io_queues;
2401         int ret;
2402
2403         nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2404                                 ctrl->lport->ops->max_hw_queues);
2405         ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2406         if (ret) {
2407                 dev_info(ctrl->ctrl.device,
2408                         "set_queue_count failed: %d\n", ret);
2409                 return ret;
2410         }
2411
2412         ctrl->ctrl.queue_count = nr_io_queues + 1;
2413         if (!nr_io_queues)
2414                 return 0;
2415
2416         nvme_fc_init_io_queues(ctrl);
2417
2418         memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
2419         ctrl->tag_set.ops = &nvme_fc_mq_ops;
2420         ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
2421         ctrl->tag_set.reserved_tags = 1; /* fabric connect */
2422         ctrl->tag_set.numa_node = NUMA_NO_NODE;
2423         ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
2424         ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
2425                                         (SG_CHUNK_SIZE *
2426                                                 sizeof(struct scatterlist)) +
2427                                         ctrl->lport->ops->fcprqst_priv_sz;
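        /*
         * Sketch of the per-request pdu that cmd_size reserves above
         * (offsets per __nvme_fc_init_request(): first_sgl = &op[1],
         * with the LLDD private area following the sgl):
         *
         *   | struct nvme_fc_fcp_op | SG_CHUNK_SIZE scatterlists | LLDD priv |
         */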
2428         ctrl->tag_set.driver_data = ctrl;
2429         ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
2430         ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
2431
2432         ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
2433         if (ret)
2434                 return ret;
2435
2436         ctrl->ctrl.tagset = &ctrl->tag_set;
2437
2438         ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
2439         if (IS_ERR(ctrl->ctrl.connect_q)) {
2440                 ret = PTR_ERR(ctrl->ctrl.connect_q);
2441                 goto out_free_tag_set;
2442         }
2443
2444         ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2445         if (ret)
2446                 goto out_cleanup_blk_queue;
2447
2448         ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2449         if (ret)
2450                 goto out_delete_hw_queues;
2451
2452         ctrl->ioq_live = true;
2453
2454         return 0;
2455
2456 out_delete_hw_queues:
2457         nvme_fc_delete_hw_io_queues(ctrl);
2458 out_cleanup_blk_queue:
2459         blk_cleanup_queue(ctrl->ctrl.connect_q);
2460 out_free_tag_set:
2461         blk_mq_free_tag_set(&ctrl->tag_set);
2462         nvme_fc_free_io_queues(ctrl);
2463
2464         /* force the ctrl free routine to ignore io queues */
2465         ctrl->ctrl.tagset = NULL;
2466
2467         return ret;
2468 }
2469
2470 static int
2471 nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
2472 {
2473         struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2474         unsigned int nr_io_queues;
2475         int ret;
2476
2477         nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2478                                 ctrl->lport->ops->max_hw_queues);
2479         ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2480         if (ret) {
2481                 dev_info(ctrl->ctrl.device,
2482                         "set_queue_count failed: %d\n", ret);
2483                 return ret;
2484         }
2485
2486         ctrl->ctrl.queue_count = nr_io_queues + 1;
2487         /* check for io queues existing */
2488         if (ctrl->ctrl.queue_count == 1)
2489                 return 0;
2490
2491         ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2492         if (ret)
2493                 goto out_free_io_queues;
2494
2495         ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2496         if (ret)
2497                 goto out_delete_hw_queues;
2498
2499         blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
2500
2501         return 0;
2502
2503 out_delete_hw_queues:
2504         nvme_fc_delete_hw_io_queues(ctrl);
2505 out_free_io_queues:
2506         nvme_fc_free_io_queues(ctrl);
2507         return ret;
2508 }
2509
2510 static void
2511 nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
2512 {
2513         struct nvme_fc_lport *lport = rport->lport;
2514
2515         atomic_inc(&lport->act_rport_cnt);
2516 }
2517
2518 static void
2519 nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
2520 {
2521         struct nvme_fc_lport *lport = rport->lport;
2522         u32 cnt;
2523
2524         cnt = atomic_dec_return(&lport->act_rport_cnt);
2525         if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
2526                 lport->ops->localport_delete(&lport->localport);
2527 }
2528
2529 static int
2530 nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
2531 {
2532         struct nvme_fc_rport *rport = ctrl->rport;
2533         u32 cnt;
2534
2535         if (ctrl->assoc_active)
2536                 return 1;
2537
2538         ctrl->assoc_active = true;
2539         cnt = atomic_inc_return(&rport->act_ctrl_cnt);
2540         if (cnt == 1)
2541                 nvme_fc_rport_active_on_lport(rport);
2542
2543         return 0;
2544 }
2545
2546 static int
2547 nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
2548 {
2549         struct nvme_fc_rport *rport = ctrl->rport;
2550         struct nvme_fc_lport *lport = rport->lport;
2551         u32 cnt;
2552
2553         /* ctrl->assoc_active=false will be set independently */
2554
2555         cnt = atomic_dec_return(&rport->act_ctrl_cnt);
2556         if (cnt == 0) {
2557                 if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
2558                         lport->ops->remoteport_delete(&rport->remoteport);
2559                 nvme_fc_rport_inactive_on_lport(rport);
2560         }
2561
2562         return 0;
2563 }
2564
2565 /*
2566  * This routine restarts the controller on the host side, and
2567  * on the link side, recreates the controller association.
2568  */
2569 static int
2570 nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
2571 {
2572         struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2573         int ret;
2574         bool changed;
2575
2576         ++ctrl->ctrl.nr_reconnects;
2577
2578         if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
2579                 return -ENODEV;
2580
2581         if (nvme_fc_ctlr_active_on_rport(ctrl))
2582                 return -ENOTUNIQ;
2583
2584         /*
2585          * Create the admin queue
2586          */
2587
2588         ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
2589                                 NVME_AQ_DEPTH);
2590         if (ret)
2591                 goto out_free_queue;
2592
2593         ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
2594                                 NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
2595         if (ret)
2596                 goto out_delete_hw_queue;
2597
2598         blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
2599
2600         ret = nvmf_connect_admin_queue(&ctrl->ctrl);
2601         if (ret)
2602                 goto out_disconnect_admin_queue;
2603
2604         set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
2605
2606         /*
2607          * Check controller capabilities
2608          *
2609          * todo:- add code to check if ctrl attributes changed from
2610          * prior connection values
2611          */
2612
2613         ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
2614         if (ret) {
2615                 dev_err(ctrl->ctrl.device,
2616                         "prop_get NVME_REG_CAP failed\n");
2617                 goto out_disconnect_admin_queue;
2618         }
2619
2620         ctrl->ctrl.sqsize =
2621                 min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);
2622
2623         ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
2624         if (ret)
2625                 goto out_disconnect_admin_queue;
2626
2627         ctrl->ctrl.max_hw_sectors =
2628                 (ctrl->lport->ops->max_sgl_segments - 1) << (PAGE_SHIFT - 9);
2629
2630         ret = nvme_init_identify(&ctrl->ctrl);
2631         if (ret)
2632                 goto out_disconnect_admin_queue;
2633
2634         /* sanity checks */
2635
2636         /* FC-NVME does not have other data in the capsule */
2637         if (ctrl->ctrl.icdoff) {
2638                 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
2639                                 ctrl->ctrl.icdoff);
2640                 ret = -EINVAL;
2641                 goto out_disconnect_admin_queue;
2642         }
2642
2643         /* FC-NVME supports normal SGL Data Block Descriptors */
2644
2645         if (opts->queue_size > ctrl->ctrl.maxcmd) {
2646                 /* warn if maxcmd is lower than queue_size */
2647                 dev_warn(ctrl->ctrl.device,
2648                         "queue_size %zu > ctrl maxcmd %u, reducing "
2649                         "queue_size to maxcmd\n",
2650                         opts->queue_size, ctrl->ctrl.maxcmd);
2651                 opts->queue_size = ctrl->ctrl.maxcmd;
2652         }
2653
2654         if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
2655                 /* warn if sqsize is lower than queue_size */
2656                 dev_warn(ctrl->ctrl.device,
2657                         "queue_size %zu > ctrl sqsize %u, clamping down\n",
2658                         opts->queue_size, ctrl->ctrl.sqsize + 1);
2659                 opts->queue_size = ctrl->ctrl.sqsize + 1;
2660         }
2661
2662         ret = nvme_fc_init_aen_ops(ctrl);
2663         if (ret)
2664                 goto out_term_aen_ops;
2665
2666         /*
2667          * Create the io queues
2668          */
2669
2670         if (ctrl->ctrl.queue_count > 1) {
2671                 if (!ctrl->ioq_live)
2672                         ret = nvme_fc_create_io_queues(ctrl);
2673                 else
2674                         ret = nvme_fc_recreate_io_queues(ctrl);
2675                 if (ret)
2676                         goto out_term_aen_ops;
2677         }
2678
2679         changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
2680
2681         ctrl->ctrl.nr_reconnects = 0;
2682
2683         if (changed)
2684                 nvme_start_ctrl(&ctrl->ctrl);
2685
2686         return 0;       /* Success */
2687
2688 out_term_aen_ops:
2689         nvme_fc_term_aen_ops(ctrl);
2690 out_disconnect_admin_queue:
2691         /* send a Disconnect(association) LS to fc-nvme target */
2692         nvme_fc_xmt_disconnect_assoc(ctrl);
2693 out_delete_hw_queue:
2694         __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
2695 out_free_queue:
2696         nvme_fc_free_queue(&ctrl->queues[0]);
2697         ctrl->assoc_active = false;
2698         nvme_fc_ctlr_inactive_on_rport(ctrl);
2699
2700         return ret;
2701 }
2702
2703 /*
2704  * This routine stops operation of the controller on the host side.
2705  * On the host os stack side: Admin and IO queues are stopped,
2706  *   outstanding ios on them terminated via FC ABTS.
2707  * On the link side: the association is terminated.
2708  */
2709 static void
2710 nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
2711 {
2712         unsigned long flags;
2713
2714         if (!ctrl->assoc_active)
2715                 return;
2716         ctrl->assoc_active = false;
2717
2718         spin_lock_irqsave(&ctrl->lock, flags);
2719         ctrl->flags |= FCCTRL_TERMIO;
2720         ctrl->iocnt = 0;
2721         spin_unlock_irqrestore(&ctrl->lock, flags);
2722
2723         /*
2724          * If io queues are present, stop them and terminate all outstanding
2725          * ios on them. As FC allocates FC exchange for each io, the
2726          * transport must contact the LLDD to terminate the exchange,
2727          * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
2728          * to tell us what io's are busy and invoke a transport routine
2729          * to kill them with the LLDD.  After terminating the exchange
2730          * the LLDD will call the transport's normal io done path, but it
2731          * will have an aborted status. The done path will return the
2732          * io requests back to the block layer as part of normal completions
2733          * (but with error status).
2734          */
2735         if (ctrl->ctrl.queue_count > 1) {
2736                 nvme_stop_queues(&ctrl->ctrl);
2737                 blk_mq_tagset_busy_iter(&ctrl->tag_set,
2738                                 nvme_fc_terminate_exchange, &ctrl->ctrl);
2739         }
2740
2741         /*
2742          * Other transports, which don't have link-level contexts bound
2743          * to sqe's, would try to gracefully shutdown the controller by
2744          * writing the registers for shutdown and polling (call
2745          * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
2746          * just aborted and we will wait on those contexts, and given
2747          * there was no indication of how live the controller is on the
2748          * link, don't send more io to create more contexts for the
2749          * shutdown. Let the controller fail via keepalive failure if
2750          * it's still present.
2751          */
2752
2753         /*
2754          * clean up the admin queue. Same thing as above.
2755          * use blk_mq_tagset_busy_iter() and the transport routine to
2756          * terminate the exchanges.
2757          */
2758         blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
2759         blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
2760                                 nvme_fc_terminate_exchange, &ctrl->ctrl);
2761
2762         /* kill the aens as they are a separate path */
2763         nvme_fc_abort_aen_ops(ctrl);
2764
2765         /* wait for all io that had to be aborted */
2766         spin_lock_irq(&ctrl->lock);
2767         wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
2768         ctrl->flags &= ~FCCTRL_TERMIO;
2769         spin_unlock_irq(&ctrl->lock);
2770
2771         nvme_fc_term_aen_ops(ctrl);
2772
2773         /*
2774          * send a Disconnect(association) LS to fc-nvme target
2775          * Note: could have been sent at top of process, but
2776          * cleaner on link traffic if after the aborts complete.
2777          * Note: if association doesn't exist, association_id will be 0
2778          */
2779         if (ctrl->association_id)
2780                 nvme_fc_xmt_disconnect_assoc(ctrl);
2781
2782         if (ctrl->ctrl.tagset) {
2783                 nvme_fc_delete_hw_io_queues(ctrl);
2784                 nvme_fc_free_io_queues(ctrl);
2785         }
2786
2787         __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
2788         nvme_fc_free_queue(&ctrl->queues[0]);
2789
2790         /* re-enable the admin_q so anything new can fast fail */
2791         blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
2792
2793         nvme_fc_ctlr_inactive_on_rport(ctrl);
2794 }
2795
2796 static void
2797 nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
2798 {
2799         struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2800
2801         cancel_delayed_work_sync(&ctrl->connect_work);
2802         /*
2803          * kill the association on the link side.  this will block
2804          * waiting for io to terminate
2805          */
2806         nvme_fc_delete_association(ctrl);
2807
2808         /* resume the io queues so that things will fast fail */
2809         nvme_start_queues(nctrl);
2810 }
2811
2812 static void
2813 nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
2814 {
2815         struct nvme_fc_rport *rport = ctrl->rport;
2816         struct nvme_fc_remote_port *portptr = &rport->remoteport;
2817         unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
2818         bool recon = true;
2819
2820         if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
2821                 return;
2822
2823         if (portptr->port_state == FC_OBJSTATE_ONLINE)
2824                 dev_info(ctrl->ctrl.device,
2825                         "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
2826                         ctrl->cnum, status);
2827         else if (time_after_eq(jiffies, rport->dev_loss_end))
2828                 recon = false;
2829
2830         if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
2831                 if (portptr->port_state == FC_OBJSTATE_ONLINE)
2832                         dev_info(ctrl->ctrl.device,
2833                                 "NVME-FC{%d}: Reconnect attempt in %ld "
2834                                 "seconds\n",
2835                                 ctrl->cnum, recon_delay / HZ);
2836                 else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
2837                         recon_delay = rport->dev_loss_end - jiffies;
2838
2839                 queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
2840         } else {
2841                 if (portptr->port_state == FC_OBJSTATE_ONLINE)
2842                         dev_warn(ctrl->ctrl.device,
2843                                 "NVME-FC{%d}: Max reconnect attempts (%d) "
2844                                 "reached.\n",
2845                                 ctrl->cnum, ctrl->ctrl.nr_reconnects);
2846                 else
2847                         dev_warn(ctrl->ctrl.device,
2848                                 "NVME-FC{%d}: dev_loss_tmo (%d) expired "
2849                                 "while waiting for remoteport connectivity.\n",
2850                                 ctrl->cnum, portptr->dev_loss_tmo);
2851                 WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
2852         }
2853 }
2854
2855 static void
2856 nvme_fc_reset_ctrl_work(struct work_struct *work)
2857 {
2858         struct nvme_fc_ctrl *ctrl =
2859                 container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
2860         int ret;
2861
2862         nvme_stop_ctrl(&ctrl->ctrl);
2863
2864         /* will block while waiting for io to terminate */
2865         nvme_fc_delete_association(ctrl);
2866
2867         if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2868                 dev_err(ctrl->ctrl.device,
2869                         "NVME-FC{%d}: error_recovery: Couldn't change state "
2870                         "to CONNECTING\n", ctrl->cnum);
2871                 return;
2872         }
2873
2874         if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
2875                 ret = nvme_fc_create_association(ctrl);
2876         else
2877                 ret = -ENOTCONN;
2878
2879         if (ret)
2880                 nvme_fc_reconnect_or_delete(ctrl, ret);
2881         else
2882                 dev_info(ctrl->ctrl.device,
2883                         "NVME-FC{%d}: controller reset complete\n",
2884                         ctrl->cnum);
2885 }
2886
2887 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
2888         .name                   = "fc",
2889         .module                 = THIS_MODULE,
2890         .flags                  = NVME_F_FABRICS,
2891         .reg_read32             = nvmf_reg_read32,
2892         .reg_read64             = nvmf_reg_read64,
2893         .reg_write32            = nvmf_reg_write32,
2894         .free_ctrl              = nvme_fc_nvme_ctrl_freed,
2895         .submit_async_event     = nvme_fc_submit_async_event,
2896         .delete_ctrl            = nvme_fc_delete_ctrl,
2897         .get_address            = nvmf_get_address,
2898 };
2899
2900 static void
2901 nvme_fc_connect_ctrl_work(struct work_struct *work)
2902 {
2903         int ret;
2904
2905         struct nvme_fc_ctrl *ctrl =
2906                         container_of(to_delayed_work(work),
2907                                 struct nvme_fc_ctrl, connect_work);
2908
2909         ret = nvme_fc_create_association(ctrl);
2910         if (ret)
2911                 nvme_fc_reconnect_or_delete(ctrl, ret);
2912         else
2913                 dev_info(ctrl->ctrl.device,
2914                         "NVME-FC{%d}: controller connect complete\n",
2915                         ctrl->cnum);
2916 }
2917
2918
2919 static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
2920         .queue_rq       = nvme_fc_queue_rq,
2921         .complete       = nvme_fc_complete_rq,
2922         .init_request   = nvme_fc_init_request,
2923         .exit_request   = nvme_fc_exit_request,
2924         .init_hctx      = nvme_fc_init_admin_hctx,
2925         .timeout        = nvme_fc_timeout,
2926 };
2927
2928
2929 /*
2930  * Fails a controller request if it matches an existing controller
2931  * (association) with the same tuple:
2932  * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN>
2933  *
2934  * The ports don't need to be compared as they are intrinsically
2935  * already matched by the port pointers supplied.
2936  */
2937 static bool
2938 nvme_fc_existing_controller(struct nvme_fc_rport *rport,
2939                 struct nvmf_ctrl_options *opts)
2940 {
2941         struct nvme_fc_ctrl *ctrl;
2942         unsigned long flags;
2943         bool found = false;
2944
2945         spin_lock_irqsave(&rport->lock, flags);
2946         list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
2947                 found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
2948                 if (found)
2949                         break;
2950         }
2951         spin_unlock_irqrestore(&rport->lock, flags);
2952
2953         return found;
2954 }
2955
2956 static struct nvme_ctrl *
2957 nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
2958         struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
2959 {
2960         struct nvme_fc_ctrl *ctrl;
2961         unsigned long flags;
2962         int ret, idx;
2963
2964         if (!(rport->remoteport.port_role &
2965             (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
2966                 ret = -EBADR;
2967                 goto out_fail;
2968         }
2969
2970         if (!opts->duplicate_connect &&
2971             nvme_fc_existing_controller(rport, opts)) {
2972                 ret = -EALREADY;
2973                 goto out_fail;
2974         }
2975
2976         ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2977         if (!ctrl) {
2978                 ret = -ENOMEM;
2979                 goto out_fail;
2980         }
2981
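             /*
              * Allocate a unique controller instance number; it becomes
              * the "NVME-FC{%d}" tag used in this driver's log messages.
              */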
2982         idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
2983         if (idx < 0) {
2984                 ret = -ENOSPC;
2985                 goto out_free_ctrl;
2986         }
2987
2988         ctrl->ctrl.opts = opts;
2989         ctrl->ctrl.nr_reconnects = 0;
2990         INIT_LIST_HEAD(&ctrl->ctrl_list);
2991         ctrl->lport = lport;
2992         ctrl->rport = rport;
2993         ctrl->dev = lport->dev;
2994         ctrl->cnum = idx;
2995         ctrl->ioq_live = false;
2996         ctrl->assoc_active = false;
2997         init_waitqueue_head(&ctrl->ioabort_wait);
2998
2999         get_device(ctrl->dev);
3000         kref_init(&ctrl->ref);
3001
3002         INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
3003         INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
3004         spin_lock_init(&ctrl->lock);
3005
3006         /* io queue count */
3007         ctrl->ctrl.queue_count = min_t(unsigned int,
3008                                 opts->nr_io_queues,
3009                                 lport->ops->max_hw_queues);
3010         ctrl->ctrl.queue_count++;       /* +1 for admin queue */
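             /*
              * e.g. with opts->nr_io_queues = 8 on an lport capped at
              * max_hw_queues = 4: queue_count = 4 io queues + 1 admin = 5.
              */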
3011
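             /* sqsize is a zero-based value per the NVMe spec, hence the -1 */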
3012         ctrl->ctrl.sqsize = opts->queue_size - 1;
3013         ctrl->ctrl.kato = opts->kato;
3014         ctrl->ctrl.cntlid = 0xffff;
3015
3016         ret = -ENOMEM;
3017         ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
3018                                 sizeof(struct nvme_fc_queue), GFP_KERNEL);
3019         if (!ctrl->queues)
3020                 goto out_free_ida;
3021
3022         nvme_fc_init_queue(ctrl, 0);
3023
3024         memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
3025         ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
3026         ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
3027         ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
3028         ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
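             /*
              * Per-request private data: the fcp op itself, an inline
              * scatterlist, plus whatever private space the LLDD
              * requested via fcprqst_priv_sz.
              */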
3029         ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
3030                                         (SG_CHUNK_SIZE *
3031                                                 sizeof(struct scatterlist)) +
3032                                         ctrl->lport->ops->fcprqst_priv_sz;
3033         ctrl->admin_tag_set.driver_data = ctrl;
3034         ctrl->admin_tag_set.nr_hw_queues = 1;
3035         ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
3036         ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
3037
3038         ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
3039         if (ret)
3040                 goto out_free_queues;
3041         ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
3042
3043         ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
3044         if (IS_ERR(ctrl->ctrl.admin_q)) {
3045                 ret = PTR_ERR(ctrl->ctrl.admin_q);
3046                 goto out_free_admin_tag_set;
3047         }
3048
3049         /*
3050          * It would have been nice to init the io queues tag set as
3051          * well. However, we need the controller to report its max
3052          * io queue count before we can do so.
3053          * Defer this to the connect path.
3054          */
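             /*
              * (On the connect path this presumably happens in the io
              * queue creation helper, once the controller has reported
              * how many io queues it supports.)
              */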
3055
3056         ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
3057         if (ret)
3058                 goto out_cleanup_admin_q;
3059
3060         /* at this point, teardown path changes to ref counting on nvme ctrl */
3061
3062         spin_lock_irqsave(&rport->lock, flags);
3063         list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
3064         spin_unlock_irqrestore(&rport->lock, flags);
3065
3066         if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
3067             !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
3068                 dev_err(ctrl->ctrl.device,
3069                         "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
3070                 goto fail_ctrl;
3071         }
3072
3073         nvme_get_ctrl(&ctrl->ctrl);
3074
3075         if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
3076                 nvme_put_ctrl(&ctrl->ctrl);
3077                 dev_err(ctrl->ctrl.device,
3078                         "NVME-FC{%d}: failed to schedule initial connect\n",
3079                         ctrl->cnum);
3080                 goto fail_ctrl;
3081         }
3082
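             /* wait for the initial connect attempt to complete */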
3083         flush_delayed_work(&ctrl->connect_work);
3084
3085         dev_info(ctrl->ctrl.device,
3086                 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
3087                 ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
3088
3089         return &ctrl->ctrl;
3090
3091 fail_ctrl:
3092         nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
3093         cancel_work_sync(&ctrl->ctrl.reset_work);
3094         cancel_delayed_work_sync(&ctrl->connect_work);
3095
3096         ctrl->ctrl.opts = NULL;
3097
3098         /* initiate nvme ctrl ref counting teardown */
3099         nvme_uninit_ctrl(&ctrl->ctrl);
3100
3101         /* Remove core ctrl ref. */
3102         nvme_put_ctrl(&ctrl->ctrl);
3103
3104         /* As we're past the point where we transition to the ref
3105          * counting teardown path, if we return a bad pointer here,
3106          * the calling routine, thinking it's prior to the
3107          * transition, will do an rport put. Since the teardown
3108          * path also does an rport put, we do an extra get here so
3109          * that the proper teardown order happens.
3110          */
3111         nvme_fc_rport_get(rport);
3112
3113         return ERR_PTR(-EIO);
3114
3115 out_cleanup_admin_q:
3116         blk_cleanup_queue(ctrl->ctrl.admin_q);
3117 out_free_admin_tag_set:
3118         blk_mq_free_tag_set(&ctrl->admin_tag_set);
3119 out_free_queues:
3120         kfree(ctrl->queues);
3121 out_free_ida:
3122         put_device(ctrl->dev);
3123         ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
3124 out_free_ctrl:
3125         kfree(ctrl);
3126 out_fail:
3127         /* an exit via here doesn't follow the ctrl ref counting teardown path */
3128         return ERR_PTR(ret);
3129 }
3130
3131
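     /* WWNN/WWPN pair parsed out of a transport address (traddr) string */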
3132 struct nvmet_fc_traddr {
3133         u64     nn;
3134         u64     pn;
3135 };
3136
3137 static int
3138 __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
3139 {
3140         u64 token64;
3141
3142         if (match_u64(sstr, &token64))
3143                 return -EINVAL;
3144         *val = token64;
3145
3146         return 0;
3147 }
3148
3149 /*
3150  * This routine validates and extracts the WWNs from the TRADDR string.
3151  * As the kernel parsers need a 0x prefix to determine the number base,
3152  * always build the string to be parsed with a 0x prefix.
3153  */
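     /*
      * Illustrative traddr forms this accepts (lengths are governed by
      * the NVME_FC_TRADDR_* defines; the values below are made-up
      * examples):
      *   nn-0x20000090fac7e5cf:pn-0x10000090fac7e5cf
      *   nn-20000090fac7e5cf:pn-10000090fac7e5cf
      */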
3154 static int
3155 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
3156 {
3157         char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
3158         substring_t wwn = { name, &name[sizeof(name)-1] };
3159         int nnoffset, pnoffset;
3160
3161         /* validate that the string is in one of the 2 allowed formats */
3162         if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
3163                         !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
3164                         !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
3165                                 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
3166                 nnoffset = NVME_FC_TRADDR_OXNNLEN;
3167                 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
3168                                                 NVME_FC_TRADDR_OXNNLEN;
3169         } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
3170                         !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
3171                         !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
3172                                 "pn-", NVME_FC_TRADDR_NNLEN))) {
3173                 nnoffset = NVME_FC_TRADDR_NNLEN;
3174                 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
3175         } else
3176                 goto out_einval;
3177
3178         name[0] = '0';
3179         name[1] = 'x';
3180         name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
3181
3182         memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
3183         if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
3184                 goto out_einval;
3185
3186         memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
3187         if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
3188                 goto out_einval;
3189
3190         return 0;
3191
3192 out_einval:
3193         pr_warn("%s: bad traddr string\n", __func__);
3194         return -EINVAL;
3195 }
3196
3197 static struct nvme_ctrl *
3198 nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
3199 {
3200         struct nvme_fc_lport *lport;
3201         struct nvme_fc_rport *rport;
3202         struct nvme_ctrl *ctrl;
3203         struct nvmet_fc_traddr laddr = { 0L, 0L };
3204         struct nvmet_fc_traddr raddr = { 0L, 0L };
3205         unsigned long flags;
3206         int ret;
3207
3208         ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
3209         if (ret || !raddr.nn || !raddr.pn)
3210                 return ERR_PTR(-EINVAL);
3211
3212         ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
3213         if (ret || !laddr.nn || !laddr.pn)
3214                 return ERR_PTR(-EINVAL);
3215
3216         /* find the host and remote ports to connect together */
3217         spin_lock_irqsave(&nvme_fc_lock, flags);
3218         list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3219                 if (lport->localport.node_name != laddr.nn ||
3220                     lport->localport.port_name != laddr.pn)
3221                         continue;
3222
3223                 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3224                         if (rport->remoteport.node_name != raddr.nn ||
3225                             rport->remoteport.port_name != raddr.pn)
3226                                 continue;
3227
3228                         /* if we fail to get a reference, fall through; we'll error below */
3229                         if (!nvme_fc_rport_get(rport))
3230                                 break;
3231
3232                         spin_unlock_irqrestore(&nvme_fc_lock, flags);
3233
3234                         ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
3235                         if (IS_ERR(ctrl))
3236                                 nvme_fc_rport_put(rport);
3237                         return ctrl;
3238                 }
3239         }
3240         spin_unlock_irqrestore(&nvme_fc_lock, flags);
3241
3242         pr_warn("%s: %s - %s combination not found\n",
3243                 __func__, opts->traddr, opts->host_traddr);
3244         return ERR_PTR(-ENOENT);
3245 }
3246
3247
3248 static struct nvmf_transport_ops nvme_fc_transport = {
3249         .name           = "fc",
3250         .module         = THIS_MODULE,
3251         .required_opts  = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
3252         .allowed_opts   = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
3253         .create_ctrl    = nvme_fc_create_ctrl,
3254 };
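     /*
      * An illustrative (not authoritative) nvme-cli invocation that
      * would reach .create_ctrl through /dev/nvme-fabrics:
      *
      *   nvme connect --transport=fc \
      *        --traddr=nn-0x...:pn-0x... --host-traddr=nn-0x...:pn-0x... \
      *        --nqn=<subsystem NQN>
      */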
3255
3256 static int __init nvme_fc_init_module(void)
3257 {
3258         int ret;
3259
3260         /*
3261          * NOTE:
3262          * It is expected that in the future the kernel will combine
3263          * the FC-isms that are currently under scsi and now being
3264          * added to by NVME into a new standalone FC class. The SCSI
3265          * and NVME protocols and their devices would be under this
3266          * new FC class.
3267          *
3268          * As we need something to post FC-specific udev events to,
3269          * specifically for nvme probe events, start by creating the
3270          * new device class.  When the new standalone FC class is
3271          * put in place, this code will move to a more generic
3272          * location for the class.
3273          */
3274         fc_class = class_create(THIS_MODULE, "fc");
3275         if (IS_ERR(fc_class)) {
3276                 pr_err("couldn't register class fc\n");
3277                 return PTR_ERR(fc_class);
3278         }
3279
3280         /*
3281          * Create a device for the FC-centric udev events
3282          */
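             /*
              * (Typical consumer, as an assumption: udev rules that
              * watch this device's uevents to drive FC NVMe
              * autoconnect. The driver itself only guarantees the
              * device exists.)
              */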
3283         fc_udev_device = device_create(fc_class, NULL, MKDEV(0, 0), NULL,
3284                                 "fc_udev_device");
3285         if (IS_ERR(fc_udev_device)) {
3286                 pr_err("couldn't create fc_udev device!\n");
3287                 ret = PTR_ERR(fc_udev_device);
3288                 goto out_destroy_class;
3289         }
3290
3291         ret = nvmf_register_transport(&nvme_fc_transport);
3292         if (ret)
3293                 goto out_destroy_device;
3294
3295         return 0;
3296
3297 out_destroy_device:
3298         device_destroy(fc_class, MKDEV(0, 0));
3299 out_destroy_class:
3300         class_destroy(fc_class);
3301         return ret;
3302 }
3303
3304 static void __exit nvme_fc_exit_module(void)
3305 {
3306         /* sanity check - all lports should be removed */
3307         if (!list_empty(&nvme_fc_lport_list))
3308                 pr_warn("%s: localport list not empty\n", __func__);
3309
3310         nvmf_unregister_transport(&nvme_fc_transport);
3311
3312         ida_destroy(&nvme_fc_local_port_cnt);
3313         ida_destroy(&nvme_fc_ctrl_cnt);
3314
3315         device_destroy(fc_class, MKDEV(0, 0));
3316         class_destroy(fc_class);
3317 }
3318
3319 module_init(nvme_fc_init_module);
3320 module_exit(nvme_fc_exit_module);
3321
3322 MODULE_LICENSE("GPL v2");