drivers/scsi/qla2xxx/qla_nvme.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2017 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

static void qla_nvme_unregister_remote_port(struct work_struct *);

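/**
 * qla_nvme_register_remote - register an fcport with the FC-NVMe transport
 * @vha: SCSI host the remote port was discovered on
 * @fcport: driver port carrying the NVMe PRLI service parameters
 *
 * Registration is skipped (returning 0) when FC-NVMe support is compiled
 * out, host NVMe is disabled, the local port cannot be brought up, the
 * port is neither an NVMe target nor a discovery controller, or it is
 * already registered. Returns the transport's error code on failure.
 */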
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
        struct qla_nvme_rport *rport;
        struct nvme_fc_port_info req;
        int ret;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return 0;

        if (!vha->flags.nvme_enabled) {
                ql_log(ql_log_info, vha, 0x2100,
                    "%s: Not registering target since Host NVME is not enabled\n",
                    __func__);
                return 0;
        }

        if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
                return 0;

        if (!(fcport->nvme_prli_service_param &
            (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
            (fcport->nvme_flag & NVME_FLAG_REGISTERED))
                return 0;

        INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port);
        fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

        memset(&req, 0, sizeof(struct nvme_fc_port_info));
        req.port_name = wwn_to_u64(fcport->port_name);
        req.node_name = wwn_to_u64(fcport->node_name);
        req.port_role = 0;
        req.dev_loss_tmo = NVME_FC_DEV_LOSS_TMO;

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
                req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
                req.port_role |= FC_PORT_ROLE_NVME_TARGET;

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
                req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

        req.port_id = fcport->d_id.b24;

        ql_log(ql_log_info, vha, 0x2102,
            "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
            __func__, req.node_name, req.port_name,
            req.port_id);

        ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
            &fcport->nvme_remote_port);
        if (ret) {
                ql_log(ql_log_warn, vha, 0x212e,
                    "Failed to register remote port. Transport returned %d\n",
                    ret);
                return ret;
        }

        rport = fcport->nvme_remote_port->private;
        rport->fcport = fcport;
        list_add_tail(&rport->list, &vha->nvme_rport_list);

        fcport->nvme_flag |= NVME_FLAG_REGISTERED;
        return 0;
}

/**
 * qla_nvme_alloc_queue - map an FC-NVMe hardware queue onto a qpair
 * @lport: FC-NVMe local port (the vha is stashed in its private data)
 * @qidx: transport queue index; 0 (the admin queue) is remapped to 1
 * @qsize: requested queue depth (only used for logging here)
 * @handle: out parameter receiving the qpair pointer
 *
 * Reuses an existing qpair already mapped for @qidx, otherwise creates
 * a new one.
 */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
        struct scsi_qla_host *vha;
        struct qla_hw_data *ha;
        struct qla_qpair *qpair;

        /* remap the admin queue (qidx 0) onto hw queue 1; slot 0 is unused here */
        if (!qidx)
                qidx++;

        vha = (struct scsi_qla_host *)lport->private;
        ha = vha->hw;

        ql_log(ql_log_info, vha, 0x2104,
            "%s: handle %p, idx =%d, qsize %d\n",
            __func__, handle, qidx, qsize);

        if (qidx > qla_nvme_fc_transport.max_hw_queues) {
                ql_log(ql_log_warn, vha, 0x212f,
                    "%s: Illegal qidx=%d. Max=%d\n",
                    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
                return -EINVAL;
        }

        if (ha->queue_pair_map[qidx]) {
                *handle = ha->queue_pair_map[qidx];
                ql_log(ql_log_info, vha, 0x2121,
                    "Returning existing qpair of %p for idx=%x\n",
                    *handle, qidx);
                return 0;
        }

        qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
        if (!qpair) {
                ql_log(ql_log_warn, vha, 0x2122,
                    "Failed to allocate qpair\n");
                return -EINVAL;
        }
        *handle = qpair;

        return 0;
}

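/*
 * Completion callback for an LS (link service) SRB: drop the SRB
 * reference, record the completion status, and hand the nvmefc_ls_req
 * back to the transport from work-queue context.
 */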
static void qla_nvme_sp_ls_done(void *ptr, int res)
{
        srb_t *sp = ptr;
        struct srb_iocb *nvme;
        struct nvmefc_ls_req *fd;
        struct nvme_private *priv;

        if (atomic_read(&sp->ref_count) == 0) {
                ql_log(ql_log_warn, sp->fcport->vha, 0x2123,
                    "SP reference-count to ZERO on LS_done -- sp=%p.\n", sp);
                return;
        }

        if (!atomic_dec_and_test(&sp->ref_count))
                return;

        if (res)
                res = -EINVAL;

        nvme = &sp->u.iocb_cmd;
        fd = nvme->u.nvme.desc;
        priv = fd->private;
        priv->comp_status = res;
        schedule_work(&priv->ls_work);
        /* work schedule doesn't need the sp */
        qla2x00_rel_sp(sp);
}

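/*
 * Completion callback for an FCP command SRB: translate the driver
 * status into an NVMe status code, copy back the response length, and
 * complete the request via fd->done().
 */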
static void qla_nvme_sp_done(void *ptr, int res)
{
        srb_t *sp = ptr;
        struct srb_iocb *nvme;
        struct nvmefc_fcp_req *fd;

        nvme = &sp->u.iocb_cmd;
        fd = nvme->u.nvme.desc;

        if (!atomic_dec_and_test(&sp->ref_count))
                return;

        if (res == QLA_SUCCESS)
                fd->status = 0;
        else
                fd->status = NVME_SC_INTERNAL;

        fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
        fd->done(fd);
        qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

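/*
 * Worker that issues the firmware abort for an outstanding SRB; queued
 * from the transport's ls_abort/fcp_abort entry points so the abort
 * runs in process context.
 */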
static void qla_nvme_abort_work(struct work_struct *work)
{
        struct nvme_private *priv =
                container_of(work, struct nvme_private, abort_work);
        srb_t *sp = priv->sp;
        fc_port_t *fcport = sp->fcport;
        struct qla_hw_data *ha;
        int rval;

        /* check fcport before dereferencing it for ha */
        if (!fcport)
                return;

        ha = fcport->vha->hw;

        ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
            "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
            __func__, sp, sp->handle, fcport, fcport->deleted);

        if (!ha->flags.fw_started && fcport->deleted)
                return;

        rval = ha->isp_ops->abort_command(sp);

        ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
            "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
            __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
            sp, sp->handle, fcport, rval);
}

static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
        struct nvme_private *priv = fd->private;

        INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
        schedule_work(&priv->abort_work);
}

static void qla_nvme_ls_complete(struct work_struct *work)
{
        struct nvme_private *priv =
            container_of(work, struct nvme_private, ls_work);
        struct nvmefc_ls_req *fd = priv->fd;

        fd->done(fd, priv->comp_status);
}

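/*
 * FC-NVMe transport ls_req entry point: allocate an SRB, map the LS
 * request payload for DMA, and start the IOCB on the default queue.
 */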
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
        struct qla_nvme_rport *qla_rport = rport->private;
        fc_port_t *fcport = qla_rport->fcport;
        struct srb_iocb *nvme;
        struct nvme_private *priv = fd->private;
        struct scsi_qla_host *vha;
        int rval = QLA_FUNCTION_FAILED;
        struct qla_hw_data *ha;
        srb_t *sp;

        vha = fcport->vha;
        ha = vha->hw;
        /* Alloc SRB structure */
        sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
        if (!sp)
                return rval;

        sp->type = SRB_NVME_LS;
        sp->name = "nvme_ls";
        sp->done = qla_nvme_sp_ls_done;
        atomic_set(&sp->ref_count, 1);
        nvme = &sp->u.iocb_cmd;
        priv->sp = sp;
        priv->fd = fd;
        INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
        nvme->u.nvme.desc = fd;
        nvme->u.nvme.dir = 0;
        nvme->u.nvme.dl = 0;
        nvme->u.nvme.cmd_len = fd->rqstlen;
        nvme->u.nvme.rsp_len = fd->rsplen;
        nvme->u.nvme.rsp_dma = fd->rspdma;
        nvme->u.nvme.timeout_sec = fd->timeout;
        nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
            fd->rqstlen, DMA_TO_DEVICE);
        dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
            fd->rqstlen, DMA_TO_DEVICE);

        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x700e,
                    "qla2x00_start_sp failed = %d\n", rval);
                atomic_dec(&sp->ref_count);
                wake_up(&sp->nvme_ls_waitq);
                return rval;
        }

        return rval;
}

static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
        struct nvme_private *priv = fd->private;

        INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
        schedule_work(&priv->abort_work);
}

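/*
 * Build and ring an NVME command IOCB on the qpair's request queue:
 * find a free handle, reserve ring entries, fill in the command and
 * response IU descriptors, then chain the data scatterlist through
 * continuation IOCBs as needed. Takes the qpair lock itself.
 */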
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
        unsigned long   flags;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        struct cmd_nvme *cmd_pkt;
        uint16_t        cnt, i;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        struct req_que *req = NULL;
        struct scsi_qla_host *vha = sp->fcport->vha;
        struct qla_hw_data *ha = vha->hw;
        struct qla_qpair *qpair = sp->qpair;
        struct srb_iocb *nvme = &sp->u.iocb_cmd;
        struct scatterlist *sgl, *sg;
        struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
        uint32_t        rval = QLA_SUCCESS;

        /* Setup qpair pointers */
        req = qpair->req;
        tot_dsds = fd->sg_cnt;

        /* Acquire qpair specific lock */
        spin_lock_irqsave(&qpair->qp_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }

        if (index == req->num_outstanding_cmds) {
                rval = -EBUSY;
                goto queuing_error;
        }
        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
                    RD_REG_DWORD_RELAXED(req->req_q_out);

                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length - (req->ring_index - cnt);

                if (req->cnt < (req_cnt + 2)) {
                        rval = -EBUSY;
                        goto queuing_error;
                }
        }

        if (unlikely(!fd->sqid)) {
                struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;

                if (cmd->sqe.common.opcode == nvme_admin_async_event) {
                        nvme->u.nvme.aen_op = 1;
                        atomic_inc(&ha->nvme_active_aen_cnt);
                }
        }

        /* Build command packet. */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        req->cnt -= req_cnt;

        cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

        cmd_pkt->entry_status = 0;

        /* Update entry type to indicate Command NVME IOCB */
        cmd_pkt->entry_type = COMMAND_NVME;

        /* No data transfer how do we check buffer len == 0?? */
        if (fd->io_dir == NVMEFC_FCP_READ) {
                cmd_pkt->control_flags = CF_READ_DATA;
                vha->qla_stats.input_bytes += fd->payload_length;
                vha->qla_stats.input_requests++;
        } else if (fd->io_dir == NVMEFC_FCP_WRITE) {
                cmd_pkt->control_flags = CF_WRITE_DATA;
                if ((vha->flags.nvme_first_burst) &&
                    (sp->fcport->nvme_prli_service_param &
                        NVME_PRLI_SP_FIRST_BURST)) {
                        if ((fd->payload_length <=
                            sp->fcport->nvme_first_burst_size) ||
                            (sp->fcport->nvme_first_burst_size == 0))
                                cmd_pkt->control_flags |=
                                    CF_NVME_FIRST_BURST_ENABLE;
                }
                vha->qla_stats.output_bytes += fd->payload_length;
                vha->qla_stats.output_requests++;
        } else if (fd->io_dir == 0) {
                cmd_pkt->control_flags = 0;
        }

        /* Set NPORT-ID */
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

        /* NVME RSP IU */
        cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
        cmd_pkt->nvme_rsp_dseg_address[0] = cpu_to_le32(LSD(fd->rspdma));
        cmd_pkt->nvme_rsp_dseg_address[1] = cpu_to_le32(MSD(fd->rspdma));

        /* NVME CNMD IU */
        cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
        cmd_pkt->nvme_cmnd_dseg_address[0] = cpu_to_le32(LSD(fd->cmddma));
        cmd_pkt->nvme_cmnd_dseg_address[1] = cpu_to_le32(MSD(fd->cmddma));

        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
        cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

        /* One DSD is available in the Command Type NVME IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->nvme_data_dseg_address[0];
        sgl = fd->first_sgl;

        /* Load data segments */
        for_each_sg(sgl, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */

                        /* Adjust ring index */
                        req->ring_index++;
                        if (req->ring_index == req->length) {
                                req->ring_index = 0;
                                req->ring_ptr = req->ring;
                        } else {
                                req->ring_ptr++;
                        }
                        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
                        *((uint32_t *)(&cont_pkt->entry_type)) =
                            cpu_to_le32(CONTINUE_A64_TYPE);

                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }

        /* Set total entry count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        /* Set chip new ring index. */
        WRT_REG_DWORD(req->req_q_in, req->ring_index);

queuing_error:
        spin_unlock_irqrestore(&qpair->qp_lock, flags);
        return rval;
}

/* Post an FCP command to the firmware via the per-queue qpair */
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
        fc_port_t *fcport;
        struct srb_iocb *nvme;
        struct scsi_qla_host *vha;
        int rval = -ENODEV;
        srb_t *sp;
        struct qla_qpair *qpair = hw_queue_handle;
        struct nvme_private *priv = fd->private;
        struct qla_nvme_rport *qla_rport = rport->private;

        fcport = qla_rport->fcport;

        vha = fcport->vha;

        if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
                return rval;

        /*
         * If we know the dev is going away while the transport is still
         * sending IO's return busy back to stall the IO Q.  This happens
         * when the link goes away and fw hasn't notified us yet, but IO's
         * are being returned. If the dev comes back quickly we won't
         * exhaust the IO retry count at the core.
         */
        if (fcport->nvme_flag & NVME_FLAG_RESETTING)
                return -EBUSY;

        /* Alloc SRB structure */
        sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
        if (!sp)
                return -EBUSY;

        atomic_set(&sp->ref_count, 1);
        init_waitqueue_head(&sp->nvme_ls_waitq);
        priv->sp = sp;
        sp->type = SRB_NVME_CMD;
        sp->name = "nvme_cmd";
        sp->done = qla_nvme_sp_done;
        sp->qpair = qpair;
        sp->vha = vha;
        nvme = &sp->u.iocb_cmd;
        nvme->u.nvme.desc = fd;

        rval = qla2x00_start_nvme_mq(sp);
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x212d,
                    "qla2x00_start_nvme_mq failed = %d\n", rval);
                atomic_dec(&sp->ref_count);
                wake_up(&sp->nvme_ls_waitq);
        }

        return rval;
}

static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
        struct scsi_qla_host *vha = lport->private;

        ql_log(ql_log_info, vha, 0x210f,
            "localport delete of %p completed.\n", vha->nvme_local_port);
        vha->nvme_local_port = NULL;
        complete(&vha->nvme_del_done);
}

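/*
 * Transport callback invoked once the remote port's last reference is
 * dropped: detach the rport from the fcport, complete any waiter in
 * qla_nvme_unregister_remote_port(), and tear down the session unless
 * the driver is unloading.
 */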
static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
        fc_port_t *fcport;
        struct qla_nvme_rport *qla_rport = rport->private, *trport;

        fcport = qla_rport->fcport;
        fcport->nvme_remote_port = NULL;
        fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;

        list_for_each_entry_safe(qla_rport, trport,
            &fcport->vha->nvme_rport_list, list) {
                if (qla_rport->fcport == fcport) {
                        list_del(&qla_rport->list);
                        break;
                }
        }
        complete(&fcport->nvme_del_done);

        if (!test_bit(UNLOADING, &fcport->vha->dpc_flags)) {
                INIT_WORK(&fcport->free_work, qlt_free_session_done);
                schedule_work(&fcport->free_work);
        }

        fcport->nvme_flag &= ~NVME_FLAG_DELETING;
        ql_log(ql_log_info, fcport->vha, 0x2110,
            "remoteport_delete of %p completed.\n", fcport);
}

static struct nvme_fc_port_template qla_nvme_fc_transport = {
        .localport_delete = qla_nvme_localport_delete,
        .remoteport_delete = qla_nvme_remoteport_delete,
        .create_queue   = qla_nvme_alloc_queue,
        .delete_queue   = NULL,
        .ls_req         = qla_nvme_ls_req,
        .ls_abort       = qla_nvme_ls_abort,
        .fcp_io         = qla_nvme_post_cmd,
        .fcp_abort      = qla_nvme_fcp_abort,
        .max_hw_queues  = 8,
        .max_sgl_segments = 128,
        .max_dif_sgl_segments = 64,
        .dma_boundary = 0xFFFFFFFF,
        .local_priv_sz  = 8,
        .remote_priv_sz = sizeof(struct qla_nvme_rport),
        .lsrqst_priv_sz = sizeof(struct nvme_private),
        .fcprqst_priv_sz = sizeof(struct nvme_private),
};

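/*
 * Give an aborted SRB up to NVME_ABORT_POLLING_PERIOD seconds to
 * complete; QLA_FUNCTION_FAILED indicates extra references are still
 * held, i.e. the command has not come back from the firmware.
 */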
#define NVME_ABORT_POLLING_PERIOD    2
static int qla_nvme_wait_on_command(srb_t *sp)
{
        int ret = QLA_SUCCESS;

        wait_event_timeout(sp->nvme_ls_waitq, (atomic_read(&sp->ref_count) > 1),
            NVME_ABORT_POLLING_PERIOD*HZ);

        if (atomic_read(&sp->ref_count) > 1)
                ret = QLA_FUNCTION_FAILED;

        return ret;
}

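/*
 * Abort an outstanding NVMe SRB: if the firmware is up, ask it to
 * abort the command and wait briefly for completion; otherwise just
 * complete the SRB with the supplied result.
 */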
void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp, int res)
{
        int rval;

        if (ha->flags.fw_started) {
                rval = ha->isp_ops->abort_command(sp);
                if (!rval && !qla_nvme_wait_on_command(sp))
                        ql_log(ql_log_warn, NULL, 0x2112,
                            "timed out waiting on sp=%p\n", sp);
        } else {
                sp->done(sp, res);
        }
}

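/*
 * Deferred work (fcport->nvme_del_work) that unregisters a remote port
 * from the FC-NVMe transport and waits for the remoteport_delete
 * callback to signal completion. Devloss is zeroed first when the
 * driver itself is being removed so the transport tears down at once.
 */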
static void qla_nvme_unregister_remote_port(struct work_struct *work)
{
        struct fc_port *fcport = container_of(work, struct fc_port,
            nvme_del_work);
        struct qla_nvme_rport *qla_rport, *trport;
        scsi_qla_host_t *base_vha;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return;

        ql_log(ql_log_warn, NULL, 0x2112,
            "%s: unregister remoteport on %p\n", __func__, fcport);

        base_vha = pci_get_drvdata(fcport->vha->hw->pdev);
        if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags)) {
                ql_dbg(ql_dbg_disc, fcport->vha, 0x2114,
                    "%s: Notify FC-NVMe transport, set devloss=0\n",
                    __func__);

                nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
        }

        list_for_each_entry_safe(qla_rport, trport,
            &fcport->vha->nvme_rport_list, list) {
                if (qla_rport->fcport == fcport) {
                        ql_log(ql_log_info, fcport->vha, 0x2113,
                            "%s: fcport=%p\n", __func__, fcport);
                        init_completion(&fcport->nvme_del_done);
                        nvme_fc_unregister_remoteport(
                            fcport->nvme_remote_port);
                        wait_for_completion(&fcport->nvme_del_done);
                        break;
                }
        }
}

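/*
 * Unregister the host's local port from the FC-NVMe transport during
 * driver teardown, waiting for the localport_delete callback.
 */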
void qla_nvme_delete(struct scsi_qla_host *vha)
{
        int nv_ret;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return;

        if (vha->nvme_local_port) {
                init_completion(&vha->nvme_del_done);
                ql_log(ql_log_info, vha, 0x2116,
                    "unregister localport=%p\n",
                    vha->nvme_local_port);
                nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
                if (nv_ret)
                        ql_log(ql_log_info, vha, 0x2115,
                            "Unregister of localport failed\n");
                else
                        wait_for_completion(&vha->nvme_del_done);
        }
}

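/*
 * Register this HBA as an FC-NVMe local port. max_hw_queues is clamped
 * to the hardware request-queue count (less the two reserved queues)
 * before registration.
 */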
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
        struct nvme_fc_port_template *tmpl;
        struct qla_hw_data *ha;
        struct nvme_fc_port_info pinfo;
        int ret = -EINVAL;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return ret;

        ha = vha->hw;
        tmpl = &qla_nvme_fc_transport;

        WARN_ON(vha->nvme_local_port);
        WARN_ON(ha->max_req_queues < 3);

        qla_nvme_fc_transport.max_hw_queues =
            min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
                (uint8_t)(ha->max_req_queues - 2));

        pinfo.node_name = wwn_to_u64(vha->node_name);
        pinfo.port_name = wwn_to_u64(vha->port_name);
        pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
        pinfo.port_id = vha->d_id.b24;

        ql_log(ql_log_info, vha, 0xffff,
            "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
            pinfo.node_name, pinfo.port_name, pinfo.port_id);
        qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

        ret = nvme_fc_register_localport(&pinfo, tmpl,
            get_device(&ha->pdev->dev), &vha->nvme_local_port);
        if (ret) {
                ql_log(ql_log_warn, vha, 0xffff,
                    "register_localport failed: ret=%x\n", ret);
        } else {
                vha->nvme_local_port->private = vha;
        }

        return ret;
}