/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

static void qla_nvme_unregister_remote_port(struct work_struct *);

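/*
 * Register an FC port with the FC-NVMe transport as a remote port and add
 * it to the host's NVMe rport list.
 */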
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
        struct qla_nvme_rport *rport;
        struct nvme_fc_port_info req;
        int ret;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return 0;

        if (!vha->flags.nvme_enabled) {
                ql_log(ql_log_info, vha, 0x2100,
                    "%s: Not registering target since Host NVME is not enabled\n",
                    __func__);
                return 0;
        }

        if (!(fcport->nvme_prli_service_param &
            (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
                (fcport->nvme_flag & NVME_FLAG_REGISTERED))
                return 0;

        INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port);
        fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

        memset(&req, 0, sizeof(struct nvme_fc_port_info));
        req.port_name = wwn_to_u64(fcport->port_name);
        req.node_name = wwn_to_u64(fcport->node_name);
        req.port_role = 0;
        req.dev_loss_tmo = NVME_FC_DEV_LOSS_TMO;

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
                req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
                req.port_role |= FC_PORT_ROLE_NVME_TARGET;

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
                req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

        req.port_id = fcport->d_id.b24;

        ql_log(ql_log_info, vha, 0x2102,
            "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
            __func__, req.node_name, req.port_name,
            req.port_id);

        ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
            &fcport->nvme_remote_port);
        if (ret) {
                ql_log(ql_log_warn, vha, 0x212e,
                    "Failed to register remote port. Transport returned %d\n",
                    ret);
                return ret;
        }

        rport = fcport->nvme_remote_port->private;
        rport->fcport = fcport;
        list_add_tail(&rport->list, &vha->nvme_rport_list);

        fcport->nvme_flag |= NVME_FLAG_REGISTERED;
        return 0;
}

/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
        struct scsi_qla_host *vha;
        struct qla_hw_data *ha;
        struct qla_qpair *qpair;

        if (!qidx)
                qidx++;

        vha = (struct scsi_qla_host *)lport->private;
        ha = vha->hw;

        ql_log(ql_log_info, vha, 0x2104,
            "%s: handle %p, idx =%d, qsize %d\n",
            __func__, handle, qidx, qsize);

        if (qidx > qla_nvme_fc_transport.max_hw_queues) {
                ql_log(ql_log_warn, vha, 0x212f,
                    "%s: Illegal qidx=%d. Max=%d\n",
                    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
                return -EINVAL;
        }

        if (ha->queue_pair_map[qidx]) {
                *handle = ha->queue_pair_map[qidx];
                ql_log(ql_log_info, vha, 0x2121,
                    "Returning existing qpair of %p for idx=%x\n",
                    *handle, qidx);
                return 0;
        }

        qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
        if (qpair == NULL) {
                ql_log(ql_log_warn, vha, 0x2122,
                    "Failed to allocate qpair\n");
                return -EINVAL;
        }
        *handle = qpair;

        return 0;
}

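/*
 * Completion callback for an NVMe LS request SRB; the transport completion
 * is deferred to a workqueue.
 */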
static void qla_nvme_sp_ls_done(void *ptr, int res)
{
        srb_t *sp = ptr;
        struct srb_iocb *nvme;
        struct nvmefc_ls_req   *fd;
        struct nvme_private *priv;

        if (atomic_read(&sp->ref_count) == 0) {
                ql_log(ql_log_warn, sp->fcport->vha, 0x2123,
                    "SP reference-count to ZERO on LS_done -- sp=%p.\n", sp);
                return;
        }

        if (!atomic_dec_and_test(&sp->ref_count))
                return;

        if (res)
                res = -EINVAL;

        nvme = &sp->u.iocb_cmd;
        fd = nvme->u.nvme.desc;
        priv = fd->private;
        priv->comp_status = res;
        schedule_work(&priv->ls_work);
        /* work schedule doesn't need the sp */
        qla2x00_rel_sp(sp);
}

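/*
 * Completion callback for an NVMe FCP command SRB; translates the driver
 * status and completes the request back to the FC-NVMe transport.
 */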
static void qla_nvme_sp_done(void *ptr, int res)
{
        srb_t *sp = ptr;
        struct srb_iocb *nvme;
        struct nvmefc_fcp_req *fd;

        nvme = &sp->u.iocb_cmd;
        fd = nvme->u.nvme.desc;

        if (!atomic_dec_and_test(&sp->ref_count))
                return;

        if (res == QLA_SUCCESS)
                fd->status = 0;
        else
                fd->status = NVME_SC_INTERNAL;

        fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
        fd->done(fd);
        qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

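/* Work item that asks the firmware to abort the command behind an SRB. */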
static void qla_nvme_abort_work(struct work_struct *work)
{
        struct nvme_private *priv =
                container_of(work, struct nvme_private, abort_work);
        srb_t *sp = priv->sp;
        fc_port_t *fcport = sp->fcport;
        struct qla_hw_data *ha = fcport->vha->hw;
        int rval;

        rval = ha->isp_ops->abort_command(sp);

        ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
            "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
            __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
            sp, sp->handle, fcport, rval);
}

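/* FC-NVMe transport entry point: abort an outstanding LS request. */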
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
        struct nvme_private *priv = fd->private;

        INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
        schedule_work(&priv->abort_work);
}

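/* Work item that completes an LS request back to the FC-NVMe transport. */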
static void qla_nvme_ls_complete(struct work_struct *work)
{
        struct nvme_private *priv =
            container_of(work, struct nvme_private, ls_work);
        struct nvmefc_ls_req *fd = priv->fd;

        fd->done(fd, priv->comp_status);
}

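/* FC-NVMe transport entry point: send an NVMe LS request to a remote port. */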
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
        struct qla_nvme_rport *qla_rport = rport->private;
        fc_port_t *fcport = qla_rport->fcport;
        struct srb_iocb   *nvme;
        struct nvme_private *priv = fd->private;
        struct scsi_qla_host *vha;
        int     rval = QLA_FUNCTION_FAILED;
        struct qla_hw_data *ha;
        srb_t           *sp;

        vha = fcport->vha;
        ha = vha->hw;
        /* Alloc SRB structure */
        sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
        if (!sp)
                return rval;

        sp->type = SRB_NVME_LS;
        sp->name = "nvme_ls";
        sp->done = qla_nvme_sp_ls_done;
        atomic_set(&sp->ref_count, 1);
        nvme = &sp->u.iocb_cmd;
        priv->sp = sp;
        priv->fd = fd;
        INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
        nvme->u.nvme.desc = fd;
        nvme->u.nvme.dir = 0;
        nvme->u.nvme.dl = 0;
        nvme->u.nvme.cmd_len = fd->rqstlen;
        nvme->u.nvme.rsp_len = fd->rsplen;
        nvme->u.nvme.rsp_dma = fd->rspdma;
        nvme->u.nvme.timeout_sec = fd->timeout;
        nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
            fd->rqstlen, DMA_TO_DEVICE);
        dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
            fd->rqstlen, DMA_TO_DEVICE);

        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x700e,
                    "qla2x00_start_sp failed = %d\n", rval);
                atomic_dec(&sp->ref_count);
                wake_up(&sp->nvme_ls_waitq);
                return rval;
        }

        return rval;
}

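/* FC-NVMe transport entry point: abort an outstanding FCP command. */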
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
        struct nvme_private *priv = fd->private;

        INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
        schedule_work(&priv->abort_work);
}

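/* FC-NVMe transport poll entry point: drain the queue pair's response queue. */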
static void qla_nvme_poll(struct nvme_fc_local_port *lport, void *hw_queue_handle)
{
        struct qla_qpair *qpair = hw_queue_handle;
        unsigned long flags;
        struct scsi_qla_host *vha = lport->private;

        spin_lock_irqsave(&qpair->qp_lock, flags);
        qla24xx_process_response_queue(vha, qpair->rsp);
        spin_unlock_irqrestore(&qpair->qp_lock, flags);
}

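/*
 * Build a Command Type NVME IOCB (plus any Continuation Type 1 IOCBs needed
 * for extra data segments) on the queue pair's request ring and notify the
 * firmware.
 */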
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
        unsigned long   flags;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        struct cmd_nvme *cmd_pkt;
        uint16_t        cnt, i;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        struct req_que *req = NULL;
        struct scsi_qla_host *vha = sp->fcport->vha;
        struct qla_hw_data *ha = vha->hw;
        struct qla_qpair *qpair = sp->qpair;
        struct srb_iocb *nvme = &sp->u.iocb_cmd;
        struct scatterlist *sgl, *sg;
        struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
        uint32_t        rval = QLA_SUCCESS;

        /* Setup qpair pointers */
        req = qpair->req;
        tot_dsds = fd->sg_cnt;

        /* Acquire qpair specific lock */
        spin_lock_irqsave(&qpair->qp_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }

        if (index == req->num_outstanding_cmds) {
                rval = -EBUSY;
                goto queuing_error;
        }
        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
                    RD_REG_DWORD_RELAXED(req->req_q_out);

                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length - (req->ring_index - cnt);

                if (req->cnt < (req_cnt + 2)) {
                        rval = -EBUSY;
                        goto queuing_error;
                }
        }

        if (unlikely(!fd->sqid)) {
                struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
                if (cmd->sqe.common.opcode == nvme_admin_async_event) {
                        nvme->u.nvme.aen_op = 1;
                        atomic_inc(&ha->nvme_active_aen_cnt);
                }
        }

        /* Build command packet. */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        req->cnt -= req_cnt;

        cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

        cmd_pkt->entry_status = 0;

        /* Update entry type to indicate Command NVME IOCB */
        cmd_pkt->entry_type = COMMAND_NVME;

        /* No data transfer how do we check buffer len == 0?? */
        if (fd->io_dir == NVMEFC_FCP_READ) {
                cmd_pkt->control_flags =
                    cpu_to_le16(CF_READ_DATA | CF_NVME_ENABLE);
                vha->qla_stats.input_bytes += fd->payload_length;
                vha->qla_stats.input_requests++;
        } else if (fd->io_dir == NVMEFC_FCP_WRITE) {
                cmd_pkt->control_flags =
                    cpu_to_le16(CF_WRITE_DATA | CF_NVME_ENABLE);
                vha->qla_stats.output_bytes += fd->payload_length;
                vha->qla_stats.output_requests++;
        } else if (fd->io_dir == 0) {
                cmd_pkt->control_flags = cpu_to_le16(CF_NVME_ENABLE);
        }

        /* Set NPORT-ID */
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

        /* NVME RSP IU */
        cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
        cmd_pkt->nvme_rsp_dseg_address[0] = cpu_to_le32(LSD(fd->rspdma));
        cmd_pkt->nvme_rsp_dseg_address[1] = cpu_to_le32(MSD(fd->rspdma));

        /* NVME CMND IU */
        cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
        cmd_pkt->nvme_cmnd_dseg_address[0] = cpu_to_le32(LSD(fd->cmddma));
        cmd_pkt->nvme_cmnd_dseg_address[1] = cpu_to_le32(MSD(fd->cmddma));

        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
        cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

        /* One DSD is available in the Command Type NVME IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->nvme_data_dseg_address[0];
        sgl = fd->first_sgl;

        /* Load data segments */
        for_each_sg(sgl, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */

                        /* Adjust ring index */
                        req->ring_index++;
                        if (req->ring_index == req->length) {
                                req->ring_index = 0;
                                req->ring_ptr = req->ring;
                        } else {
                                req->ring_ptr++;
                        }
                        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
                        *((uint32_t *)(&cont_pkt->entry_type)) =
                            cpu_to_le32(CONTINUE_A64_TYPE);

                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }

        /* Set total entry count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        /* Set chip new ring index. */
        WRT_REG_DWORD(req->req_q_in, req->ring_index);

queuing_error:
        spin_unlock_irqrestore(&qpair->qp_lock, flags);
        return rval;
}

/* Post a command */
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
        fc_port_t *fcport;
        struct srb_iocb *nvme;
        struct scsi_qla_host *vha;
        int rval = -ENODEV;
        srb_t *sp;
        struct qla_qpair *qpair = hw_queue_handle;
        struct nvme_private *priv;
        struct qla_nvme_rport *qla_rport = rport->private;

        if (!fd || !qpair) {
                ql_log(ql_log_warn, NULL, 0x2134,
                    "NO NVMe request or Queue Handle\n");
                return rval;
        }

        priv = fd->private;
        fcport = qla_rport->fcport;
        if (!fcport) {
                ql_log(ql_log_warn, NULL, 0x210e, "No fcport ptr\n");
                return rval;
        }

        vha = fcport->vha;

        if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
                return rval;

        /*
         * If we know the dev is going away while the transport is still sending
         * IO's return busy back to stall the IO Q.  This happens when the
         * link goes away and fw hasn't notified us yet, but IO's are being
         * returned. If the dev comes back quickly we won't exhaust the IO
         * retry count at the core.
         */
        if (fcport->nvme_flag & NVME_FLAG_RESETTING)
                return -EBUSY;

        /* Alloc SRB structure */
        sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
        if (!sp)
                return -EBUSY;

        atomic_set(&sp->ref_count, 1);
        init_waitqueue_head(&sp->nvme_ls_waitq);
        priv->sp = sp;
        sp->type = SRB_NVME_CMD;
        sp->name = "nvme_cmd";
        sp->done = qla_nvme_sp_done;
        sp->qpair = qpair;
        nvme = &sp->u.iocb_cmd;
        nvme->u.nvme.desc = fd;

        rval = qla2x00_start_nvme_mq(sp);
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x212d,
                    "qla2x00_start_nvme_mq failed = %d\n", rval);
                atomic_dec(&sp->ref_count);
                wake_up(&sp->nvme_ls_waitq);
        }

        return rval;
}

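/* FC-NVMe transport callback: the local port has been fully deleted. */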
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
        struct scsi_qla_host *vha = lport->private;

        ql_log(ql_log_info, vha, 0x210f,
            "localport delete of %p completed.\n", vha->nvme_local_port);
        vha->nvme_local_port = NULL;
        complete(&vha->nvme_del_done);
}

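/*
 * FC-NVMe transport callback: a remote port has been fully deleted. Unlink
 * it from the fcport and schedule the session teardown.
 */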
static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
        fc_port_t *fcport;
        struct qla_nvme_rport *qla_rport = rport->private, *trport;

        fcport = qla_rport->fcport;
        fcport->nvme_remote_port = NULL;
        fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;

        list_for_each_entry_safe(qla_rport, trport,
            &fcport->vha->nvme_rport_list, list) {
                if (qla_rport->fcport == fcport) {
                        list_del(&qla_rport->list);
                        break;
                }
        }
        complete(&fcport->nvme_del_done);

        if (!test_bit(UNLOADING, &fcport->vha->dpc_flags)) {
                INIT_WORK(&fcport->free_work, qlt_free_session_done);
                schedule_work(&fcport->free_work);
        }

        fcport->nvme_flag &= ~(NVME_FLAG_REGISTERED | NVME_FLAG_DELETING);
        ql_log(ql_log_info, fcport->vha, 0x2110,
            "remoteport_delete of %p completed.\n", fcport);
}

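/* FC-NVMe transport template: driver entry points and capabilities. */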
static struct nvme_fc_port_template qla_nvme_fc_transport = {
        .localport_delete = qla_nvme_localport_delete,
        .remoteport_delete = qla_nvme_remoteport_delete,
        .create_queue   = qla_nvme_alloc_queue,
        .delete_queue   = NULL,
        .ls_req         = qla_nvme_ls_req,
        .ls_abort       = qla_nvme_ls_abort,
        .fcp_io         = qla_nvme_post_cmd,
        .fcp_abort      = qla_nvme_fcp_abort,
        .poll_queue     = qla_nvme_poll,
        .max_hw_queues  = 8,
        .max_sgl_segments = 128,
        .max_dif_sgl_segments = 64,
        .dma_boundary = 0xFFFFFFFF,
        .local_priv_sz  = 8,
        .remote_priv_sz = sizeof(struct qla_nvme_rport),
        .lsrqst_priv_sz = sizeof(struct nvme_private),
        .fcprqst_priv_sz = sizeof(struct nvme_private),
};

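/*
 * Give the firmware a short window to complete an aborted command; report
 * failure if the SRB is still referenced after the timeout.
 */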
#define NVME_ABORT_POLLING_PERIOD    2
static int qla_nvme_wait_on_command(srb_t *sp)
{
        int ret = QLA_SUCCESS;

        wait_event_timeout(sp->nvme_ls_waitq, (atomic_read(&sp->ref_count) > 1),
            NVME_ABORT_POLLING_PERIOD*HZ);

        if (atomic_read(&sp->ref_count) > 1)
                ret = QLA_FUNCTION_FAILED;

        return ret;
}

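/* Abort an outstanding NVMe SRB unless an ISP abort is already in progress. */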
void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp, int res)
{
        int rval;

        if (!test_bit(ABORT_ISP_ACTIVE, &sp->vha->dpc_flags)) {
                rval = ha->isp_ops->abort_command(sp);
                if (!rval && !qla_nvme_wait_on_command(sp))
                        ql_log(ql_log_warn, NULL, 0x2112,
                            "timed out waiting on sp=%p\n", sp);
        } else {
                sp->done(sp, res);
        }
}

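/*
 * Work item that unregisters an fcport's remote port from the FC-NVMe
 * transport and waits for the deletion to complete.
 */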
static void qla_nvme_unregister_remote_port(struct work_struct *work)
{
        struct fc_port *fcport = container_of(work, struct fc_port,
            nvme_del_work);
        struct qla_nvme_rport *qla_rport, *trport;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return;

        ql_log(ql_log_warn, NULL, 0x2112,
            "%s: unregister remoteport on %p\n", __func__, fcport);

        list_for_each_entry_safe(qla_rport, trport,
            &fcport->vha->nvme_rport_list, list) {
                if (qla_rport->fcport == fcport) {
                        ql_log(ql_log_info, fcport->vha, 0x2113,
                            "%s: fcport=%p\n", __func__, fcport);
                        init_completion(&fcport->nvme_del_done);
                        nvme_fc_unregister_remoteport(
                            fcport->nvme_remote_port);
                        wait_for_completion(&fcport->nvme_del_done);
                        break;
                }
        }
}

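/*
 * Unregister all NVMe remote ports and the local port for this host; called
 * when the host is being torn down.
 */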
void qla_nvme_delete(struct scsi_qla_host *vha)
{
        struct qla_nvme_rport *qla_rport, *trport;
        fc_port_t *fcport;
        int nv_ret;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return;

        list_for_each_entry_safe(qla_rport, trport,
            &vha->nvme_rport_list, list) {
                fcport = qla_rport->fcport;

                ql_log(ql_log_info, fcport->vha, 0x2114, "%s: fcport=%p\n",
                    __func__, fcport);

                nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
                init_completion(&fcport->nvme_del_done);
                nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
                wait_for_completion(&fcport->nvme_del_done);
        }

        if (vha->nvme_local_port) {
                init_completion(&vha->nvme_del_done);
                ql_log(ql_log_info, vha, 0x2116,
                        "unregister localport=%p\n",
                        vha->nvme_local_port);
                nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
                if (nv_ret)
                        ql_log(ql_log_info, vha, 0x2115,
                            "Unregister of localport failed\n");
                else
                        wait_for_completion(&vha->nvme_del_done);
        }
}

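/* Register this host with the FC-NVMe transport as a local port. */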
void qla_nvme_register_hba(struct scsi_qla_host *vha)
{
        struct nvme_fc_port_template *tmpl;
        struct qla_hw_data *ha;
        struct nvme_fc_port_info pinfo;
        int ret;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return;

        ha = vha->hw;
        tmpl = &qla_nvme_fc_transport;

        WARN_ON(vha->nvme_local_port);
        WARN_ON(ha->max_req_queues < 3);

        qla_nvme_fc_transport.max_hw_queues =
            min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
                (uint8_t)(ha->max_req_queues - 2));

        pinfo.node_name = wwn_to_u64(vha->node_name);
        pinfo.port_name = wwn_to_u64(vha->port_name);
        pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
        pinfo.port_id = vha->d_id.b24;

        ql_log(ql_log_info, vha, 0xffff,
            "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
            pinfo.node_name, pinfo.port_name, pinfo.port_id);
        qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

        ret = nvme_fc_register_localport(&pinfo, tmpl,
            get_device(&ha->pdev->dev), &vha->nvme_local_port);
        if (ret) {
                ql_log(ql_log_warn, vha, 0xffff,
                    "register_localport failed: ret=%x\n", ret);
                return;
        }
        vha->nvme_local_port->private = vha;
}