drivers/scsi/qla2xxx/qla_nvme.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * QLogic Fibre Channel HBA Driver
4  * Copyright (c) 2003-2017 QLogic Corporation
5  */
6 #include "qla_nvme.h"
7 #include <linux/scatterlist.h>
8 #include <linux/delay.h>
9 #include <linux/nvme.h>
10 #include <linux/nvme-fc.h>
11
12 static struct nvme_fc_port_template qla_nvme_fc_transport;
13
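/*
 * Register an fcport with the FC-NVMe transport as a remote port.
 * Registration is skipped (returning 0) when host NVMe is disabled, when
 * the local port cannot be registered, or when the remote port did not
 * advertise NVMe target/discovery support in its PRLI service parameters
 * or is already registered.  A transport error code is returned when the
 * registration itself fails.
 */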
14 int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
15 {
16         struct qla_nvme_rport *rport;
17         struct nvme_fc_port_info req;
18         int ret;
19
20         if (!IS_ENABLED(CONFIG_NVME_FC))
21                 return 0;
22
23         if (!vha->flags.nvme_enabled) {
24                 ql_log(ql_log_info, vha, 0x2100,
25                     "%s: Not registering target since Host NVME is not enabled\n",
26                     __func__);
27                 return 0;
28         }
29
30         if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
31                 return 0;
32
33         if (!(fcport->nvme_prli_service_param &
34             (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
35                 (fcport->nvme_flag & NVME_FLAG_REGISTERED))
36                 return 0;
37
38         fcport->nvme_flag &= ~NVME_FLAG_RESETTING;
39
40         memset(&req, 0, sizeof(struct nvme_fc_port_info));
41         req.port_name = wwn_to_u64(fcport->port_name);
42         req.node_name = wwn_to_u64(fcport->node_name);
43         req.port_role = 0;
44         req.dev_loss_tmo = 0;
45
46         if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
47                 req.port_role = FC_PORT_ROLE_NVME_INITIATOR;
48
49         if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
50                 req.port_role |= FC_PORT_ROLE_NVME_TARGET;
51
52         if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
53                 req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
54
55         req.port_id = fcport->d_id.b24;
56
57         ql_log(ql_log_info, vha, 0x2102,
58             "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
59             __func__, req.node_name, req.port_name,
60             req.port_id);
61
62         ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
63             &fcport->nvme_remote_port);
64         if (ret) {
65                 ql_log(ql_log_warn, vha, 0x212e,
66                     "Failed to register remote port. Transport returned %d\n",
67                     ret);
68                 return ret;
69         }
70
71         if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
72                 ql_log(ql_log_info, vha, 0x212a,
73                        "PortID:%06x Supports SLER\n", req.port_id);
74
75         if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
76                 ql_log(ql_log_info, vha, 0x212b,
77                        "PortID:%06x Supports PI control\n", req.port_id);
78
79         rport = fcport->nvme_remote_port->private;
80         rport->fcport = fcport;
81
82         fcport->nvme_flag |= NVME_FLAG_REGISTERED;
83         return 0;
84 }
85
86 /* Allocate a queue for NVMe traffic */
87 static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
88     unsigned int qidx, u16 qsize, void **handle)
89 {
90         struct scsi_qla_host *vha;
91         struct qla_hw_data *ha;
92         struct qla_qpair *qpair;
93
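        /*
         * Queue index 0 (the admin queue) is mapped onto hardware queue 1
         * rather than getting a queue pair of its own.
         */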
94         if (!qidx)
95                 qidx++;
96
97         vha = (struct scsi_qla_host *)lport->private;
98         ha = vha->hw;
99
100         ql_log(ql_log_info, vha, 0x2104,
101             "%s: handle %p, idx =%d, qsize %d\n",
102             __func__, handle, qidx, qsize);
103
104         if (qidx > qla_nvme_fc_transport.max_hw_queues) {
105                 ql_log(ql_log_warn, vha, 0x212f,
106                     "%s: Illegal qidx=%d. Max=%d\n",
107                     __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
108                 return -EINVAL;
109         }
110
111         if (ha->queue_pair_map[qidx]) {
112                 *handle = ha->queue_pair_map[qidx];
113                 ql_log(ql_log_info, vha, 0x2121,
114                     "Returning existing qpair of %p for idx=%x\n",
115                     *handle, qidx);
116                 return 0;
117         }
118
119         qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
120         if (!qpair) {
121                 ql_log(ql_log_warn, vha, 0x2122,
122                     "Failed to allocate qpair\n");
123                 return -EINVAL;
124         }
125         *handle = qpair;
126
127         return 0;
128 }
129
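/*
 * Final kref release for an NVMe FCP command: report the saved completion
 * status back to the FC-NVMe transport via fd->done() and return the srb
 * to its queue pair.
 */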
130 static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
131 {
132         struct srb *sp = container_of(kref, struct srb, cmd_kref);
133         struct nvme_private *priv = (struct nvme_private *)sp->priv;
134         struct nvmefc_fcp_req *fd;
135         struct srb_iocb *nvme;
136         unsigned long flags;
137
138         if (!priv)
139                 goto out;
140
141         nvme = &sp->u.iocb_cmd;
142         fd = nvme->u.nvme.desc;
143
144         spin_lock_irqsave(&priv->cmd_lock, flags);
145         priv->sp = NULL;
146         sp->priv = NULL;
147         if (priv->comp_status == QLA_SUCCESS) {
148                 fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
149                 fd->status = NVME_SC_SUCCESS;
150         } else {
151                 fd->rcv_rsplen = 0;
152                 fd->transferred_length = 0;
153                 fd->status = NVME_SC_INTERNAL;
154         }
155         spin_unlock_irqrestore(&priv->cmd_lock, flags);
156
157         fd->done(fd);
158 out:
159         qla2xxx_rel_qpair_sp(sp->qpair, sp);
160 }
161
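/*
 * Final kref release for an NVMe LS request: complete the request back to
 * the transport with the saved completion status and free the srb.
 */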
162 static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
163 {
164         struct srb *sp = container_of(kref, struct srb, cmd_kref);
165         struct nvme_private *priv = (struct nvme_private *)sp->priv;
166         struct nvmefc_ls_req *fd;
167         unsigned long flags;
168
169         if (!priv)
170                 goto out;
171
172         spin_lock_irqsave(&priv->cmd_lock, flags);
173         priv->sp = NULL;
174         sp->priv = NULL;
175         spin_unlock_irqrestore(&priv->cmd_lock, flags);
176
177         fd = priv->fd;
178         fd->done(fd, priv->comp_status);
179 out:
180         qla2x00_rel_sp(sp);
181 }
182
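/* Deferred LS completion work: drop the command kref from process context. */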
183 static void qla_nvme_ls_complete(struct work_struct *work)
184 {
185         struct nvme_private *priv =
186                 container_of(work, struct nvme_private, ls_work);
187
188         kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
189 }
190
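/*
 * IOCB completion callback for an LS request: record the completion status
 * and schedule the deferred completion work.
 */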
191 static void qla_nvme_sp_ls_done(srb_t *sp, int res)
192 {
193         struct nvme_private *priv = sp->priv;
194
195         if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
196                 return;
197
198         if (res)
199                 res = -EINVAL;
200
201         priv->comp_status = res;
202         INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
203         schedule_work(&priv->ls_work);
204 }
205
206 /* It is assumed that the qpair lock is held. */
207 static void qla_nvme_sp_done(srb_t *sp, int res)
208 {
209         struct nvme_private *priv = sp->priv;
210
211         priv->comp_status = res;
212         kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
213
214         return;
215 }
216
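/*
 * Worker that aborts an outstanding NVMe command.  If the firmware is
 * stopped or the fcport is gone, the abort is skipped; if the host is
 * shutting down, the command is completed directly.  Otherwise an abort is
 * issued to the firmware.  When ql2xabts_wait_nvme applies, the extra kref
 * is kept until the ABTS completes in qla24xx_abort_sp_done(); otherwise
 * it is dropped here.
 */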
217 static void qla_nvme_abort_work(struct work_struct *work)
218 {
219         struct nvme_private *priv =
220                 container_of(work, struct nvme_private, abort_work);
221         srb_t *sp = priv->sp;
222         fc_port_t *fcport = sp->fcport;
223         struct qla_hw_data *ha = fcport->vha->hw;
224         int rval;
225
226         ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
227                "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
228                __func__, sp, sp->handle, fcport, fcport->deleted);
229
230         if (!ha->flags.fw_started || fcport->deleted)
231                 goto out;
232
233         if (ha->flags.host_shutting_down) {
234                 ql_log(ql_log_info, sp->fcport->vha, 0xffff,
235                     "%s Calling done on sp: %p, type: 0x%x\n",
236                     __func__, sp, sp->type);
237                 sp->done(sp, 0);
238                 goto out;
239         }
240
241         rval = ha->isp_ops->abort_command(sp);
242
243         ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
244             "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
245             __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
246             sp, sp->handle, fcport, rval);
247
248         /*
249          * Return before decreasing the kref so that outstanding I/O
250          * requests are waited on until the ABTS completes.  The kref is
251          * decreased in qla24xx_abort_sp_done().
252          */
253         if (ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(sp))
254                 return;
255 out:
256         /* kref_get was done before the work was scheduled. */
257         kref_put(&sp->cmd_kref, sp->put_fn);
258 }
259
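/*
 * FC-NVMe transport ls_abort callback: take a reference on the srb, if the
 * LS request is still outstanding, and schedule the abort work.
 */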
260 static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
261     struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
262 {
263         struct nvme_private *priv = fd->private;
264         unsigned long flags;
265
266         spin_lock_irqsave(&priv->cmd_lock, flags);
267         if (!priv->sp) {
268                 spin_unlock_irqrestore(&priv->cmd_lock, flags);
269                 return;
270         }
271
272         if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
273                 spin_unlock_irqrestore(&priv->cmd_lock, flags);
274                 return;
275         }
276         spin_unlock_irqrestore(&priv->cmd_lock, flags);
277
278         INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
279         schedule_work(&priv->abort_work);
280 }
281
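/*
 * FC-NVMe transport ls_req callback: allocate an SRB_NVME_LS srb, DMA-map
 * the LS request buffer and queue the LS pass-through IOCB.
 */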
282 static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
283     struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
284 {
285         struct qla_nvme_rport *qla_rport = rport->private;
286         fc_port_t *fcport = qla_rport->fcport;
287         struct srb_iocb   *nvme;
288         struct nvme_private *priv = fd->private;
289         struct scsi_qla_host *vha;
290         int     rval = QLA_FUNCTION_FAILED;
291         struct qla_hw_data *ha;
292         srb_t           *sp;
293
294         if (!fcport || fcport->deleted)
295                 return rval;
296
297         vha = fcport->vha;
298         ha = vha->hw;
299
300         if (!ha->flags.fw_started)
301                 return rval;
302
303         /* Alloc SRB structure */
304         sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
305         if (!sp)
306                 return rval;
307
308         sp->type = SRB_NVME_LS;
309         sp->name = "nvme_ls";
310         sp->done = qla_nvme_sp_ls_done;
311         sp->put_fn = qla_nvme_release_ls_cmd_kref;
312         sp->priv = priv;
313         priv->sp = sp;
314         kref_init(&sp->cmd_kref);
315         spin_lock_init(&priv->cmd_lock);
316         nvme = &sp->u.iocb_cmd;
317         priv->fd = fd;
318         nvme->u.nvme.desc = fd;
319         nvme->u.nvme.dir = 0;
320         nvme->u.nvme.dl = 0;
321         nvme->u.nvme.cmd_len = fd->rqstlen;
322         nvme->u.nvme.rsp_len = fd->rsplen;
323         nvme->u.nvme.rsp_dma = fd->rspdma;
324         nvme->u.nvme.timeout_sec = fd->timeout;
325         nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
326             fd->rqstlen, DMA_TO_DEVICE);
327         dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
328             fd->rqstlen, DMA_TO_DEVICE);
329
330         rval = qla2x00_start_sp(sp);
331         if (rval != QLA_SUCCESS) {
332                 ql_log(ql_log_warn, vha, 0x700e,
333                     "qla2x00_start_sp failed = %d\n", rval);
334                 wake_up(&sp->nvme_ls_waitq);
335                 sp->priv = NULL;
336                 priv->sp = NULL;
337                 qla2x00_rel_sp(sp);
338                 return rval;
339         }
340
341         return rval;
342 }
343
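/*
 * FC-NVMe transport fcp_abort callback: same pattern as qla_nvme_ls_abort(),
 * take a reference on the outstanding srb and schedule the abort work.
 */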
344 static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
345     struct nvme_fc_remote_port *rport, void *hw_queue_handle,
346     struct nvmefc_fcp_req *fd)
347 {
348         struct nvme_private *priv = fd->private;
349         unsigned long flags;
350
351         spin_lock_irqsave(&priv->cmd_lock, flags);
352         if (!priv->sp) {
353                 spin_unlock_irqrestore(&priv->cmd_lock, flags);
354                 return;
355         }
356         if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
357                 spin_unlock_irqrestore(&priv->cmd_lock, flags);
358                 return;
359         }
360         spin_unlock_irqrestore(&priv->cmd_lock, flags);
361
362         INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
363         schedule_work(&priv->abort_work);
364 }
365
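/*
 * Build a Command Type NVME IOCB for the request and post it on the
 * qpair's request queue, appending Continuation Type 1 IOCBs when more
 * data segments are needed.  Takes the qpair lock internally.
 */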
366 static inline int qla2x00_start_nvme_mq(srb_t *sp)
367 {
368         unsigned long   flags;
369         uint32_t        *clr_ptr;
370         uint32_t        handle;
371         struct cmd_nvme *cmd_pkt;
372         uint16_t        cnt, i;
373         uint16_t        req_cnt;
374         uint16_t        tot_dsds;
375         uint16_t        avail_dsds;
376         struct dsd64    *cur_dsd;
377         struct req_que *req = NULL;
378         struct scsi_qla_host *vha = sp->fcport->vha;
379         struct qla_hw_data *ha = vha->hw;
380         struct qla_qpair *qpair = sp->qpair;
381         struct srb_iocb *nvme = &sp->u.iocb_cmd;
382         struct scatterlist *sgl, *sg;
383         struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
384         struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
385         uint32_t        rval = QLA_SUCCESS;
386
387         /* Setup qpair pointers */
388         req = qpair->req;
389         tot_dsds = fd->sg_cnt;
390
391         /* Acquire qpair specific lock */
392         spin_lock_irqsave(&qpair->qp_lock, flags);
393
394         handle = qla2xxx_get_next_handle(req);
395         if (handle == 0) {
396                 rval = -EBUSY;
397                 goto queuing_error;
398         }
399         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
400         if (req->cnt < (req_cnt + 2)) {
401                 if (IS_SHADOW_REG_CAPABLE(ha)) {
402                         cnt = *req->out_ptr;
403                 } else {
404                         cnt = rd_reg_dword_relaxed(req->req_q_out);
405                         if (qla2x00_check_reg16_for_disconnect(vha, cnt))
406                                 goto queuing_error;
407                 }
408
409                 if (req->ring_index < cnt)
410                         req->cnt = cnt - req->ring_index;
411                 else
412                         req->cnt = req->length - (req->ring_index - cnt);
413
414                 if (req->cnt < (req_cnt + 2)) {
415                         rval = -EBUSY;
416                         goto queuing_error;
417                 }
418         }
419
420         if (unlikely(!fd->sqid)) {
421                 if (cmd->sqe.common.opcode == nvme_admin_async_event) {
422                         nvme->u.nvme.aen_op = 1;
423                         atomic_inc(&ha->nvme_active_aen_cnt);
424                 }
425         }
426
427         /* Build command packet. */
428         req->current_outstanding_cmd = handle;
429         req->outstanding_cmds[handle] = sp;
430         sp->handle = handle;
431         req->cnt -= req_cnt;
432
433         cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
434         cmd_pkt->handle = make_handle(req->id, handle);
435
436         /* Zero out remaining portion of packet. */
437         clr_ptr = (uint32_t *)cmd_pkt + 2;
438         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
439
440         cmd_pkt->entry_status = 0;
441
442         /* Update entry type to indicate Command NVME IOCB */
443         cmd_pkt->entry_type = COMMAND_NVME;
444
445         /* No data transfer; how do we check for buffer len == 0? */
446         if (fd->io_dir == NVMEFC_FCP_READ) {
447                 cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
448                 qpair->counters.input_bytes += fd->payload_length;
449                 qpair->counters.input_requests++;
450         } else if (fd->io_dir == NVMEFC_FCP_WRITE) {
451                 cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
452                 if ((vha->flags.nvme_first_burst) &&
453                     (sp->fcport->nvme_prli_service_param &
454                         NVME_PRLI_SP_FIRST_BURST)) {
455                         if ((fd->payload_length <=
456                             sp->fcport->nvme_first_burst_size) ||
457                                 (sp->fcport->nvme_first_burst_size == 0))
458                                 cmd_pkt->control_flags |=
459                                         cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
460                 }
461                 qpair->counters.output_bytes += fd->payload_length;
462                 qpair->counters.output_requests++;
463         } else if (fd->io_dir == 0) {
464                 cmd_pkt->control_flags = 0;
465         }
466         /* Set BIT_13 of control flags for Async event */
467         if (vha->flags.nvme2_enabled &&
468             cmd->sqe.common.opcode == nvme_admin_async_event) {
469                 cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
470         }
471
472         /* Set NPORT-ID */
473         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
474         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
475         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
476         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
477         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
478
479         /* NVME RSP IU */
480         cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
481         put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);
482
483         /* NVME CMND IU */
484         cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
485         cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);
486
487         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
488         cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);
489
490         /* One DSD is available in the Command Type NVME IOCB */
491         avail_dsds = 1;
492         cur_dsd = &cmd_pkt->nvme_dsd;
493         sgl = fd->first_sgl;
494
495         /* Load data segments */
496         for_each_sg(sgl, sg, tot_dsds, i) {
497                 cont_a64_entry_t *cont_pkt;
498
499                 /* Allocate additional continuation packets? */
500                 if (avail_dsds == 0) {
501                         /*
502                          * Five DSDs are available in the Continuation
503                          * Type 1 IOCB.
504                          */
505
506                         /* Adjust ring index */
507                         req->ring_index++;
508                         if (req->ring_index == req->length) {
509                                 req->ring_index = 0;
510                                 req->ring_ptr = req->ring;
511                         } else {
512                                 req->ring_ptr++;
513                         }
514                         cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
515                         put_unaligned_le32(CONTINUE_A64_TYPE,
516                                            &cont_pkt->entry_type);
517
518                         cur_dsd = cont_pkt->dsd;
519                         avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
520                 }
521
522                 append_dsd64(&cur_dsd, sg);
523                 avail_dsds--;
524         }
525
526         /* Set total entry count. */
527         cmd_pkt->entry_count = (uint8_t)req_cnt;
528         wmb();
529
530         /* Adjust ring index. */
531         req->ring_index++;
532         if (req->ring_index == req->length) {
533                 req->ring_index = 0;
534                 req->ring_ptr = req->ring;
535         } else {
536                 req->ring_ptr++;
537         }
538
539         /* Set chip new ring index. */
540         wrt_reg_dword(req->req_q_in, req->ring_index);
541
542 queuing_error:
543         spin_unlock_irqrestore(&qpair->qp_lock, flags);
544
545         return rval;
546 }
547
548 /* Post an NVMe FCP command (FC-NVMe transport fcp_io callback). */
549 static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
550     struct nvme_fc_remote_port *rport, void *hw_queue_handle,
551     struct nvmefc_fcp_req *fd)
552 {
553         fc_port_t *fcport;
554         struct srb_iocb *nvme;
555         struct scsi_qla_host *vha;
556         int rval;
557         srb_t *sp;
558         struct qla_qpair *qpair = hw_queue_handle;
559         struct nvme_private *priv = fd->private;
560         struct qla_nvme_rport *qla_rport = rport->private;
561
562         if (!priv) {
563                 /* nvme association has been torn down */
564                 return -ENODEV;
565         }
566
567         fcport = qla_rport->fcport;
568
569         if (unlikely(!qpair || !fcport || fcport->deleted))
570                 return -EBUSY;
571
572         if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
573                 return -ENODEV;
574
575         vha = fcport->vha;
576
577         if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
578                 return -EBUSY;
579
580         /*
581          * If we know the device is going away while the transport is still
582          * sending I/Os, return busy to stall the I/O queue.  This happens
583          * when the link goes away and the firmware hasn't notified us yet,
584          * but I/Os are still being returned.  If the device comes back
585          * quickly, we won't exhaust the I/O retry count at the core.
586          */
587         if (fcport->nvme_flag & NVME_FLAG_RESETTING)
588                 return -EBUSY;
589
590         /* Alloc SRB structure */
591         sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
592         if (!sp)
593                 return -EBUSY;
594
595         init_waitqueue_head(&sp->nvme_ls_waitq);
596         kref_init(&sp->cmd_kref);
597         spin_lock_init(&priv->cmd_lock);
598         sp->priv = priv;
599         priv->sp = sp;
600         sp->type = SRB_NVME_CMD;
601         sp->name = "nvme_cmd";
602         sp->done = qla_nvme_sp_done;
603         sp->put_fn = qla_nvme_release_fcp_cmd_kref;
604         sp->qpair = qpair;
605         sp->vha = vha;
606         sp->cmd_sp = sp;
607         nvme = &sp->u.iocb_cmd;
608         nvme->u.nvme.desc = fd;
609
610         rval = qla2x00_start_nvme_mq(sp);
611         if (rval != QLA_SUCCESS) {
612                 ql_log(ql_log_warn, vha, 0x212d,
613                     "qla2x00_start_nvme_mq failed = %d\n", rval);
614                 wake_up(&sp->nvme_ls_waitq);
615                 sp->priv = NULL;
616                 priv->sp = NULL;
617                 qla2xxx_rel_qpair_sp(sp->qpair, sp);
618         }
619
620         return rval;
621 }
622
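/*
 * Transport callback invoked once the local port has been fully deleted;
 * signals the completion that qla_nvme_delete() waits on.
 */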
623 static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
624 {
625         struct scsi_qla_host *vha = lport->private;
626
627         ql_log(ql_log_info, vha, 0x210f,
628             "localport delete of %p completed.\n", vha->nvme_local_port);
629         vha->nvme_local_port = NULL;
630         complete(&vha->nvme_del_done);
631 }
632
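/*
 * Transport callback invoked once the remote port has been deleted; clears
 * the registration flags and signals the waiting unregister path.
 */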
633 static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
634 {
635         fc_port_t *fcport;
636         struct qla_nvme_rport *qla_rport = rport->private;
637
638         fcport = qla_rport->fcport;
639         fcport->nvme_remote_port = NULL;
640         fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
641         fcport->nvme_flag &= ~NVME_FLAG_DELETING;
642         ql_log(ql_log_info, fcport->vha, 0x2110,
643             "remoteport_delete of %p %8phN completed.\n",
644             fcport, fcport->port_name);
645         complete(&fcport->nvme_del_done);
646 }
647
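/* Host-side FC-NVMe transport template: driver capabilities and entry points. */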
648 static struct nvme_fc_port_template qla_nvme_fc_transport = {
649         .localport_delete = qla_nvme_localport_delete,
650         .remoteport_delete = qla_nvme_remoteport_delete,
651         .create_queue   = qla_nvme_alloc_queue,
652         .delete_queue   = NULL,
653         .ls_req         = qla_nvme_ls_req,
654         .ls_abort       = qla_nvme_ls_abort,
655         .fcp_io         = qla_nvme_post_cmd,
656         .fcp_abort      = qla_nvme_fcp_abort,
657         .max_hw_queues  = 8,
658         .max_sgl_segments = 1024,
659         .max_dif_sgl_segments = 64,
660         .dma_boundary = 0xFFFFFFFF,
661         .local_priv_sz  = 8,
662         .remote_priv_sz = sizeof(struct qla_nvme_rport),
663         .lsrqst_priv_sz = sizeof(struct nvme_private),
664         .fcprqst_priv_sz = sizeof(struct nvme_private),
665 };
666
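/*
 * Unregister the fcport's remote port from the FC-NVMe transport and wait
 * for the deletion to complete.  The devloss timeout is forced to zero when
 * the driver is being removed.
 */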
667 void qla_nvme_unregister_remote_port(struct fc_port *fcport)
668 {
669         int ret;
670
671         if (!IS_ENABLED(CONFIG_NVME_FC))
672                 return;
673
674         ql_log(ql_log_warn, NULL, 0x2112,
675             "%s: unregister remoteport on %p %8phN\n",
676             __func__, fcport, fcport->port_name);
677
678         if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
679                 nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
680
681         init_completion(&fcport->nvme_del_done);
682         ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
683         if (ret)
684                 ql_log(ql_log_info, fcport->vha, 0x2114,
685                         "%s: Failed to unregister nvme_remote_port (%d)\n",
686                             __func__, ret);
687         wait_for_completion(&fcport->nvme_del_done);
688 }
689
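/* Unregister the NVMe local port, if any, and wait for its teardown. */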
690 void qla_nvme_delete(struct scsi_qla_host *vha)
691 {
692         int nv_ret;
693
694         if (!IS_ENABLED(CONFIG_NVME_FC))
695                 return;
696
697         if (vha->nvme_local_port) {
698                 init_completion(&vha->nvme_del_done);
699                 ql_log(ql_log_info, vha, 0x2116,
700                         "unregister localport=%p\n",
701                         vha->nvme_local_port);
702                 nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
703                 if (nv_ret)
704                         ql_log(ql_log_info, vha, 0x2115,
705                             "Unregister of localport failed\n");
706                 else
707                         wait_for_completion(&vha->nvme_del_done);
708         }
709 }
710
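/*
 * Register this host with the FC-NVMe transport as a local port.  FC-NVMe
 * is disabled when fewer than three request queues are available; otherwise
 * max_hw_queues is capped at max_req_queues - 2.
 */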
711 int qla_nvme_register_hba(struct scsi_qla_host *vha)
712 {
713         struct nvme_fc_port_template *tmpl;
714         struct qla_hw_data *ha;
715         struct nvme_fc_port_info pinfo;
716         int ret = -EINVAL;
717
718         if (!IS_ENABLED(CONFIG_NVME_FC))
719                 return ret;
720
721         ha = vha->hw;
722         tmpl = &qla_nvme_fc_transport;
723
724         WARN_ON(vha->nvme_local_port);
725
726         if (ha->max_req_queues < 3) {
727                 if (!ha->flags.max_req_queue_warned)
728                         ql_log(ql_log_info, vha, 0x2120,
729                                "%s: Disabling FC-NVME due to lack of free queue pairs (%d).\n",
730                                __func__, ha->max_req_queues);
731                 ha->flags.max_req_queue_warned = 1;
732                 return ret;
733         }
734
735         qla_nvme_fc_transport.max_hw_queues =
736             min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
737                 (uint8_t)(ha->max_req_queues - 2));
738
739         pinfo.node_name = wwn_to_u64(vha->node_name);
740         pinfo.port_name = wwn_to_u64(vha->port_name);
741         pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
742         pinfo.port_id = vha->d_id.b24;
743
744         ql_log(ql_log_info, vha, 0xffff,
745             "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
746             pinfo.node_name, pinfo.port_name, pinfo.port_id);
747         qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;
748
749         ret = nvme_fc_register_localport(&pinfo, tmpl,
750             get_device(&ha->pdev->dev), &vha->nvme_local_port);
751         if (ret) {
752                 ql_log(ql_log_warn, vha, 0xffff,
753                     "register_localport failed: ret=%x\n", ret);
754         } else {
755                 vha->nvme_local_port->private = vha;
756         }
757
758         return ret;
759 }
760
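/*
 * When ql2xabts_wait_nvme applies to this command, program the Abort IOCB
 * with a driver-specified ABTS retry count and a response timeout of twice
 * R_A_TOV.
 */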
761 void qla_nvme_abort_set_option(struct abort_entry_24xx *abt, srb_t *orig_sp)
762 {
763         struct qla_hw_data *ha;
764
765         if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
766                 return;
767
768         ha = orig_sp->fcport->vha->hw;
769
770         WARN_ON_ONCE(abt->options & cpu_to_le16(BIT_0));
771         /* Use Driver Specified Retry Count */
772         abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT);
773         abt->drv.abts_rty_cnt = cpu_to_le16(2);
774         /* Use specified response timeout */
775         abt->options |= cpu_to_le16(AOF_RSP_TIMEOUT);
776         /* set it to 2 * r_a_tov in secs */
777         abt->drv.rsp_timeout = cpu_to_le16(2 * (ha->r_a_tov / 10));
778 }
779
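/* Log the Abort IOCB completion status reported by the firmware. */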
780 void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *orig_sp)
781 {
782         u16     comp_status;
783         struct scsi_qla_host *vha;
784
785         if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
786                 return;
787
788         vha = orig_sp->fcport->vha;
789
790         comp_status = le16_to_cpu(abt->comp_status);
791         switch (comp_status) {
792         case CS_RESET:          /* reset event aborted */
793         case CS_ABORTED:        /* IOCB was cleaned */
794         /* N_Port handle is not currently logged in */
795         case CS_TIMEOUT:
796         /* N_Port handle was logged out while waiting for ABTS to complete */
797         case CS_PORT_UNAVAILABLE:
798         /* Firmware found that the port name changed */
799         case CS_PORT_LOGGED_OUT:
800         /* Port configuration has changed */
801         case CS_PORT_CONFIG_CHG:
802                 ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09d,
803                        "Abort I/O IOCB completed with error, comp_status=%x\n",
804                        comp_status);
805                 break;
806
807         /* BA_RJT was received for the ABTS */
808         case CS_REJECT_RECEIVED:
809                 ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
810                        "BA_RJT was received for the ABTS rjt_vendorUnique = %u",
811                         abt->fw.ba_rjt_vendorUnique);
812                 ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
813                        "ba_rjt_reasonCodeExpl = %u, ba_rjt_reasonCode = %u\n",
814                        abt->fw.ba_rjt_reasonCodeExpl, abt->fw.ba_rjt_reasonCode);
815                 break;
816
817         case CS_COMPLETE:
818                 ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09f,
819                        "IOCB request is completed successfully comp_status=%x\n",
820                        comp_status);
821                 break;
822
823         case CS_IOCB_ERROR:
824                 ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf0a0,
825                        "IOCB request is failed, comp_status=%x\n", comp_status);
826                 break;
827
828         default:
829                 ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf0a1,
830                        "Invalid Abort IO IOCB Completion Status %x\n",
831                        comp_status);
832                 break;
833         }
834 }
835
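/*
 * Drop the command kref that was retained while the ABTS was outstanding
 * (see qla_nvme_abort_work()), allowing the original command to be released.
 */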
836 inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
837 {
838         if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
839                 return;
840         kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
841 }