// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
#include <linux/blk-mq-pci.h>
#include <linux/blk-mq.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

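/*
 * Register a discovered FC remote port with the FC-NVMe transport. The port
 * is skipped unless its PRLI service parameters advertise the NVMe target or
 * discovery role, and the local port is registered first if needed.
 */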
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
        struct qla_nvme_rport *rport;
        struct nvme_fc_port_info req;
        int ret;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return 0;

        if (!vha->flags.nvme_enabled) {
                ql_log(ql_log_info, vha, 0x2100,
                    "%s: Not registering target since Host NVME is not enabled\n",
                    __func__);
                return 0;
        }

        if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
                return 0;

        if (!(fcport->nvme_prli_service_param &
            (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
                (fcport->nvme_flag & NVME_FLAG_REGISTERED))
                return 0;

        fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

        memset(&req, 0, sizeof(struct nvme_fc_port_info));
        req.port_name = wwn_to_u64(fcport->port_name);
        req.node_name = wwn_to_u64(fcport->node_name);
        req.port_role = 0;
        req.dev_loss_tmo = 0;

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
                req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
                req.port_role |= FC_PORT_ROLE_NVME_TARGET;

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
                req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

        req.port_id = fcport->d_id.b24;

        ql_log(ql_log_info, vha, 0x2102,
            "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
            __func__, req.node_name, req.port_name,
            req.port_id);

        ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
            &fcport->nvme_remote_port);
        if (ret) {
                ql_log(ql_log_warn, vha, 0x212e,
                    "Failed to register remote port. Transport returned %d\n",
                    ret);
                return ret;
        }

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
                ql_log(ql_log_info, vha, 0x212a,
                       "PortID:%06x Supports SLER\n", req.port_id);

        if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
                ql_log(ql_log_info, vha, 0x212b,
                       "PortID:%06x Supports PI control\n", req.port_id);

        rport = fcport->nvme_remote_port->private;
        rport->fcport = fcport;

        fcport->nvme_flag |= NVME_FLAG_REGISTERED;
        return 0;
}

/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
        struct scsi_qla_host *vha;
        struct qla_hw_data *ha;
        struct qla_qpair *qpair;

        /* Map admin queue and 1st IO queue to index 0 */
        if (qidx)
                qidx--;

        vha = (struct scsi_qla_host *)lport->private;
        ha = vha->hw;

        ql_log(ql_log_info, vha, 0x2104,
            "%s: handle %p, idx =%d, qsize %d\n",
            __func__, handle, qidx, qsize);

        if (qidx > qla_nvme_fc_transport.max_hw_queues) {
                ql_log(ql_log_warn, vha, 0x212f,
                    "%s: Illegal qidx=%d. Max=%d\n",
                    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
                return -EINVAL;
        }

        /* Use base qpair if max_qpairs is 0 */
        if (!ha->max_qpairs) {
                qpair = ha->base_qpair;
        } else {
                if (ha->queue_pair_map[qidx]) {
                        *handle = ha->queue_pair_map[qidx];
                        ql_log(ql_log_info, vha, 0x2121,
                               "Returning existing qpair of %p for idx=%x\n",
                               *handle, qidx);
                        return 0;
                }

                qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
                if (!qpair) {
                        ql_log(ql_log_warn, vha, 0x2122,
                               "Failed to allocate qpair\n");
                        return -EINVAL;
                }
        }
        *handle = qpair;

        return 0;
}

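/*
 * Last-reference release for an NVMe FCP command: detach the srb from its
 * nvme_private, report the cached completion status back to the transport
 * via fd->done(), and return the srb to its qpair.
 */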
static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
{
        struct srb *sp = container_of(kref, struct srb, cmd_kref);
        struct nvme_private *priv = (struct nvme_private *)sp->priv;
        struct nvmefc_fcp_req *fd;
        struct srb_iocb *nvme;
        unsigned long flags;

        if (!priv)
                goto out;

        nvme = &sp->u.iocb_cmd;
        fd = nvme->u.nvme.desc;

        spin_lock_irqsave(&priv->cmd_lock, flags);
        priv->sp = NULL;
        sp->priv = NULL;
        if (priv->comp_status == QLA_SUCCESS) {
                fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
                fd->status = NVME_SC_SUCCESS;
        } else {
                fd->rcv_rsplen = 0;
                fd->transferred_length = 0;
                fd->status = NVME_SC_INTERNAL;
        }
        spin_unlock_irqrestore(&priv->cmd_lock, flags);

        fd->done(fd);
out:
        qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

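/*
 * Last-reference release for an NVMe LS request: detach the srb, complete
 * the request toward the transport with the cached completion status, then
 * free the srb.
 */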
static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
        struct srb *sp = container_of(kref, struct srb, cmd_kref);
        struct nvme_private *priv = (struct nvme_private *)sp->priv;
        struct nvmefc_ls_req *fd;
        unsigned long flags;

        if (!priv)
                goto out;

        spin_lock_irqsave(&priv->cmd_lock, flags);
        priv->sp = NULL;
        sp->priv = NULL;
        spin_unlock_irqrestore(&priv->cmd_lock, flags);

        fd = priv->fd;
        fd->done(fd, priv->comp_status);
out:
        qla2x00_rel_sp(sp);
}

static void qla_nvme_ls_complete(struct work_struct *work)
{
        struct nvme_private *priv =
                container_of(work, struct nvme_private, ls_work);

        kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
}

static void qla_nvme_sp_ls_done(srb_t *sp, int res)
{
        struct nvme_private *priv = sp->priv;

        if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
                return;

        if (res)
                res = -EINVAL;

        priv->comp_status = res;
        INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
        schedule_work(&priv->ls_work);
}

/* It is assumed that the qpair lock is held. */
static void qla_nvme_sp_done(srb_t *sp, int res)
{
        struct nvme_private *priv = sp->priv;

        priv->comp_status = res;
        kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
}

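/*
 * Worker that issues the actual abort for a cancelled or timed-out NVMe
 * request. The reference taken before the work was scheduled is dropped
 * here unless the firmware will call back through the ABTS completion path.
 */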
static void qla_nvme_abort_work(struct work_struct *work)
{
        struct nvme_private *priv =
                container_of(work, struct nvme_private, abort_work);
        srb_t *sp = priv->sp;
        fc_port_t *fcport = sp->fcport;
        struct qla_hw_data *ha = fcport->vha->hw;
        int rval, abts_done_called = 1;
        bool io_wait_for_abort_done;
        uint32_t handle;

        ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
               "%s called for sp=%p, hndl=%x on fcport=%p desc=%p deleted=%d\n",
               __func__, sp, sp->handle, fcport, sp->u.iocb_cmd.u.nvme.desc, fcport->deleted);

        if (!ha->flags.fw_started || fcport->deleted == QLA_SESS_DELETED)
                goto out;

        if (ha->flags.host_shutting_down) {
                ql_log(ql_log_info, sp->fcport->vha, 0xffff,
                    "%s Calling done on sp: %p, type: 0x%x\n",
                    __func__, sp, sp->type);
                sp->done(sp, 0);
                goto out;
        }

        /*
         * sp may not be valid after abort_command() if the return code is
         * either SUCCESS or ERR_FROM_FW, so cache the values here.
         */
        io_wait_for_abort_done = ql2xabts_wait_nvme &&
                                        QLA_ABTS_WAIT_ENABLED(sp);
        handle = sp->handle;

        rval = ha->isp_ops->abort_command(sp);

        ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
            "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
            __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
            sp, handle, fcport, rval);

        /*
         * If async tmf is enabled, the abort callback is called only on
         * return codes QLA_SUCCESS and QLA_ERR_FROM_FW.
         */
        if (ql2xasynctmfenable &&
            rval != QLA_SUCCESS && rval != QLA_ERR_FROM_FW)
                abts_done_called = 0;

        /*
         * Return before decreasing the kref so that outstanding I/O requests
         * wait until the ABTS completes. The kref is dropped in
         * qla24xx_abort_sp_done().
         */
        if (abts_done_called && io_wait_for_abort_done)
                return;
out:
        /* kref_get was done before the work was scheduled. */
        kref_put(&sp->cmd_kref, sp->put_fn);
}

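/*
 * nvme_fc ls_abort callback: if the LS request still owns an srb, take a
 * reference on it and defer the abort to qla_nvme_abort_work().
 */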
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
        struct nvme_private *priv = fd->private;
        unsigned long flags;

        spin_lock_irqsave(&priv->cmd_lock, flags);
        if (!priv->sp) {
                spin_unlock_irqrestore(&priv->cmd_lock, flags);
                return;
        }

        if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
                spin_unlock_irqrestore(&priv->cmd_lock, flags);
                return;
        }
        spin_unlock_irqrestore(&priv->cmd_lock, flags);

        INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
        schedule_work(&priv->abort_work);
}

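/*
 * nvme_fc ls_req callback: wrap the LS request in an srb, map the request
 * buffer for DMA and hand the IOCB to the firmware via qla2x00_start_sp().
 */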
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
        struct qla_nvme_rport *qla_rport = rport->private;
        fc_port_t *fcport = qla_rport->fcport;
        struct srb_iocb   *nvme;
        struct nvme_private *priv = fd->private;
        struct scsi_qla_host *vha;
        int     rval = QLA_FUNCTION_FAILED;
        struct qla_hw_data *ha;
        srb_t           *sp;

        if (!fcport || fcport->deleted)
                return rval;

        vha = fcport->vha;
        ha = vha->hw;

        if (!ha->flags.fw_started)
                return rval;

        /* Alloc SRB structure */
        sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
        if (!sp)
                return rval;

        sp->type = SRB_NVME_LS;
        sp->name = "nvme_ls";
        sp->done = qla_nvme_sp_ls_done;
        sp->put_fn = qla_nvme_release_ls_cmd_kref;
        sp->priv = priv;
        priv->sp = sp;
        kref_init(&sp->cmd_kref);
        spin_lock_init(&priv->cmd_lock);
        nvme = &sp->u.iocb_cmd;
        priv->fd = fd;
        nvme->u.nvme.desc = fd;
        nvme->u.nvme.dir = 0;
        nvme->u.nvme.dl = 0;
        nvme->u.nvme.cmd_len = fd->rqstlen;
        nvme->u.nvme.rsp_len = fd->rsplen;
        nvme->u.nvme.rsp_dma = fd->rspdma;
        nvme->u.nvme.timeout_sec = fd->timeout;
        nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
            fd->rqstlen, DMA_TO_DEVICE);
        dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
            fd->rqstlen, DMA_TO_DEVICE);

        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x700e,
                    "qla2x00_start_sp failed = %d\n", rval);
                wake_up(&sp->nvme_ls_waitq);
                sp->priv = NULL;
                priv->sp = NULL;
                qla2x00_rel_sp(sp);
                return rval;
        }

        return rval;
}

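/*
 * nvme_fc fcp_abort callback: mirror of qla_nvme_ls_abort() for FCP
 * commands; grab a reference on the srb and schedule the abort worker.
 */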
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
        struct nvme_private *priv = fd->private;
        unsigned long flags;

        spin_lock_irqsave(&priv->cmd_lock, flags);
        if (!priv->sp) {
                spin_unlock_irqrestore(&priv->cmd_lock, flags);
                return;
        }
        if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
                spin_unlock_irqrestore(&priv->cmd_lock, flags);
                return;
        }
        spin_unlock_irqrestore(&priv->cmd_lock, flags);

        INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
        schedule_work(&priv->abort_work);
}

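/*
 * Build a Command Type NVME IOCB (plus any Continuation Type 1 IOCBs for
 * additional data segments) on the qpair's request queue and ring the
 * doorbell. Called with no lock held; the qpair lock is taken here.
 */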
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
        unsigned long   flags;
        uint32_t        *clr_ptr;
        uint32_t        handle;
        struct cmd_nvme *cmd_pkt;
        uint16_t        cnt, i;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        uint16_t        avail_dsds;
        struct dsd64    *cur_dsd;
        struct req_que *req = NULL;
        struct rsp_que *rsp = NULL;
        struct scsi_qla_host *vha = sp->fcport->vha;
        struct qla_hw_data *ha = vha->hw;
        struct qla_qpair *qpair = sp->qpair;
        struct srb_iocb *nvme = &sp->u.iocb_cmd;
        struct scatterlist *sgl, *sg;
        struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
        struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
        uint32_t        rval = QLA_SUCCESS;

        /* Setup qpair pointers */
        req = qpair->req;
        rsp = qpair->rsp;
        tot_dsds = fd->sg_cnt;

        /* Acquire qpair specific lock */
        spin_lock_irqsave(&qpair->qp_lock, flags);

        handle = qla2xxx_get_next_handle(req);
        if (handle == 0) {
                rval = -EBUSY;
                goto queuing_error;
        }
        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                if (IS_SHADOW_REG_CAPABLE(ha)) {
                        cnt = *req->out_ptr;
                } else {
                        cnt = rd_reg_dword_relaxed(req->req_q_out);
                        if (qla2x00_check_reg16_for_disconnect(vha, cnt))
                                goto queuing_error;
                }

                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length - (req->ring_index - cnt);

                if (req->cnt < (req_cnt + 2)) {
                        rval = -EBUSY;
                        goto queuing_error;
                }
        }

        if (unlikely(!fd->sqid)) {
                if (cmd->sqe.common.opcode == nvme_admin_async_event) {
                        nvme->u.nvme.aen_op = 1;
                        atomic_inc(&ha->nvme_active_aen_cnt);
                }
        }

        /* Build command packet. */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        req->cnt -= req_cnt;

        cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
        cmd_pkt->handle = make_handle(req->id, handle);

        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

        cmd_pkt->entry_status = 0;

        /* Update entry type to indicate Command NVME IOCB */
        cmd_pkt->entry_type = COMMAND_NVME;

        /* No data transfer - how do we check for buffer len == 0? */
        if (fd->io_dir == NVMEFC_FCP_READ) {
                cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
                qpair->counters.input_bytes += fd->payload_length;
                qpair->counters.input_requests++;
        } else if (fd->io_dir == NVMEFC_FCP_WRITE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
                if ((vha->flags.nvme_first_burst) &&
                    (sp->fcport->nvme_prli_service_param &
                        NVME_PRLI_SP_FIRST_BURST)) {
                        if ((fd->payload_length <=
                            sp->fcport->nvme_first_burst_size) ||
                                (sp->fcport->nvme_first_burst_size == 0))
                                cmd_pkt->control_flags |=
                                        cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
                }
                qpair->counters.output_bytes += fd->payload_length;
                qpair->counters.output_requests++;
        } else if (fd->io_dir == 0) {
                cmd_pkt->control_flags = 0;
        }

        if (sp->fcport->edif.enable && fd->io_dir != 0)
                cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);

        /* Set BIT_13 of control flags for Async event */
        if (vha->flags.nvme2_enabled &&
            cmd->sqe.common.opcode == nvme_admin_async_event) {
                cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
        }

        /* Set NPORT-ID */
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

        /* NVME RSP IU */
        cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
        put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);

        /* NVME CMND IU */
        cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
        cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);

        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
        cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

        /* One DSD is available in the Command Type NVME IOCB */
        avail_dsds = 1;
        cur_dsd = &cmd_pkt->nvme_dsd;
        sgl = fd->first_sgl;

        /* Load data segments */
        for_each_sg(sgl, sg, tot_dsds, i) {
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */

                        /* Adjust ring index */
                        req->ring_index++;
                        if (req->ring_index == req->length) {
                                req->ring_index = 0;
                                req->ring_ptr = req->ring;
                        } else {
                                req->ring_ptr++;
                        }
                        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
                        put_unaligned_le32(CONTINUE_A64_TYPE,
                                           &cont_pkt->entry_type);

                        cur_dsd = cont_pkt->dsd;
                        avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }

                append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }

        /* Set total entry count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        /* ignore nvme async cmd due to long timeout */
        if (!nvme->u.nvme.aen_op)
                sp->qpair->cmd_cnt++;

        /* Set chip new ring index. */
        wrt_reg_dword(req->req_q_in, req->ring_index);

        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla24xx_process_response_queue(vha, rsp);

queuing_error:
        spin_unlock_irqrestore(&qpair->qp_lock, flags);

        return rval;
}

/* Post an NVMe FCP command (nvme_fc .fcp_io callback) */
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
        fc_port_t *fcport;
        struct srb_iocb *nvme;
        struct scsi_qla_host *vha;
        int rval;
        srb_t *sp;
        struct qla_qpair *qpair = hw_queue_handle;
        struct nvme_private *priv = fd->private;
        struct qla_nvme_rport *qla_rport = rport->private;

        if (!priv) {
                /* nvme association has been torn down */
                return -ENODEV;
        }

        fcport = qla_rport->fcport;

        if (unlikely(!qpair || !fcport || fcport->deleted))
                return -EBUSY;

        if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
                return -ENODEV;

        vha = fcport->vha;

        if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
                return -EBUSY;

        /*
         * If we know the dev is going away while the transport is still
         * sending IOs, return busy to stall the IO queue.  This happens when
         * the link goes away and fw hasn't notified us yet, but IOs are being
         * returned.  If the dev comes back quickly we won't exhaust the IO
         * retry count at the core.
         */
        if (fcport->nvme_flag & NVME_FLAG_RESETTING)
                return -EBUSY;

        /* Alloc SRB structure */
        sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
        if (!sp)
                return -EBUSY;

        init_waitqueue_head(&sp->nvme_ls_waitq);
        kref_init(&sp->cmd_kref);
        spin_lock_init(&priv->cmd_lock);
        sp->priv = priv;
        priv->sp = sp;
        sp->type = SRB_NVME_CMD;
        sp->name = "nvme_cmd";
        sp->done = qla_nvme_sp_done;
        sp->put_fn = qla_nvme_release_fcp_cmd_kref;
        sp->qpair = qpair;
        sp->vha = vha;
        sp->cmd_sp = sp;
        nvme = &sp->u.iocb_cmd;
        nvme->u.nvme.desc = fd;

        rval = qla2x00_start_nvme_mq(sp);
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x212d,
                    "qla2x00_start_nvme_mq failed = %d\n", rval);
                wake_up(&sp->nvme_ls_waitq);
                sp->priv = NULL;
                priv->sp = NULL;
                qla2xxx_rel_qpair_sp(sp->qpair, sp);
        }

        return rval;
}

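/*
 * nvme_fc map_queues callback: derive the transport's hardware-queue-to-CPU
 * mapping from the PCI device's IRQ affinity.
 */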
static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
                struct blk_mq_queue_map *map)
{
        struct scsi_qla_host *vha = lport->private;
        int rc;

        rc = blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
        if (rc)
                ql_log(ql_log_warn, vha, 0x21de,
                       "pci map queue failed 0x%x\n", rc);
}

static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
        struct scsi_qla_host *vha = lport->private;

        ql_log(ql_log_info, vha, 0x210f,
            "localport delete of %p completed.\n", vha->nvme_local_port);
        vha->nvme_local_port = NULL;
        complete(&vha->nvme_del_done);
}

static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
        fc_port_t *fcport;
        struct qla_nvme_rport *qla_rport = rport->private;

        fcport = qla_rport->fcport;
        fcport->nvme_remote_port = NULL;
        fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
        fcport->nvme_flag &= ~NVME_FLAG_DELETING;
        ql_log(ql_log_info, fcport->vha, 0x2110,
            "remoteport_delete of %p %8phN completed.\n",
            fcport, fcport->port_name);
        complete(&fcport->nvme_del_done);
}

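/*
 * Callbacks and limits advertised to the FC-NVMe transport. The template is
 * consumed by nvme_fc_register_localport() in qla_nvme_register_hba() below,
 * roughly (illustrative sketch, error handling omitted):
 *
 *      ret = nvme_fc_register_localport(&pinfo, &qla_nvme_fc_transport,
 *                                       get_device(&ha->pdev->dev),
 *                                       &vha->nvme_local_port);
 *
 * max_hw_queues and dma_boundary are adjusted at registration time to match
 * the HBA's qpair count and the SCSI host's DMA boundary.
 */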
static struct nvme_fc_port_template qla_nvme_fc_transport = {
        .localport_delete = qla_nvme_localport_delete,
        .remoteport_delete = qla_nvme_remoteport_delete,
        .create_queue   = qla_nvme_alloc_queue,
        .delete_queue   = NULL,
        .ls_req         = qla_nvme_ls_req,
        .ls_abort       = qla_nvme_ls_abort,
        .fcp_io         = qla_nvme_post_cmd,
        .fcp_abort      = qla_nvme_fcp_abort,
        .map_queues     = qla_nvme_map_queues,
        .max_hw_queues  = 8,
        .max_sgl_segments = 1024,
        .max_dif_sgl_segments = 64,
        .dma_boundary = 0xFFFFFFFF,
        .local_priv_sz  = 8,
        .remote_priv_sz = sizeof(struct qla_nvme_rport),
        .lsrqst_priv_sz = sizeof(struct nvme_private),
        .fcprqst_priv_sz = sizeof(struct nvme_private),
};

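/*
 * Tear down the FC-NVMe remote port association and wait for the transport
 * to call back through qla_nvme_remoteport_delete().
 */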
void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
        int ret;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return;

        ql_log(ql_log_warn, fcport->vha, 0x2112,
            "%s: unregister remoteport on %p %8phN\n",
            __func__, fcport, fcport->port_name);

        if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
                nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);

        init_completion(&fcport->nvme_del_done);
        ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
        if (ret)
                ql_log(ql_log_info, fcport->vha, 0x2114,
                        "%s: Failed to unregister nvme_remote_port (%d)\n",
                            __func__, ret);
        wait_for_completion(&fcport->nvme_del_done);
}

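/*
 * Unregister the local port from the FC-NVMe transport during host teardown
 * and wait for qla_nvme_localport_delete() to signal completion.
 */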
void qla_nvme_delete(struct scsi_qla_host *vha)
{
        int nv_ret;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return;

        if (vha->nvme_local_port) {
                init_completion(&vha->nvme_del_done);
                ql_log(ql_log_info, vha, 0x2116,
                        "unregister localport=%p\n",
                        vha->nvme_local_port);
                nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
                if (nv_ret)
                        ql_log(ql_log_info, vha, 0x2115,
                            "Unregister of localport failed\n");
                else
                        wait_for_completion(&vha->nvme_del_done);
        }
}

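/*
 * Register this HBA port as an FC-NVMe local port. max_hw_queues is clamped
 * to the number of qpairs the adapter actually supports before the template
 * is handed to the transport.
 */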
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
        struct nvme_fc_port_template *tmpl;
        struct qla_hw_data *ha;
        struct nvme_fc_port_info pinfo;
        int ret = -EINVAL;

        if (!IS_ENABLED(CONFIG_NVME_FC))
                return ret;

        ha = vha->hw;
        tmpl = &qla_nvme_fc_transport;

        WARN_ON(vha->nvme_local_port);

        qla_nvme_fc_transport.max_hw_queues =
            min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
                (uint8_t)(ha->max_qpairs ? ha->max_qpairs : 1));

        pinfo.node_name = wwn_to_u64(vha->node_name);
        pinfo.port_name = wwn_to_u64(vha->port_name);
        pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
        pinfo.port_id = vha->d_id.b24;

        ql_log(ql_log_info, vha, 0xffff,
            "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
            pinfo.node_name, pinfo.port_name, pinfo.port_id);
        qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

        ret = nvme_fc_register_localport(&pinfo, tmpl,
            get_device(&ha->pdev->dev), &vha->nvme_local_port);
        if (ret) {
                ql_log(ql_log_warn, vha, 0xffff,
                    "register_localport failed: ret=%x\n", ret);
        } else {
                vha->nvme_local_port->private = vha;
        }

        return ret;
}

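/*
 * When waiting for ABTS completions is enabled, ask the firmware to use a
 * driver-specified retry count and response timeout for the Abort IOCB.
 */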
void qla_nvme_abort_set_option(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
        struct qla_hw_data *ha;

        if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
                return;

        ha = orig_sp->fcport->vha->hw;

        WARN_ON_ONCE(abt->options & cpu_to_le16(BIT_0));
        /* Use Driver Specified Retry Count */
        abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT);
        abt->drv.abts_rty_cnt = cpu_to_le16(2);
        /* Use specified response timeout */
        abt->options |= cpu_to_le16(AOF_RSP_TIMEOUT);
        /* set it to 2 * r_a_tov in secs */
        abt->drv.rsp_timeout = cpu_to_le16(2 * (ha->r_a_tov / 10));
}

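/*
 * Log the completion status the firmware returned for an Abort IOCB so that
 * ABTS failures can be correlated with the original NVMe request.
 */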
void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
        u16     comp_status;
        struct scsi_qla_host *vha;

        if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
                return;

        vha = orig_sp->fcport->vha;

        comp_status = le16_to_cpu(abt->comp_status);
        switch (comp_status) {
        case CS_RESET:          /* reset event aborted */
        case CS_ABORTED:        /* IOCB was cleaned */
        case CS_TIMEOUT:
        /* N_Port handle is not currently logged in */
        case CS_PORT_UNAVAILABLE:
        /* N_Port handle was logged out while waiting for ABTS to complete */
        case CS_PORT_LOGGED_OUT:
        /* Firmware found that the port name changed */
        case CS_PORT_CONFIG_CHG:
                ql_dbg(ql_dbg_async, vha, 0xf09d,
                       "Abort I/O IOCB completed with error, comp_status=%x\n",
                       comp_status);
                break;

        /* BA_RJT was received for the ABTS */
        case CS_REJECT_RECEIVED:
                ql_dbg(ql_dbg_async, vha, 0xf09e,
                       "BA_RJT was received for the ABTS rjt_vendorUnique = %u",
                       abt->fw.ba_rjt_vendorUnique);
                ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
                       "ba_rjt_reasonCodeExpl = %u, ba_rjt_reasonCode = %u\n",
                       abt->fw.ba_rjt_reasonCodeExpl, abt->fw.ba_rjt_reasonCode);
                break;

        case CS_COMPLETE:
                ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0xf09f,
                       "IOCB request is completed successfully comp_status=%x\n",
                       comp_status);
                break;

        case CS_IOCB_ERROR:
                ql_dbg(ql_dbg_async, vha, 0xf0a0,
                       "IOCB request is failed, comp_status=%x\n", comp_status);
                break;

        default:
                ql_dbg(ql_dbg_async, vha, 0xf0a1,
                       "Invalid Abort IO IOCB Completion Status %x\n",
                       comp_status);
                break;
        }
}

inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
{
        if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
                return;
        kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
}