/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_nvme.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* NVME initiator-based functions */

static struct lpfc_io_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  int idx, int expedite);

static void
lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *);

static struct nvme_fc_port_template lpfc_nvme_template;

/**
 * lpfc_nvme_create_queue - Create/bind an NVME IO queue handle
 * @pnvme_lport: Transport localport that LS is to be issued from
 * @qidx: A CPU index used to affinitize IO queues and MSI-X vectors.
 * @qsize: Size of the queue in bytes
 * @handle: An opaque driver handle used in follow-up calls.
 *
 * Driver registers this routine to preallocate and initialize any
 * internal data structures to bind the @qidx to its internal IO queues.
 * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ.
 *
 * Return value :
 *   0 - Success
 *   -EINVAL - Unsupported input value.
 *   -ENOMEM - Could not alloc necessary memory
 **/
static int
lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx, u16 qsize,
		       void **handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_nvme_qhandle *qhandle;
	char *str;

	if (!pnvme_lport->private)
		return -ENOMEM;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;
	qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
	if (qhandle == NULL)
		return -ENOMEM;

	qhandle->cpu_id = raw_smp_processor_id();
	qhandle->qidx = qidx;
	/*
	 * NVME qidx == 0 is the admin queue, so both admin queue
	 * and first IO queue will use MSI-X vector and associated
	 * EQ/CQ/WQ at index 0. After that they are sequentially assigned.
	 */
	if (qidx) {
		str = "IO ";  /* IO queue */
		qhandle->index = ((qidx - 1) %
			lpfc_nvme_template.max_hw_queues);
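		/*
		 * Example of the resulting mapping (assuming
		 * max_hw_queues == 4): IO qidx 1..4 bind to hdw_queue
		 * 0..3 and qidx 5 wraps back to hdw_queue 0; the admin
		 * queue (qidx 0) below shares hdw_queue 0 with the
		 * first IO queue.
		 */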
	} else {
		str = "ADM";  /* Admin queue */
		qhandle->index = qidx;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6073 Binding %s HdwQueue %d  (cpu %d) to "
			 "hdw_queue %d qhandle x%px\n", str,
			 qidx, qhandle->cpu_id, qhandle->index, qhandle);
	*handle = (void *)qhandle;
	return 0;
}

/**
 * lpfc_nvme_delete_queue - Free an NVME IO queue handle
 * @pnvme_lport: Transport localport that LS is to be issued from
 * @qidx: A CPU index used to affinitize IO queues and MSI-X vectors.
 * @handle: An opaque driver handle from lpfc_nvme_create_queue
 *
 * Driver registers this routine to free
 * any internal data structures that bound the @qidx to its internal
 * IO queues.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx,
		       void *handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;

	if (!pnvme_lport->private)
		return;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			"6001 ENTER.  lpfc_pnvme x%px, qidx x%x qhandle x%px\n",
			lport, qidx, handle);
	kfree(handle);
}

static void
lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
{
	struct lpfc_nvme_lport *lport = localport->private;

	lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
			 "6173 localport x%px delete complete\n",
			 lport);

	/* release any threads waiting for the unreg to complete */
	if (lport->vport->localport)
		complete(lport->lport_unreg_cmp);
}

/* lpfc_nvme_remoteport_delete
 *
 * @remoteport: Pointer to an nvme transport remoteport instance.
 *
 * This is a template downcall.  NVME transport calls this function
 * when it has completed the unregistration of a previously
 * registered remoteport.
 *
 * Return value :
 * None
 */
static void
lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct lpfc_nvme_rport *rport = remoteport->private;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	u32 fc4_xpt_flags;

	ndlp = rport->ndlp;
	if (!ndlp) {
		pr_err("**** %s: NULL ndlp on rport x%px remoteport x%px\n",
		       __func__, rport, remoteport);
		goto rport_err;
	}

	vport = ndlp->vport;
	if (!vport) {
		pr_err("**** %s: Null vport on ndlp x%px, ste x%x rport x%px\n",
		       __func__, ndlp, ndlp->nlp_state, rport);
		goto rport_err;
	}

	fc4_xpt_flags = NVME_XPT_REGD | SCSI_XPT_REGD;

	/* Remove this rport from the lport's list - memory is owned by the
	 * transport. Remove the ndlp reference for the NVME transport before
	 * calling state machine to remove the node.
	 */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			"6146 remoteport delete of remoteport x%px\n",
			remoteport);
	spin_lock_irq(&ndlp->lock);

	/* The register rebind might have occurred before the delete
	 * downcall.  Guard against this race.
	 */
	if (ndlp->fc4_xpt_flags & NLP_WAIT_FOR_UNREG)
		ndlp->fc4_xpt_flags &= ~(NLP_WAIT_FOR_UNREG | NVME_XPT_REGD);

	spin_unlock_irq(&ndlp->lock);

	/* On a devloss timeout event, one more put is executed provided the
	 * NVME and SCSI rport unregister requests are complete.  If the vport
	 * is unloading, this extra put is executed by lpfc_drop_node.
	 */
	if (!(ndlp->fc4_xpt_flags & fc4_xpt_flags))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

 rport_err:
	return;
}

/**
 * lpfc_nvme_handle_lsreq - Process an unsolicited NVME LS request
 * @phba: pointer to lpfc hba data structure.
 * @axchg: pointer to exchange context for the NVME LS request
 *
 * This routine is used for processing an asynchronously received NVME LS
 * request. Any remaining validation is done and the LS is then forwarded
 * to the nvme-fc transport via nvme_fc_rcv_ls_req().
 *
 * The calling sequence should be: nvme_fc_rcv_ls_req() -> (processing)
 * -> lpfc_nvme_xmt_ls_rsp/cmp -> req->done.
 * __lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg.
 *
 * Returns 0 if LS was handled and delivered to the transport
 * Returns 1 if LS failed to be handled and should be dropped
 */
int
lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
			struct lpfc_async_xchg_ctx *axchg)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct lpfc_vport *vport;
	struct lpfc_nvme_rport *lpfc_rport;
	struct nvme_fc_remote_port *remoteport;
	struct lpfc_nvme_lport *lport;
	uint32_t *payload = axchg->payload;
	int rc;

	vport = axchg->ndlp->vport;
	lpfc_rport = axchg->ndlp->nrport;
	if (!lpfc_rport)
		return -EINVAL;

	remoteport = lpfc_rport->remoteport;
	if (!vport->localport)
		return -EINVAL;

	lport = vport->localport->private;
	if (!lport)
		return -EINVAL;

	rc = nvme_fc_rcv_ls_req(remoteport, &axchg->ls_rsp, axchg->payload,
				axchg->size);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6205 NVME Unsol rcv: sz %d rc %d: %08x %08x %08x "
			"%08x %08x %08x\n",
			axchg->size, rc,
			*payload, *(payload+1), *(payload+2),
			*(payload+3), *(payload+4), *(payload+5));

	if (!rc)
		return 0;
#endif
	return 1;
}

/**
 * __lpfc_nvme_ls_req_cmp - Generic completion handler for a NVME
 *        LS request.
 * @phba: Pointer to HBA context object
 * @vport: The local port that issued the LS
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * This function is the generic completion handler for NVME LS requests.
 * The function updates any states and statistics, calls the transport
 * ls_req done() routine, then tears down the command and buffers used
 * for the LS request.
 **/
void
__lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
			struct lpfc_iocbq *cmdwqe,
			struct lpfc_wcqe_complete *wcqe)
{
	struct nvmefc_ls_req *pnvme_lsreq;
	struct lpfc_dmabuf *buf_ptr;
	struct lpfc_nodelist *ndlp;
	uint32_t status;

	pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
	ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6047 NVMEx LS REQ x%px cmpl DID %x Xri: %x "
			 "status %x reason x%x cmd:x%px lsreg:x%px bmp:x%px "
			 "ndlp:x%px\n",
			 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
			 cmdwqe->sli4_xritag, status,
			 (wcqe->parameter & 0xffff),
			 cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);

	lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x parm x%x\n",
			 cmdwqe->sli4_xritag, status, wcqe->parameter);

	if (cmdwqe->context3) {
		buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
		cmdwqe->context3 = NULL;
	}
	if (pnvme_lsreq->done)
		pnvme_lsreq->done(pnvme_lsreq, status);
	else
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6046 NVMEx cmpl without done call back? "
				 "Data x%px DID %x Xri: %x status %x\n",
				 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
				 cmdwqe->sli4_xritag, status);
	if (ndlp) {
		lpfc_nlp_put(ndlp);
		cmdwqe->context1 = NULL;
	}
	lpfc_sli_release_iocbq(phba, cmdwqe);
}

static void
lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
		       struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_vport *vport = cmdwqe->vport;
	struct lpfc_nvme_lport *lport;
	uint32_t status;

	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;

	if (vport->localport) {
		lport = (struct lpfc_nvme_lport *)vport->localport->private;
		if (lport) {
			atomic_inc(&lport->fc4NvmeLsCmpls);
			if (status) {
				if (bf_get(lpfc_wcqe_c_xb, wcqe))
					atomic_inc(&lport->cmpl_ls_xb);
				atomic_inc(&lport->cmpl_ls_err);
			}
		}
	}

	__lpfc_nvme_ls_req_cmp(phba, vport, cmdwqe, wcqe);
}

static int
lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
		  struct lpfc_dmabuf *inp,
		  struct nvmefc_ls_req *pnvme_lsreq,
		  void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
			       struct lpfc_wcqe_complete *),
		  struct lpfc_nodelist *ndlp, uint32_t num_entry,
		  uint32_t tmo, uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	union lpfc_wqe128 *wqe;
	struct lpfc_iocbq *genwqe;
	struct ulp_bde64 *bpl;
	struct ulp_bde64 bde;
	int i, rc, xmit_len, first_len;

	/* Allocate buffer for command WQE */
	genwqe = lpfc_sli_get_iocbq(phba);
	if (genwqe == NULL)
		return 1;

	wqe = &genwqe->wqe;
	/* Initialize only 64 bytes */
	memset(wqe, 0, sizeof(union lpfc_wqe));

	genwqe->context3 = (uint8_t *)bmp;
	genwqe->iocb_flag |= LPFC_IO_NVME_LS;

	/* Save for completion so we can release these resources */
	genwqe->context1 = lpfc_nlp_get(ndlp);
	if (!genwqe->context1) {
		dev_warn(&phba->pcidev->dev,
			 "Warning: Failed node ref, not sending LS_REQ\n");
		lpfc_sli_release_iocbq(phba, genwqe);
		return 1;
	}

	genwqe->context2 = (uint8_t *)pnvme_lsreq;
	/* Fill in payload, bp points to frame payload */

	if (!tmo)
		/* FC spec states we need 3 * ratov for CT requests */
		tmo = (3 * phba->fc_ratov);

	/* For this command calculate the xmit length of the request bde. */
	xmit_len = 0;
	first_len = 0;
	bpl = (struct ulp_bde64 *)bmp->virt;
	for (i = 0; i < num_entry; i++) {
		bde.tus.w = bpl[i].tus.w;
		if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
			break;
		xmit_len += bde.tus.f.bdeSize;
		if (i == 0)
			first_len = xmit_len;
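		/* Note: xmit_len accumulates across every 64-bit BDE for
		 * the debug print at exit, while first_len (the first
		 * BDE, i.e. the LS request payload) is what gets
		 * programmed into WQE words 0-3 below.
		 */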
	}

	genwqe->rsvd2 = num_entry;
	genwqe->hba_wqidx = 0;

	/* Words 0 - 2 */
	wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->generic.bde.tus.f.bdeSize = first_len;
	wqe->generic.bde.addrLow = bpl[0].addrLow;
	wqe->generic.bde.addrHigh = bpl[0].addrHigh;

	/* Word 3 */
	wqe->gen_req.request_payload_len = first_len;

	/* Word 4 */

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
	bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
	bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_tmo, &wqe->gen_req.wqe_com, tmo);
	bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
	bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
	bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);

	/* Word 8 */
	wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);

	/* Issue GEN REQ WQE for NPORT <did> */
	genwqe->wqe_cmpl = cmpl;
	genwqe->iocb_cmpl = NULL;
	genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
	genwqe->vport = vport;
	genwqe->retry = retry;

	lpfc_nvmeio_data(phba, "NVME LS XMIT: xri x%x iotag x%x to x%06x\n",
			 genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);

	rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
	if (rc) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6045 Issue GEN REQ WQE to NPORT x%x "
				 "Data: x%x x%x rc x%x\n",
				 ndlp->nlp_DID, genwqe->iotag,
				 vport->port_state, rc);
		lpfc_nlp_put(ndlp);
		lpfc_sli_release_iocbq(phba, genwqe);
		return 1;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_ELS,
			 "6050 Issue GEN REQ WQE to NPORT x%x "
			 "Data: oxid: x%x state: x%x wq:x%px lsreq:x%px "
			 "bmp:x%px xmit:%d 1st:%d\n",
			 ndlp->nlp_DID, genwqe->sli4_xritag,
			 vport->port_state,
			 genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
	return 0;
}

/**
 * __lpfc_nvme_ls_req - Generic service routine to issue an NVME LS request
 * @vport: The local port issuing the LS
 * @ndlp: The remote port to send the LS to
 * @pnvme_lsreq: Pointer to LS request structure from the transport
 * @gen_req_cmp: Completion call-back
 *
 * Routine validates the ndlp, builds buffers and sends a GEN_REQUEST
 * WQE to perform the LS operation.
 *
 * Return value:
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
int
__lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		      struct nvmefc_ls_req *pnvme_lsreq,
		      void (*gen_req_cmp)(struct lpfc_hba *phba,
				struct lpfc_iocbq *cmdwqe,
				struct lpfc_wcqe_complete *wcqe))
{
	struct lpfc_dmabuf *bmp;
	struct ulp_bde64 *bpl;
	int ret;
	uint16_t ntype, nstate;

	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6051 NVMEx LS REQ: Bad NDLP x%px, Failing "
				 "LS Req\n",
				 ndlp);
		return -ENODEV;
	}

	ntype = ndlp->nlp_type;
	nstate = ndlp->nlp_state;
	if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
	    (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6088 NVMEx LS REQ: Fail DID x%06x not "
				 "ready for IO. Type x%x, State x%x\n",
				 ndlp->nlp_DID, ntype, nstate);
		return -ENODEV;
	}

	if (!vport->phba->sli4_hba.nvmels_wq)
		return -ENOMEM;

	/*
	 * there are two dma bufs in the request, actually there is one and
	 * the second one is just the start address + cmd size.
	 * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
	 * in a lpfc_dmabuf struct. When freeing we just free the wrapper
	 * because the nvme layer owns the data bufs.
	 * We do not have to break these packets open, we don't care what is
	 * in them. And we do not have to look at the response data, we only
	 * care that we got a response. All of the caring is going to happen
	 * in the nvme-fc layer.
	 */

	bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
	if (!bmp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6044 NVMEx LS REQ: Could not alloc LS buf "
				 "for DID %x\n",
				 ndlp->nlp_DID);
		return -ENOMEM;
	}

	bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
	if (!bmp->virt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6042 NVMEx LS REQ: Could not alloc mbuf "
				 "for DID %x\n",
				 ndlp->nlp_DID);
		kfree(bmp);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&bmp->list);

	bpl = (struct ulp_bde64 *)bmp->virt;
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
	bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
	bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
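	/* The two BPL entries now describe the whole exchange: entry 0
	 * is the LS request (rqstdma/rqstlen) and entry 1 the response
	 * buffer (rspdma/rsplen, typed BUFF_TYPE_BDE_64I for input),
	 * which is why num_entry is 2 in the lpfc_nvme_gen_req() call
	 * below.
	 */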

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			"6149 NVMEx LS REQ: Issue to DID 0x%06x lsreq x%px, "
			"rqstlen:%d rsplen:%d %pad %pad\n",
			ndlp->nlp_DID, pnvme_lsreq, pnvme_lsreq->rqstlen,
			pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			&pnvme_lsreq->rspdma);

	ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
				pnvme_lsreq, gen_req_cmp, ndlp, 2,
				pnvme_lsreq->timeout, 0);
	if (ret != WQE_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6052 NVMEx REQ: EXIT. issue ls wqe failed "
				 "lsreq x%px Status %x DID %x\n",
				 pnvme_lsreq, ret, ndlp->nlp_DID);
		lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
		kfree(bmp);
		return -EIO;
	}

	return 0;
}

/**
 * lpfc_nvme_ls_req - Issue an NVME Link Service request
 * @pnvme_lport: Transport localport that LS is to be issued from.
 * @pnvme_rport: Transport remoteport that LS is to be sent to.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
 *
 * Driver registers this routine to handle any link service request
 * from the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
static int
lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
		 struct nvme_fc_remote_port *pnvme_rport,
		 struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct lpfc_vport *vport;
	int ret;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	if (unlikely(!lport) || unlikely(!rport))
		return -EINVAL;

	vport = lport->vport;
	if (vport->load_flag & FC_UNLOADING)
		return -ENODEV;

	atomic_inc(&lport->fc4NvmeLsRequests);

	ret = __lpfc_nvme_ls_req(vport, rport->ndlp, pnvme_lsreq,
				 lpfc_nvme_ls_req_cmp);
	if (ret)
		atomic_inc(&lport->xmt_ls_err);

	return ret;
}

/**
 * __lpfc_nvme_ls_abort - Generic service routine to abort a prior
 *         NVME LS request
 * @vport: The local port that issued the LS
 * @ndlp: The remote port the LS was sent to
 * @pnvme_lsreq: Pointer to LS request structure from the transport
 *
 * The driver validates the ndlp, looks for the LS, and aborts the
 * LS if found.
 *
 * Returns:
 * 0 : if LS found and aborted
 * non-zero: various error conditions in form -Exxx
 **/
int
__lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *wqe, *next_wqe;
	bool foundit = false;

	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6049 NVMEx LS REQ Abort: Bad NDLP x%px DID "
				 "x%06x, Failing LS Req\n",
				 ndlp, ndlp ? ndlp->nlp_DID : 0);
		return -EINVAL;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
			 "6040 NVMEx LS REQ Abort: Issue LS_ABORT for lsreq "
			 "x%px rqstlen:%d rsplen:%d %pad %pad\n",
			 pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	/*
	 * Lock the ELS ring txcmplq and look for the wqe that matches
	 * this ELS. If found, issue an abort on the wqe.
	 */
	pring = phba->sli4_hba.nvmels_wq->pring;
	spin_lock_irq(&phba->hbalock);
	spin_lock(&pring->ring_lock);
	list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
		if (wqe->context2 == pnvme_lsreq) {
			wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
			foundit = true;
			break;
		}
	}
	spin_unlock(&pring->ring_lock);

	if (foundit)
		lpfc_sli_issue_abort_iotag(phba, pring, wqe, NULL);
	spin_unlock_irq(&phba->hbalock);

	if (foundit)
		return 0;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
			 "6213 NVMEx LS REQ Abort: Unable to locate req x%px\n",
			 pnvme_lsreq);
	return -EINVAL;
}

static int
lpfc_nvme_xmt_ls_rsp(struct nvme_fc_local_port *localport,
		     struct nvme_fc_remote_port *remoteport,
		     struct nvmefc_ls_rsp *ls_rsp)
{
	struct lpfc_async_xchg_ctx *axchg =
		container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
	struct lpfc_nvme_lport *lport;
	int rc;

	if (axchg->phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;

	lport = (struct lpfc_nvme_lport *)localport->private;

	rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, __lpfc_nvme_xmt_ls_rsp_cmp);

	if (rc) {
		/*
		 * unless the failure is due to having already sent
		 * the response, an abort will be generated for the
		 * exchange if the rsp can't be sent.
		 */
		if (rc != -EALREADY)
			atomic_inc(&lport->xmt_ls_abort);
		return rc;
	}

	return 0;
}

/**
 * lpfc_nvme_ls_abort - Abort a prior NVME LS request
 * @pnvme_lport: Transport localport that LS is to be issued from.
 * @pnvme_rport: Transport remoteport that LS is to be sent to.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
 *
 * Driver registers this routine to abort an NVME LS request that is
 * in progress (from the transport's perspective).
 **/
static void
lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
		   struct nvme_fc_remote_port *pnvme_rport,
		   struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	int ret;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport))
		return;
	vport = lport->vport;

	if (vport->load_flag & FC_UNLOADING)
		return;

	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);

	ret = __lpfc_nvme_ls_abort(vport, ndlp, pnvme_lsreq);
	if (!ret)
		atomic_inc(&lport->xmt_ls_abort);
}

/* Fix up the existing sgls for NVME IO. */
static inline void
lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
		       struct lpfc_io_buf *lpfc_ncmd,
		       struct nvmefc_fcp_req *nCmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct sli4_sge *sgl;
	union lpfc_wqe128 *wqe;
	uint32_t *wptr, *dptr;

	/*
	 * Get a local pointer to the built-in wqe and correct
	 * the cmd size to match NVME's 96 bytes and fix
	 * the dma address.
	 */

	wqe = &lpfc_ncmd->cur_iocbq.wqe;

	/*
	 * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
	 * match NVME.  NVME sends 96 bytes. Also, use the
	 * nvme commands command and response dma addresses
	 * rather than the virtual memory to ease the restore
	 * operation.
	 */
	sgl = lpfc_ncmd->dma_sgl;
	sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
	if (phba->cfg_nvme_embed_cmd) {
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;

		/* Word 0-2 - NVME CMND IU (embedded payload) */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
		wqe->generic.bde.tus.f.bdeSize = 56;
		wqe->generic.bde.addrHigh = 0;
		wqe->generic.bde.addrLow = 64;  /* Word 16 */

		/* Word 10 - dbde is 0, wqes is 1 in template */

		/*
		 * Embed the payload in the last half of the WQE
		 * WQE words 16-30 get the NVME CMD IU payload
		 *
		 * WQE words 16-19 get payload Words 1-4
		 * WQE words 20-21 get payload Words 6-7
		 * WQE words 22-29 get payload Words 16-23
		 */
		wptr = &wqe->words[16];  /* WQE ptr */
		dptr = (uint32_t *)nCmd->cmdaddr;  /* payload ptr */
		dptr++;			/* Skip Word 0 in payload */

		*wptr++ = *dptr++;	/* Word 1 */
		*wptr++ = *dptr++;	/* Word 2 */
		*wptr++ = *dptr++;	/* Word 3 */
		*wptr++ = *dptr++;	/* Word 4 */
		dptr++;			/* Skip Word 5 in payload */
		*wptr++ = *dptr++;	/* Word 6 */
		*wptr++ = *dptr++;	/* Word 7 */
		dptr += 8;		/* Skip Words 8-15 in payload */
		*wptr++ = *dptr++;	/* Word 16 */
		*wptr++ = *dptr++;	/* Word 17 */
		*wptr++ = *dptr++;	/* Word 18 */
		*wptr++ = *dptr++;	/* Word 19 */
		*wptr++ = *dptr++;	/* Word 20 */
		*wptr++ = *dptr++;	/* Word 21 */
		*wptr++ = *dptr++;	/* Word 22 */
		*wptr   = *dptr;	/* Word 23 */
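		/* 14 words are copied in total (4 + 2 + 8), matching the
		 * 56-byte immediate bdeSize set above; addrLow == 64 is
		 * simply the byte offset of WQE word 16 where the
		 * embedded payload starts.
		 */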
	} else {
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma));

		/* Word 0-2 - NVME CMND IU Inline BDE */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen;
		wqe->generic.bde.addrHigh = sgl->addr_hi;
		wqe->generic.bde.addrLow = sgl->addr_lo;

		/* Word 10 */
		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
		bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
	}

	sgl++;

	/* Setup the physical region for the FCP RSP */
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
	sgl->word2 = le32_to_cpu(sgl->word2);
	if (nCmd->sg_cnt)
		bf_set(lpfc_sli4_sge_last, sgl, 0);
	else
		bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(nCmd->rsplen);
}

/*
 * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
 *
 * Driver registers this routine as the completion handler for NVME FCP
 * IO WQEs.  It translates the WCQE completion status into the
 * nvmefc_fcp_req result and hands the IO back to the transport via its
 * done() callback.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_io_buf *lpfc_ncmd =
		(struct lpfc_io_buf *)pwqeIn->context1;
	struct lpfc_vport *vport = pwqeIn->vport;
	struct nvmefc_fcp_req *nCmd;
	struct nvme_fc_ersp_iu *ep;
	struct nvme_fc_cmd_iu *cp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	struct lpfc_nvme_lport *lport;
	uint32_t code, status, idx;
	uint16_t cid, sqhd, data;
	uint32_t *ptr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	int cpu;
#endif

	/* Sanity check on return of outstanding command */
	if (!lpfc_ncmd) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6071 Null lpfc_ncmd pointer. No "
				 "release, skip completion\n");
		return;
	}

	/* Guard against abort handler being called at same time */
	spin_lock(&lpfc_ncmd->buf_lock);

	if (!lpfc_ncmd->nvmeCmd) {
		spin_unlock(&lpfc_ncmd->buf_lock);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6066 Missing cmpl ptrs: lpfc_ncmd x%px, "
				 "nvmeCmd x%px\n",
				 lpfc_ncmd, lpfc_ncmd->nvmeCmd);

		/* Release the lpfc_ncmd regardless of the missing elements. */
		lpfc_release_nvme_buf(phba, lpfc_ncmd);
		return;
	}
	nCmd = lpfc_ncmd->nvmeCmd;
	status = bf_get(lpfc_wcqe_c_status, wcqe);

	idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
	phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;

	if (unlikely(status && vport->localport)) {
		lport = (struct lpfc_nvme_lport *)vport->localport->private;
		if (lport) {
			if (bf_get(lpfc_wcqe_c_xb, wcqe))
				atomic_inc(&lport->cmpl_fcp_xb);
			atomic_inc(&lport->cmpl_fcp_err);
		}
	}

	lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 status, wcqe->parameter);
	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = lpfc_ncmd->ndlp;
	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6062 Ignoring NVME cmpl. No ndlp\n");
		goto out_err;
	}

	code = bf_get(lpfc_wcqe_c_code, wcqe);
	if (code == CQE_CODE_NVME_ERSP) {
		/* For this type of CQE, we need to rebuild the rsp */
		ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;

		/*
		 * Get Command Id from cmd to plug into response. This
		 * code is not needed in the next NVME Transport drop.
		 */
		cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
		cid = cp->sqe.common.command_id;

		/*
		 * RSN is in CQE word 2
		 * SQHD is in CQE Word 3 bits 15:0
		 * Cmd Specific info is in CQE Word 1
		 * and in CQE Word 0 bits 15:0
		 */
		sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);

		/* Now lets build the NVME ERSP IU */
		ep->iu_len = cpu_to_be16(8);
		ep->rsn = wcqe->parameter;
		ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
		ep->rsvd12 = 0;
		ptr = (uint32_t *)&ep->cqe.result.u64;
		*ptr++ = wcqe->total_data_placed;
		data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
		*ptr = (uint32_t)data;
		ep->cqe.sq_head = sqhd;
		ep->cqe.sq_id = nCmd->sqid;
		ep->cqe.command_id = cid;
		ep->cqe.status = 0;
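		/* Note: iu_len is in big-endian 32-bit words, so the 8
		 * words written above equal the 32-byte ERSP reported to
		 * the transport as LPFC_NVME_ERSP_LEN below.
		 */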

		lpfc_ncmd->status = IOSTAT_SUCCESS;
		lpfc_ncmd->result = 0;
		nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
		nCmd->transferred_length = nCmd->payload_length;
	} else {
		lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
		lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);

		/* For NVME, the only failure path that results in an
		 * IO error is when the adapter rejects it.  All other
		 * conditions are a success case and resolved by the
		 * transport.
		 * IOSTAT_FCP_RSP_ERROR means:
		 * 1. Length of data received doesn't match total
		 *    transfer length in WQE
		 * 2. If the RSP payload does NOT match these cases:
		 *    a. RSP length 12/24 bytes and all zeros
		 *    b. NVME ERSP
		 */
		switch (lpfc_ncmd->status) {
		case IOSTAT_SUCCESS:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = 0;
			nCmd->status = 0;
			break;
		case IOSTAT_FCP_RSP_ERROR:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = wcqe->parameter;
			nCmd->status = 0;

			/* Check if this is really an ERSP */
			if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) {
				lpfc_ncmd->status = IOSTAT_SUCCESS;
				lpfc_ncmd->result = 0;

				lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
					 "6084 NVME Completion ERSP: "
					 "xri %x placed x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 wcqe->total_data_placed);
				break;
			}
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "6081 NVME Completion Protocol Error: "
					 "xri %x status x%x result x%x "
					 "placed x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->total_data_placed);
			break;
		case IOSTAT_LOCAL_REJECT:
			/* Let fall through to set command final state. */
			if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
				lpfc_printf_vlog(vport, KERN_INFO,
					 LOG_NVME_IOERR,
					 "6032 Delay Aborted cmd x%px "
					 "nvme cmd x%px, xri x%x, "
					 "xb %d\n",
					 lpfc_ncmd, nCmd,
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 bf_get(lpfc_wcqe_c_xb, wcqe));
			fallthrough;
		default:
out_err:
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
					 "6072 NVME Completion Error: xri %x "
					 "status x%x result x%x [x%x] "
					 "placed x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->parameter,
					 wcqe->total_data_placed);
			nCmd->transferred_length = 0;
			nCmd->rcv_rsplen = 0;
			nCmd->status = NVME_SC_INTERNAL;
		}
	}

	/* pick up SLI4 exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe))
		lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
	else
		lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;

	/* Update stats and complete the IO.  There is
	 * no need for dma unprep because the nvme_transport
	 * owns the dma address.
	 */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_ncmd->ts_cmd_start) {
		lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
		lpfc_ncmd->ts_data_io = ktime_get_ns();
		phba->ktime_last_cmd = lpfc_ncmd->ts_data_io;
		lpfc_io_ktime(phba, lpfc_ncmd);
	}
	if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) {
		cpu = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
		if (lpfc_ncmd->cpu != cpu)
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_NVME_IOERR,
					 "6701 CPU Check cmpl: "
					 "cpu %d expect %d\n",
					 cpu, lpfc_ncmd->cpu);
	}
#endif

	/* NVME targets need completion held off until the abort exchange
	 * completes unless the NVME Rport is getting unregistered.
	 */

	if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
		freqpriv = nCmd->private;
		freqpriv->nvme_buf = NULL;
		lpfc_ncmd->nvmeCmd = NULL;
		spin_unlock(&lpfc_ncmd->buf_lock);
		nCmd->done(nCmd);
	} else
		spin_unlock(&lpfc_ncmd->buf_lock);

	/* Call release with XB=1 to queue the IO into the abort list. */
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
}

/**
 * lpfc_nvme_prep_io_cmd - Prepare an NVME-over-FCP IO WQE
 * @vport: pointer to a host virtual N_Port data structure
 * @lpfc_ncmd: Pointer to lpfc scsi command
 * @pnode: pointer to a node-list data structure
 * @cstat: pointer to the control status structure
 *
 * This routine initializes the WQE for an NVME FCP IO from the
 * appropriate iread/iwrite/icmnd template and the @lpfc_ncmd request
 * data before the IO is posted to the hardware queue.
 *
 * Return value :
 *   0 - Success
 **/
static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
		      struct lpfc_io_buf *lpfc_ncmd,
		      struct lpfc_nodelist *pnode,
		      struct lpfc_fc4_ctrl_stat *cstat)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
	union lpfc_wqe128 *wqe = &pwqeq->wqe;
	uint32_t req_len;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
			/* From the iwrite template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iwrite_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length;

			/* Word 5 */
			if ((phba->cfg_nvme_enable_fb) &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				req_len = lpfc_ncmd->nvmeCmd->payload_length;
				if (req_len < pnode->nvme_fb_size)
					wqe->fcp_iwrite.initial_xfer_len =
						req_len;
				else
					wqe->fcp_iwrite.initial_xfer_len =
						pnode->nvme_fb_size;
			} else {
				wqe->fcp_iwrite.initial_xfer_len = 0;
			}
			cstat->output_requests++;
		} else {
			/* From the iread template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iread_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iread.total_xfer_len = nCmd->payload_length;

			/* Word 5 */
			wqe->fcp_iread.rsrvd5 = 0;

			cstat->input_requests++;
		}
	} else {
		/* From the icmnd template, initialize words 4 - 11 */
		memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
		       sizeof(uint32_t) * 8);
		cstat->control_requests++;
	}

	if (pnode->nlp_nvme_info & NLP_NVME_NSLER)
		bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
	/*
	 * Finish initializing those WQE fields that are independent
	 * of the nvme_cmnd request_buffer
	 */

	/* Word 3 */
	bf_set(payload_offset_len, &wqe->fcp_icmd,
	       (nCmd->rsplen + nCmd->cmdlen));

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
	       phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);

	/* Word 8 */
	wqe->generic.wqe_com.abort_tag = pwqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);

	/* Word 10 */
	bf_set(wqe_xchg, &wqe->fcp_iwrite.wqe_com, LPFC_NVME_XCHG);

	/* Words 13 14 15 are for PBDE support */

	pwqeq->vport = vport;
	return 0;
}

/**
 * lpfc_nvme_prep_io_dma - Prepare the data SGLs for an NVME-over-FCP IO
 * @vport: pointer to a host virtual N_Port data structure
 * @lpfc_ncmd: Pointer to lpfc scsi command
 *
 * This routine builds the data SGEs (and optional PBDE) in the WQE for
 * the scatter-gather list supplied with the @lpfc_ncmd request.
 *
 * Return value :
 *   0 - Success
 *   1 - Failure
 **/
static int
lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
		      struct lpfc_io_buf *lpfc_ncmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
	struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
	struct sli4_hybrid_sgl *sgl_xtra = NULL;
	struct scatterlist *data_sg;
	struct sli4_sge *first_data_sgl;
	struct ulp_bde64 *bde;
	dma_addr_t physaddr = 0;
	uint32_t num_bde = 0;
	uint32_t dma_len = 0;
	uint32_t dma_offset = 0;
	int nseg, i, j;
	bool lsp_just_set = false;

	/* Fix up the command and response DMA stuff. */
	lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		/*
		 * Jump over the cmd and rsp SGEs.  The fix routine
		 * has already adjusted for this.
		 */
		sgl += 2;

		first_data_sgl = sgl;
		lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
		if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6058 Too many sg segments from "
					"NVME Transport. Max %d, "
					"nvmeIO sg_cnt %d\n",
					phba->cfg_nvme_seg_cnt + 1,
					lpfc_ncmd->seg_cnt);
			lpfc_ncmd->seg_cnt = 0;
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single nvme command. Just run through the seg_cnt and format
		 * the sge's.
		 */
		nseg = nCmd->sg_cnt;
		data_sg = nCmd->first_sgl;

		/* for tracking the segment boundaries */
		j = 2;
		for (i = 0; i < nseg; i++) {
			if (data_sg == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6059 dptr err %d, nseg %d\n",
						i, nseg);
				lpfc_ncmd->seg_cnt = 0;
				return 1;
			}

			sgl->word2 = 0;
			if ((num_bde + 1) == nseg) {
				bf_set(lpfc_sli4_sge_last, sgl, 1);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);
			} else {
				bf_set(lpfc_sli4_sge_last, sgl, 0);

				/* expand the segment */
				if (!lsp_just_set &&
				    !((j + 1) % phba->border_sge_num) &&
				    ((nseg - 1) != i)) {
					/* set LSP type */
					bf_set(lpfc_sli4_sge_type, sgl,
					       LPFC_SGE_TYPE_LSP);

					sgl_xtra = lpfc_get_sgl_per_hdwq(
							phba, lpfc_ncmd);

					if (unlikely(!sgl_xtra)) {
						lpfc_ncmd->seg_cnt = 0;
						return 1;
					}
					sgl->addr_lo = cpu_to_le32(putPaddrLow(
						       sgl_xtra->dma_phys_sgl));
					sgl->addr_hi = cpu_to_le32(putPaddrHigh(
						       sgl_xtra->dma_phys_sgl));

				} else {
					bf_set(lpfc_sli4_sge_type, sgl,
					       LPFC_SGE_TYPE_DATA);
				}
			}

			if (!(bf_get(lpfc_sli4_sge_type, sgl) &
				     LPFC_SGE_TYPE_LSP)) {
				if ((nseg - 1) == i)
					bf_set(lpfc_sli4_sge_last, sgl, 1);

				physaddr = data_sg->dma_address;
				dma_len = data_sg->length;
				sgl->addr_lo = cpu_to_le32(
							 putPaddrLow(physaddr));
				sgl->addr_hi = cpu_to_le32(
							putPaddrHigh(physaddr));

				bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
				sgl->word2 = cpu_to_le32(sgl->word2);
				sgl->sge_len = cpu_to_le32(dma_len);

				dma_offset += dma_len;
				data_sg = sg_next(data_sg);

				sgl++;

				lsp_just_set = false;
			} else {
				sgl->word2 = cpu_to_le32(sgl->word2);

				sgl->sge_len = cpu_to_le32(
						     phba->cfg_sg_dma_buf_size);

				sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
				i = i - 1;

				lsp_just_set = true;
			}

			j++;
		}
		if (phba->cfg_enable_pbde) {
			/* Use PBDE support for first SGL only, offset == 0 */
			/* Words 13-15 */
			bde = (struct ulp_bde64 *)
				&wqe->words[13];
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
				le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);

			/* Word 11 */
			bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
		} else {
			memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
			bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
		}

	} else {
		lpfc_ncmd->seg_cnt = 0;

		/* For this clause to be valid, the payload_length
		 * and sg_cnt must be zero.
		 */
		if (nCmd->payload_length != 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6063 NVME DMA Prep Err: sg_cnt %d "
					"payload_length x%x\n",
					nCmd->sg_cnt, nCmd->payload_length);
			return 1;
		}
	}
	return 0;
}

/**
 * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 * @pnvme_fcreq: IO request from nvme fc to driver.
 *
 * Driver registers this routine as its io request handler.  This
 * routine issues an fcp WQE with data from the @pnvme_fcreq
 * data structure to the rport indicated in @pnvme_rport.
 *
 * Return value :
 *   0 - Success
 *   non-zero: -Exxx error code on failure
 **/
static int
lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
			struct nvme_fc_remote_port *pnvme_rport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *pnvme_fcreq)
{
	int ret = 0;
	int expedite = 0;
	int idx, cpu;
	struct lpfc_nvme_lport *lport;
	struct lpfc_fc4_ctrl_stat *cstat;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nvme_qhandle *lpfc_queue_info;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	struct nvme_common_command *sqe;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint64_t start = 0;
#endif

	/* Validate pointers. LLDD fault handling with transport does
	 * have timing races.
	 */
	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport)) {
		ret = -EINVAL;
		goto out_fail;
	}

	vport = lport->vport;

	if (unlikely(!hw_queue_handle)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6117 Fail IO, NULL hw_queue_handle\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EBUSY;
		goto out_fail;
	}

	phba = vport->phba;

	if (unlikely(vport->load_flag & FC_UNLOADING)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6124 Fail IO, Driver unload\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -ENODEV;
		goto out_fail;
	}

	freqpriv = pnvme_fcreq->private;
	if (unlikely(!freqpriv)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6158 Fail IO, NULL request data\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EINVAL;
		goto out_fail;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on)
		start = ktime_get_ns();
#endif
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = rport->ndlp;
	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
				 "6053 Busy IO, ndlp not ready: rport x%px "
				 "ndlp x%px, DID x%06x\n",
				 rport, ndlp, pnvme_rport->port_id);
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EBUSY;
		goto out_fail;
	}

	/* The remote node has to be a mapped target or it's an error. */
	if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
				 "6036 Fail IO, DID x%06x not ready for "
				 "IO. State x%x, Type x%x Flg x%x\n",
				 pnvme_rport->port_id,
				 ndlp->nlp_state, ndlp->nlp_type,
				 ndlp->fc4_xpt_flags);
		atomic_inc(&lport->xmt_fcp_bad_ndlp);
		ret = -EBUSY;
		goto out_fail;

	}

	/* Currently only NVME Keep alive commands should be expedited
	 * if the driver runs out of a resource. These should only be
	 * issued on the admin queue, qidx 0
	 */
	if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
		sqe = &((struct nvme_fc_cmd_iu *)
			pnvme_fcreq->cmdaddr)->sqe.common;
		if (sqe->opcode == nvme_admin_keep_alive)
			expedite = 1;
	}
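	/* A keep-alive (nvme_admin_keep_alive, opcode 0x18) carries no
	 * data, so qidx == 0 plus sg_cnt == 0 is a cheap pre-filter
	 * before dereferencing the SQE to check the opcode.
	 */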

	/* The node is shared with FCP IO, make sure the IO pending count does
	 * not exceed the programmed depth.
	 */
	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
		    !expedite) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
					 "6174 Fail IO, ndlp qdepth exceeded: "
					 "idx %d DID %x pend %d qdepth %d\n",
					 lpfc_queue_info->index, ndlp->nlp_DID,
					 atomic_read(&ndlp->cmd_pending),
					 ndlp->cmd_qdepth);
			atomic_inc(&lport->xmt_fcp_qdepth);
			ret = -EBUSY;
			goto out_fail;
		}
	}

	/* Lookup Hardware Queue index based on fcp_io_sched module parameter */
	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
		idx = lpfc_queue_info->index;
	} else {
		cpu = raw_smp_processor_id();
		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
	}

	lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
	if (lpfc_ncmd == NULL) {
		atomic_inc(&lport->xmt_fcp_noxri);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6065 Fail IO, driver buffer pool is empty: "
				 "idx %d DID %x\n",
				 lpfc_queue_info->index, ndlp->nlp_DID);
		ret = -EBUSY;
		goto out_fail;
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (start) {
		lpfc_ncmd->ts_cmd_start = start;
		lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
	} else {
		lpfc_ncmd->ts_cmd_start = 0;
	}
#endif

	/*
	 * Store the data needed by the driver to issue, abort, and complete
	 * an IO.
	 * Do not let the IO hang out forever.  There is no midlayer issuing
	 * an abort so inform the FW of the maximum IO pending time.
	 */
	freqpriv->nvme_buf = lpfc_ncmd;
	lpfc_ncmd->nvmeCmd = pnvme_fcreq;
	lpfc_ncmd->ndlp = ndlp;
	lpfc_ncmd->qidx = lpfc_queue_info->qidx;

	/*
	 * Issue the IO on the WQ indicated by index in the hw_queue_handle.
	 * This identifier was created in our hardware queue create callback
	 * routine. The driver now is dependent on the IO queue steering from
	 * the transport.  We are trusting the upper NVME layers know which
	 * index to use and that they have affinitized a CPU to this hardware
	 * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
	 */
	lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
	cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat;

	lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
	ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
	if (ret) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6175 Fail IO, Prep DMA: "
				 "idx %d DID %x\n",
				 lpfc_queue_info->index, ndlp->nlp_DID);
		atomic_inc(&lport->xmt_fcp_err);
		ret = -ENOMEM;
		goto out_free_nvme_buf;
	}

	lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 lpfc_queue_info->index, ndlp->nlp_DID);

	ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq);
	if (ret) {
		atomic_inc(&lport->xmt_fcp_wqerr);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6113 Fail IO, Could not issue WQE err %x "
				 "sid: x%x did: x%x oxid: x%x\n",
				 ret, vport->fc_myDID, ndlp->nlp_DID,
				 lpfc_ncmd->cur_iocbq.sli4_xritag);
		goto out_free_nvme_buf;
	}

	if (phba->cfg_xri_rebalancing)
		lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_ncmd->ts_cmd_start)
		lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();

	if (phba->hdwqstat_on & LPFC_CHECK_NVME_IO) {
		cpu = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
		lpfc_ncmd->cpu = cpu;
		if (idx != cpu)
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_NVME_IOERR,
					 "6702 CPU Check cmd: "
					 "cpu %d wq %d\n",
					 lpfc_ncmd->cpu,
					 lpfc_queue_info->index);
	}
#endif
	return 0;

 out_free_nvme_buf:
	if (lpfc_ncmd->nvmeCmd->sg_cnt) {
		if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
			cstat->output_requests--;
		else
			cstat->input_requests--;
	} else
		cstat->control_requests--;
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
 out_fail:
	return ret;
}

/**
 * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
 * @abts_cmpl: Pointer to wcqe complete object.
 *
 * This is the callback function for any NVME FCP IO that was aborted.
 *
 * Return value:
 *   None
 **/
void
lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			   struct lpfc_wcqe_complete *abts_cmpl)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6145 ABORT_XRI_CN completing on rpi x%x "
			"original iotag x%x, abort cmd iotag x%x "
			"req_tag x%x, status x%x, hwstatus x%x\n",
			cmdiocb->iocb.un.acxri.abortContextTag,
			cmdiocb->iocb.un.acxri.abortIoTag,
			cmdiocb->iotag,
			bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
			bf_get(lpfc_wcqe_c_status, abts_cmpl),
			bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
	lpfc_sli_release_iocbq(phba, cmdiocb);
}

/**
 * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 * @pnvme_fcreq: IO request from nvme fc to driver.
 *
 * Driver registers this routine as its nvme request io abort handler.  This
 * routine issues an fcp Abort WQE with data from the @lpfc_nvme_fcpreq
 * data structure to the rport indicated in @lpfc_nvme_rport.  This routine
 * is executed asynchronously - once the target is validated as "MAPPED" and
 * ready for IO, the driver issues the abort request and returns.
 *
 * Return value:
 *   None
 **/
static void
lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
		    struct nvme_fc_remote_port *pnvme_rport,
		    void *hw_queue_handle,
		    struct nvmefc_fcp_req *pnvme_fcreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_io_buf *lpfc_nbuf;
	struct lpfc_iocbq *nvmereq_wqe;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	unsigned long flags;
	int ret_val;

	/* Validate pointers. LLDD fault handling with transport does
	 * have timing races.
	 */
	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport))
		return;

	vport = lport->vport;

	if (unlikely(!hw_queue_handle)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
				 "6129 Fail Abort, HW Queue Handle NULL.\n");
		return;
	}

	phba = vport->phba;
	freqpriv = pnvme_fcreq->private;

	if (unlikely(!freqpriv))
		return;
	if (vport->load_flag & FC_UNLOADING)
		return;

	/* Announce entry to new IO submit field. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6002 Abort Request to rport DID x%06x "
			 "for nvme_fc_req x%px\n",
			 pnvme_rport->port_id,
			 pnvme_fcreq);

	/* If the hba is getting reset, this flag is set.  It is
	 * cleared when the reset is complete and rings reestablished.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6139 Driver in reset cleanup - flushing "
				 "NVME Req now. hba_flag x%x\n",
				 phba->hba_flag);
		return;
	}

	lpfc_nbuf = freqpriv->nvme_buf;
	if (!lpfc_nbuf) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6140 NVME IO req has no matching lpfc nvme "
				 "io buffer. Skipping abort req.\n");
		return;
	} else if (!lpfc_nbuf->nvmeCmd) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6141 lpfc NVME IO req has no nvme_fcreq "
				 "io buffer. Skipping abort req.\n");
		return;
	}
	nvmereq_wqe = &lpfc_nbuf->cur_iocbq;

	/* Guard against IO completion being called at same time */
	spin_lock(&lpfc_nbuf->buf_lock);

	/*
	 * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
	 * state must match the nvme_fcreq passed by the nvme
	 * transport.  If they don't match, it is likely the driver
	 * has already completed the NVME IO and the nvme transport
	 * has not seen it yet.
	 */
	if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6143 NVME req mismatch: "
				 "lpfc_nbuf x%px nvmeCmd x%px, "
				 "pnvme_fcreq x%px. Skipping Abort xri x%x\n",
				 lpfc_nbuf, lpfc_nbuf->nvmeCmd,
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		goto out_unlock;
	}

	/* Don't abort IOs no longer on the pending queue. */
	if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6142 NVME IO req x%px not queued - skipping "
				 "abort req xri x%x\n",
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		goto out_unlock;
	}

	atomic_inc(&lport->xmt_fcp_abort);
	lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
			 nvmereq_wqe->sli4_xritag,
			 nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);

	/* Outstanding abort is in progress */
	if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6144 Outstanding NVME I/O Abort Request "
				 "still pending on nvme_fcreq x%px, "
				 "lpfc_ncmd x%px xri x%x\n",
				 pnvme_fcreq, lpfc_nbuf,
				 nvmereq_wqe->sli4_xritag);
		goto out_unlock;
	}

	ret_val = lpfc_sli4_issue_abort_iotag(phba, nvmereq_wqe,
					      lpfc_nvme_abort_fcreq_cmpl);

	spin_unlock(&lpfc_nbuf->buf_lock);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Make sure HBA is alive */
	lpfc_issue_hb_tmo(phba);

	if (ret_val != WQE_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6137 Failed abts issue_wqe with status x%x "
				 "for nvme_fcreq x%px.\n",
				 ret_val, pnvme_fcreq);
		return;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6138 Transport Abort NVME Request Issued for "
			 "ox_id x%x\n",
			 nvmereq_wqe->sli4_xritag);
	return;

out_unlock:
	spin_unlock(&lpfc_nbuf->buf_lock);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return;
}

/* Declare and initialize an instance of the FC NVME template. */
static struct nvme_fc_port_template lpfc_nvme_template = {
	/* initiator-based functions */
	.localport_delete  = lpfc_nvme_localport_delete,
	.remoteport_delete = lpfc_nvme_remoteport_delete,
	.create_queue = lpfc_nvme_create_queue,
	.delete_queue = lpfc_nvme_delete_queue,
	.ls_req       = lpfc_nvme_ls_req,
	.fcp_io       = lpfc_nvme_fcp_io_submit,
	.ls_abort     = lpfc_nvme_ls_abort,
	.fcp_abort    = lpfc_nvme_fcp_abort,
	.xmt_ls_rsp   = lpfc_nvme_xmt_ls_rsp,

	.max_hw_queues = 1,
	.max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,
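	/* A dma_boundary of 0xFFFFFFFF advertises that, as with a SCSI
	 * host template, no scatter/gather element may cross a 4GB
	 * address boundary (our reading of the nvme_fc_port_template
	 * contract).
	 */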

	/* Sizes of additional private data for data structures.
	 * No use for the last two sizes at this time.
	 */
	.local_priv_sz = sizeof(struct lpfc_nvme_lport),
	.remote_priv_sz = sizeof(struct lpfc_nvme_rport),
	.lsrqst_priv_sz = 0,
	.fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
};

/*
 * lpfc_get_nvme_buf - Get an nvme buffer from io_buf_list of the HBA
 *
 * This routine removes an nvme buffer from the head of the @hdwq
 * io_buf_list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_nvme_buf - Success
 */
static struct lpfc_io_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  int idx, int expedite)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_sli4_hdw_queue *qp;
	struct sli4_sge *sgl;
	struct lpfc_iocbq *pwqeq;
	union lpfc_wqe128 *wqe;

	lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite);

	if (lpfc_ncmd) {
		pwqeq = &(lpfc_ncmd->cur_iocbq);
		wqe = &pwqeq->wqe;

		/* Setup key fields in buffer that may have been changed
		 * if other protocols used this buffer.
		 */
		pwqeq->iocb_flag = LPFC_IO_NVME;
		pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
		lpfc_ncmd->start_time = jiffies;
		lpfc_ncmd->flags = 0;

		/* Rsp SGE will be filled in when we rcv an IO
		 * from the NVME Layer to be sent.
		 * The cmd is going to be embedded so we need a SKIP SGE.
		 */
		sgl = lpfc_ncmd->dma_sgl;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		/* Fill in word 3 / sgl_len during cmd submission */

		/* Initialize 64 bytes only */
		memset(wqe, 0, sizeof(union lpfc_wqe));

		if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
			atomic_inc(&ndlp->cmd_pending);
			lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
		}
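		/* The flag records this increment so that
		 * lpfc_release_nvme_buf() decrements cmd_pending exactly
		 * once, on whichever path releases the buffer.
		 */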

	} else {
		qp = &phba->sli4_hba.hdwq[idx];
		qp->empty_io_bufs++;
	}

	return lpfc_ncmd;
}

/**
 * lpfc_release_nvme_buf: Return an nvme buffer back to hba nvme buf list.
 * @phba: The Hba for which this call is being executed.
 * @lpfc_ncmd: The nvme buffer which is being released.
 *
 * This routine releases the @lpfc_ncmd nvme buffer by adding it to the tail
 * of the @phba lpfc_io_buf_list. For SLI4, XRIs are tied to the nvme buffer
 * and cannot be reused for at least RA_TOV amount of time if it was
 * aborted.
 **/
static void
lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;

	if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
		atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);

	lpfc_ncmd->ndlp = NULL;
	lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;

	qp = lpfc_ncmd->hdwq;
	if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6310 XB release deferred for "
				"ox_id x%x on reqtag x%x\n",
				lpfc_ncmd->cur_iocbq.sli4_xritag,
				lpfc_ncmd->cur_iocbq.iotag);

		spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
		list_add_tail(&lpfc_ncmd->list,
			      &qp->lpfc_abts_io_buf_list);
		qp->abts_nvme_io_bufs++;
		spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
	} else
		lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
}
/**
 * lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
 * @vport: the lpfc_vport instance requesting a localport.
 *
 * This routine is invoked to create an nvme localport instance to bind
 * to the nvme_fc_transport.  It is called once during driver load
 * like lpfc_create_shost after all other services are initialized.
 * It requires a vport, vpi, and wwns at call time.  Other localport
 * parameters are modified as the driver's FCID and the Fabric WWN
 * are established.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - no heap memory available
 *      other values - from nvme registration upcall
 **/
int
lpfc_nvme_create_localport(struct lpfc_vport *vport)
{
	int ret = 0;
	struct lpfc_hba *phba = vport->phba;
	struct nvme_fc_port_info nfcp_info;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;

	/* Initialize this localport instance.  The vport wwn usage ensures
	 * that NPIV is accounted for.
	 */
	memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
	nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);

	/* We need to tell the transport layer + 1 because it takes page
	 * alignment into account. When space for the SGL is allocated we
	 * allocate + 3, one for cmd, one for rsp and one for this alignment.
	 */
	lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
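
	/* Worked example of the accounting above (value illustrative): with
	 * cfg_nvme_seg_cnt = 64 data segments, the transport is told
	 * max_sgl_segments = 65, while the driver's own SGL allocation sizes
	 * for 64 + 3 = 67 entries (cmd, rsp, and the alignment slot).
	 */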
	/* Advertise how many hw queues we support based on cfg_hdw_queue,
	 * which will not exceed cpu count.
	 */
	lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	/* localport is allocated from the stack, but the registration
	 * call allocates heap memory as well as the private area.
	 */
	ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
					 &vport->phba->pcidev->dev, &localport);
	if (!ret) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
				 "6005 Successfully registered local "
				 "NVME port num %d, localP x%px, private "
				 "x%px, sg_seg %d\n",
				 localport->port_num, localport,
				 localport->private,
				 lpfc_nvme_template.max_sgl_segments);

		/* Private is our lport size declared in the template. */
		lport = (struct lpfc_nvme_lport *)localport->private;
		vport->localport = localport;
		lport->vport = vport;
		vport->nvmei_support = 1;

		atomic_set(&lport->xmt_fcp_noxri, 0);
		atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
		atomic_set(&lport->xmt_fcp_qdepth, 0);
		atomic_set(&lport->xmt_fcp_err, 0);
		atomic_set(&lport->xmt_fcp_wqerr, 0);
		atomic_set(&lport->xmt_fcp_abort, 0);
		atomic_set(&lport->xmt_ls_abort, 0);
		atomic_set(&lport->xmt_ls_err, 0);
		atomic_set(&lport->cmpl_fcp_xb, 0);
		atomic_set(&lport->cmpl_fcp_err, 0);
		atomic_set(&lport->cmpl_ls_xb, 0);
		atomic_set(&lport->cmpl_ls_err, 0);

		atomic_set(&lport->fc4NvmeLsRequests, 0);
		atomic_set(&lport->fc4NvmeLsCmpls, 0);
	}

	return ret;
}
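
/*
 * For reference, the registration above hands the transport
 * lpfc_nvme_template (forward-declared near the top of this file). A
 * minimal sketch of the relevant fields, assuming the shape of
 * struct nvme_fc_port_template from include/linux/nvme-fc-driver.h; the
 * full initializer lives elsewhere in this file:
 *
 *	static struct nvme_fc_port_template lpfc_nvme_template = {
 *		.create_queue     = lpfc_nvme_create_queue,
 *		.max_hw_queues    = 1,	// overwritten above from cfg_hdw_queue
 *		.max_sgl_segments = ...,	// overwritten above
 *		.local_priv_sz    = sizeof(struct lpfc_nvme_lport),
 *		.remote_priv_sz   = sizeof(struct lpfc_nvme_rport),
 *	};
 */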
#if (IS_ENABLED(CONFIG_NVME_FC))
/* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
 *
 * The driver has to wait for the host nvme transport to callback
 * indicating the localport has successfully unregistered all
 * resources.  Since this is an uninterruptible wait, loop every ten
 * seconds and print a message indicating no progress.
 *
 * An uninterruptible wait is used because of the risk of transport-to-
 * driver state mismatch.
 */
static void
lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
			   struct lpfc_nvme_lport *lport,
			   struct completion *lport_unreg_cmp)
{
	u32 wait_tmo;
	int ret, i, pending = 0;
	struct lpfc_sli_ring *pring;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli4_hdw_queue *qp;
	int abts_scsi, abts_nvme;

	/* Host transport has to clean up and confirm requiring an indefinite
	 * wait. Print a message if a 10 second wait expires and renew the
	 * wait. This is unexpected.
	 */
	wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
	while (true) {
		ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
		if (unlikely(!ret)) {
			pending = 0;
			abts_scsi = 0;
			abts_nvme = 0;
			for (i = 0; i < phba->cfg_hdw_queue; i++) {
				qp = &phba->sli4_hba.hdwq[i];
				pring = qp->io_wq->pring;
				if (!pring)
					continue;
				pending += pring->txcmplq_cnt;
				abts_scsi += qp->abts_scsi_io_bufs;
				abts_nvme += qp->abts_nvme_io_bufs;
			}
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "6176 Lport x%px Localport x%px wait "
					 "timed out. Pending %d [%d:%d]. "
					 "Renewing.\n",
					 lport, vport->localport, pending,
					 abts_scsi, abts_nvme);
			continue;
		}
		break;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
			 "6177 Lport x%px Localport x%px Complete Success\n",
			 lport, vport->localport);
}
#endif
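
/*
 * The completion waited on above is signaled from the transport's
 * localport_delete upcall. A minimal sketch of that counterpart, assuming
 * the callback shape used elsewhere in this file:
 *
 *	static void
 *	lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
 *	{
 *		struct lpfc_nvme_lport *lport = localport->private;
 *
 *		// release any threads waiting for the unreg to complete
 *		if (lport->vport->localport)
 *			complete(lport->lport_unreg_cmp);
 *	}
 */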
/**
 * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
 * @vport: pointer to a host virtual N_Port data structure
 *
 * This routine is invoked to destroy all lports bound to the phba.
 * The lport memory was allocated by the nvme fc transport and is
 * released there.  This routine ensures all rports bound to the
 * lport have been disconnected.
 **/
void
lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	int ret;
	DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);

	if (vport->nvmei_support == 0)
		return;

	localport = vport->localport;
	lport = (struct lpfc_nvme_lport *)localport->private;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6011 Destroying NVME localport x%px\n",
			 localport);

	/* lport's rport list is clear.  Unregister
	 * lport and release resources.
	 */
	lport->lport_unreg_cmp = &lport_unreg_cmp;
	ret = nvme_fc_unregister_localport(localport);

	/* Wait for completion.  This either blocks
	 * indefinitely or succeeds.
	 */
	lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
	vport->localport = NULL;

	/* Regardless of the unregister upcall response, clear
	 * nvmei_support.  All rports are unregistered and the
	 * driver will clean up.
	 */
	vport->nvmei_support = 0;
	if (!ret) {
		lpfc_printf_vlog(vport,
				 KERN_INFO, LOG_NVME_DISC,
				 "6009 Unregistered lport Success\n");
	} else {
		lpfc_printf_vlog(vport,
				 KERN_INFO, LOG_NVME_DISC,
				 "6010 Unregistered lport "
				 "Failed, status x%x\n",
				 ret);
	}
#endif
}
void
lpfc_nvme_update_localport(struct lpfc_vport *vport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;

	localport = vport->localport;
	if (!localport) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
				 "6710 Update NVME fail. No localport\n");
		return;
	}
	lport = (struct lpfc_nvme_lport *)localport->private;
	if (!lport) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
				 "6171 Update NVME fail. localP x%px, No lport\n",
				 localport);
		return;
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6012 Update NVME lport x%px did x%x\n",
			 localport, vport->fc_myDID);

	localport->port_id = vport->fc_myDID;
	if (localport->port_id == 0)
		localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
	else
		localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6030 bound lport x%px to DID x%06x\n",
			 lport, localport->port_id);
#endif
}
int
lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	int ret = 0;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nvme_rport *oldrport;
	struct nvme_fc_remote_port *remote_port;
	struct nvme_fc_port_info rpinfo;
	struct lpfc_nodelist *prev_ndlp = NULL;
	struct fc_rport *srport = ndlp->rport;

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
			 "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_type);

	localport = vport->localport;
	if (!localport)
		return 0;

	lport = (struct lpfc_nvme_lport *)localport->private;

	/* NVME rports are not preserved across devloss.
	 * Just register this instance.  Note, rpinfo->dev_loss_tmo
	 * is left 0 to indicate accept transport defaults.  The
	 * driver communicates port role capabilities consistent
	 * with the PRLI response data.
	 */
	memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info));
	rpinfo.port_id = ndlp->nlp_DID;
	if (ndlp->nlp_type & NLP_NVME_TARGET)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
	if (ndlp->nlp_type & NLP_NVME_INITIATOR)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;

	if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	if (srport)
		rpinfo.dev_loss_tmo = srport->dev_loss_tmo;
	else
		rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo;

	spin_lock_irq(&ndlp->lock);
	oldrport = lpfc_ndlp_get_nrport(ndlp);
	if (oldrport) {
		prev_ndlp = oldrport->ndlp;
		spin_unlock_irq(&ndlp->lock);
	} else {
		spin_unlock_irq(&ndlp->lock);
		if (!lpfc_nlp_get(ndlp)) {
			dev_warn(&vport->phba->pcidev->dev,
				 "Warning - No node ref - exit register\n");
			return 0;
		}
	}

	ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
	if (!ret) {
		/* If the ndlp already has an nrport, this is just
		 * a resume of the existing rport.  Else this is a
		 * new rport.
		 */
		/* Guard against an unregister/reregister
		 * race that leaves the WAIT flag set.
		 */
		spin_lock_irq(&ndlp->lock);
		ndlp->fc4_xpt_flags &= ~NLP_WAIT_FOR_UNREG;
		ndlp->fc4_xpt_flags |= NVME_XPT_REGD;
		spin_unlock_irq(&ndlp->lock);
		rport = remote_port->private;
		if (oldrport) {

			/* Sever the ndlp<->rport association
			 * before dropping the ndlp ref from
			 * register.
			 */
			spin_lock_irq(&ndlp->lock);
			ndlp->nrport = NULL;
			ndlp->fc4_xpt_flags &= ~NLP_WAIT_FOR_UNREG;
			spin_unlock_irq(&ndlp->lock);
			rport->ndlp = NULL;
			rport->remoteport = NULL;

			/* Reference only removed if previous NDLP is no longer
			 * active. It might be just a swap and removing the
			 * reference would cause a premature cleanup.
			 */
			if (prev_ndlp && prev_ndlp != ndlp) {
				if (!prev_ndlp->nrport)
					lpfc_nlp_put(prev_ndlp);
			}
		}

		/* Clean bind the rport to the ndlp. */
		rport->remoteport = remote_port;
		rport->lport = lport;
		rport->ndlp = ndlp;
		spin_lock_irq(&ndlp->lock);
		ndlp->nrport = rport;
		spin_unlock_irq(&ndlp->lock);
		lpfc_printf_vlog(vport, KERN_INFO,
				 LOG_NVME_DISC | LOG_NODE,
				 "6022 Bind lport x%px to remoteport x%px "
				 "rport x%px WWNN 0x%llx, "
				 "Rport WWPN 0x%llx DID "
				 "x%06x Role x%x, ndlp %p prev_ndlp x%px\n",
				 lport, remote_port, rport,
				 rpinfo.node_name, rpinfo.port_name,
				 rpinfo.port_id, rpinfo.port_role,
				 ndlp, prev_ndlp);
	} else {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_TRACE_EVENT,
				 "6031 RemotePort Registration failed "
				 "err: %d, DID x%06x\n",
				 ret, ndlp->nlp_DID);
	}

	return ret;
#else
	return 0;
#endif
}
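
/*
 * Worked example of the role mapping above: a node whose nlp_type carries
 * both NLP_NVME_TARGET and NLP_NVME_DISCOVERY registers with
 * rpinfo.port_role == (FC_PORT_ROLE_NVME_TARGET |
 * FC_PORT_ROLE_NVME_DISCOVERY), matching the capabilities reported in the
 * PRLI response.
 */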
/*
 * lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport
 *
 * If the ndlp represents an NVME Target, that we are logged into,
 * ping the NVME FC Transport layer to initiate a device rescan
 * on this remote NPort.
 */
void
lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct lpfc_nvme_rport *nrport;
	struct nvme_fc_remote_port *remoteport = NULL;

	spin_lock_irq(&ndlp->lock);
	nrport = lpfc_ndlp_get_nrport(ndlp);
	if (nrport)
		remoteport = nrport->remoteport;
	spin_unlock_irq(&ndlp->lock);

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6170 Rescan NPort DID x%06x type x%x "
			 "state x%x nrport x%px remoteport x%px\n",
			 ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state,
			 nrport, remoteport);

	if (!nrport || !remoteport)
		goto rescan_exit;

	/* Rescan only if the remoteport advertises the DISCOVERY role and
	 * the node has reached the MAPPED state.
	 */
	if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
	    ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		nvme_fc_rescan_remoteport(remoteport);

		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6172 NVME rescanned DID x%06x "
				 "port_state x%x\n",
				 ndlp->nlp_DID, remoteport->port_state);
	}
	return;
 rescan_exit:
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6169 Skip NVME Rport Rescan, NVME remoteport "
			 "unregistered\n");
#endif
}
/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
 *
 * There is no notion of Devloss or rport recovery from the current
 * nvme_transport perspective.  Loss of an rport just means IO cannot
 * be sent and recovery is completely up to the initiator.
 * For now, the driver just unbinds the DID and port_role so that
 * no further IO can be issued.  Changes are planned for later.
 *
 * Notes - the ndlp reference count is not decremented here since
 * there is no nvme_transport api for devloss.  Node ref count
 * is only adjusted in driver unload.
 */
void
lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	int ret;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct nvme_fc_remote_port *remoteport = NULL;

	localport = vport->localport;

	/* This is fundamental error.  The localport is always
	 * available until driver unload.  Just exit.
	 */
	if (!localport)
		return;

	lport = (struct lpfc_nvme_lport *)localport->private;
	if (!lport)
		goto input_err;

	spin_lock_irq(&ndlp->lock);
	rport = lpfc_ndlp_get_nrport(ndlp);
	if (rport)
		remoteport = rport->remoteport;
	spin_unlock_irq(&ndlp->lock);
	if (!remoteport)
		goto input_err;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6033 Unreg nvme remoteport x%px, portname x%llx, "
			 "port_id x%06x, portstate x%x port type x%x "
			 "refcnt %d\n",
			 remoteport, remoteport->port_name,
			 remoteport->port_id, remoteport->port_state,
			 ndlp->nlp_type, kref_read(&ndlp->kref));

	/* Sanity check ndlp type.  Only call for NVME ports. Don't
	 * clear any rport state until the transport calls back.
	 */

	if (ndlp->nlp_type & NLP_NVME_TARGET) {
		/* No concern about the role change on the nvme remoteport.
		 * The transport will update it.
		 */
		spin_lock_irq(&vport->phba->hbalock);
		ndlp->fc4_xpt_flags |= NLP_WAIT_FOR_UNREG;
		spin_unlock_irq(&vport->phba->hbalock);

		/* Don't let the host nvme transport keep sending keep-alives
		 * on this remoteport. Vport is unloading, no recovery. The
		 * return value is ignored.  The upcall is a courtesy to the
		 * transport.
		 */
		if (vport->load_flag & FC_UNLOADING)
			(void)nvme_fc_set_remoteport_devloss(remoteport, 0);

		ret = nvme_fc_unregister_remoteport(remoteport);

		/* The driver no longer knows if the nrport memory is valid
		 * because the controller teardown process has begun and
		 * is asynchronous.  Break the binding in the ndlp. Also
		 * remove the register ndlp reference to setup node release.
		 */
		ndlp->nrport = NULL;
		lpfc_nlp_put(ndlp);
		if (ret != 0) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "6167 NVME unregister failed %d "
					 "port_state x%x\n",
					 ret, remoteport->port_state);
		}
	}
	return;

 input_err:
#endif
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "6168 State error: lport x%px, rport x%px FCID x%06x\n",
			 vport->localport, ndlp->rport, ndlp->nlp_DID);
}
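
/*
 * The NLP_WAIT_FOR_UNREG flag set above is cleared once the transport
 * confirms teardown through its remoteport_delete upcall. A rough sketch of
 * that counterpart, assuming the callback shape used elsewhere in this file:
 *
 *	static void
 *	lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
 *	{
 *		struct lpfc_nvme_rport *rport = remoteport->private;
 *		struct lpfc_nodelist *ndlp = rport->ndlp;
 *
 *		spin_lock_irq(&ndlp->lock);
 *		ndlp->nrport = NULL;
 *		ndlp->fc4_xpt_flags &= ~NLP_WAIT_FOR_UNREG;
 *		spin_unlock_irq(&ndlp->lock);
 *		// ... drop the transport's node reference ...
 *	}
 */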
/**
 * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 * @lpfc_ncmd: The nvme job structure for the request being aborted.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVME aborted xri.  Aborted NVME IO commands are completed to the transport
 * here.
 **/
void
lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
			   struct sli4_wcqe_xri_aborted *axri,
			   struct lpfc_io_buf *lpfc_ncmd)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	struct nvmefc_fcp_req *nvme_cmd = NULL;
	struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp;

	if (ndlp)
		lpfc_sli4_abts_err_handler(phba, ndlp, axri);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6311 nvme_cmd %p xri x%x tag x%x abort complete and "
			"xri released\n",
			lpfc_ncmd->nvmeCmd, xri,
			lpfc_ncmd->cur_iocbq.iotag);

	/* Aborted NVME commands are required to not complete
	 * before the abort exchange command fully completes.
	 * Once completed, it is available via the put list.
	 */
	if (lpfc_ncmd->nvmeCmd) {
		nvme_cmd = lpfc_ncmd->nvmeCmd;
		nvme_cmd->done(nvme_cmd);
		lpfc_ncmd->nvmeCmd = NULL;
	}
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
}
/**
 * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
 * @phba: Pointer to HBA context object.
 *
 * This function flushes all wqes in the nvme rings and frees all resources
 * in the txcmplq. This function does not issue abort wqes for the IO
 * commands in txcmplq; they will just be returned with
 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
 * slot has been permanently disabled.
 **/
void
lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	u32 i, wait_cnt = 0;

	if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
		return;

	/* Cycle through all IO rings and make sure all outstanding
	 * WQEs have been removed from the txcmplqs.
	 */
	for (i = 0; i < phba->cfg_hdw_queue; i++) {
		if (!phba->sli4_hba.hdwq[i].io_wq)
			continue;
		pring = phba->sli4_hba.hdwq[i].io_wq->pring;

		if (!pring)
			continue;

		/* Retrieve everything on the txcmplq */
		while (!list_empty(&pring->txcmplq)) {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_cnt++;

			/* The sleep is 10mS.  Every ten seconds,
			 * dump a message.  Something is wrong.
			 */
			if ((wait_cnt % 1000) == 0) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6178 NVME IO not empty, "
						"cnt %d\n", wait_cnt);
			}
		}
	}

	/* Make sure HBA is alive */
	lpfc_issue_hb_tmo(phba);
}
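
/*
 * Timing note on the drain loop in lpfc_nvme_wait_for_io_drain() above:
 * per its comment, each iteration sleeps LPFC_XRI_EXCH_BUSY_WAIT_T1 = 10 ms,
 * so (wait_cnt % 1000) == 0 fires roughly every 1000 * 10 ms = 10 seconds,
 * matching the "every ten seconds" log cadence.
 */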
void
lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
		      uint32_t stat, uint32_t param)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct lpfc_io_buf *lpfc_ncmd;
	struct nvmefc_fcp_req *nCmd;
	struct lpfc_wcqe_complete wcqe;
	struct lpfc_wcqe_complete *wcqep = &wcqe;

	lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1;
	if (!lpfc_ncmd) {
		lpfc_sli_release_iocbq(phba, pwqeIn);
		return;
	}
	/* For abort iocb just return, IO iocb will do a done call */
	if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) ==
	    CMD_ABORT_XRI_CX) {
		lpfc_sli_release_iocbq(phba, pwqeIn);
		return;
	}

	spin_lock(&lpfc_ncmd->buf_lock);
	nCmd = lpfc_ncmd->nvmeCmd;
	if (!nCmd) {
		spin_unlock(&lpfc_ncmd->buf_lock);
		lpfc_release_nvme_buf(phba, lpfc_ncmd);
		return;
	}
	spin_unlock(&lpfc_ncmd->buf_lock);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
			"6194 NVME Cancel xri %x\n",
			lpfc_ncmd->cur_iocbq.sli4_xritag);

	wcqep->word0 = 0;
	bf_set(lpfc_wcqe_c_status, wcqep, stat);
	wcqep->parameter = param;
	wcqep->word3 = 0; /* xb is 0 */

	/* Call release with XB=1 to queue the IO into the abort list. */
	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
		bf_set(lpfc_wcqe_c_xb, wcqep, 1);

	(pwqeIn->wqe_cmpl)(phba, pwqeIn, wcqep);
#endif
}