1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 ********************************************************************/
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <asm/unaligned.h>
28 #include <linux/crc-t10dif.h>
29 #include <net/checksum.h>
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_eh.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_tcq.h>
36 #include <scsi/scsi_transport_fc.h>
37 #include <scsi/fc/fc_fs.h>
39 #include <../drivers/nvme/host/nvme.h>
40 #include <linux/nvme-fc-driver.h>
42 #include "lpfc_version.h"
46 #include "lpfc_sli4.h"
48 #include "lpfc_disc.h"
50 #include "lpfc_scsi.h"
51 #include "lpfc_nvme.h"
52 #include "lpfc_nvmet.h"
53 #include "lpfc_logmsg.h"
54 #include "lpfc_crtn.h"
55 #include "lpfc_vport.h"
56 #include "lpfc_debugfs.h"
58 static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
59 struct lpfc_nvmet_rcv_ctx *,
62 static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
63 struct lpfc_nvmet_rcv_ctx *);
64 static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
65 struct lpfc_nvmet_rcv_ctx *,
67 static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
68 struct lpfc_nvmet_rcv_ctx *,
70 static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
71 struct lpfc_nvmet_rcv_ctx *,
75 lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
79 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
80 "6313 NVMET Defer ctx release xri x%x flg x%x\n",
81 ctxp->oxid, ctxp->flag);
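/*
 * LPFC_NVMET_CTX_RLS guards against queuing the same context twice: if it
 * is already set, the context is already on the lpfc_abts_nvmet_ctx_list
 * and the release stays deferred to the abort path.
 */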
83 spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
84 if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
85 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
89 ctxp->flag |= LPFC_NVMET_CTX_RLS;
90 list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
91 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
95 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
96 * @phba: Pointer to HBA context object.
97 * @cmdwqe: Pointer to driver command WQE object.
98 * @wcqe: Pointer to driver response CQE object.
100 * The function is called from the SLI ring event handler with no
101 * lock held. This function is the completion handler for NVME LS commands.
102 * The function frees memory resources used for the NVME commands.
105 lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
106 struct lpfc_wcqe_complete *wcqe)
108 struct lpfc_nvmet_tgtport *tgtp;
109 struct nvmefc_tgt_ls_req *rsp;
110 struct lpfc_nvmet_rcv_ctx *ctxp;
111 uint32_t status, result;
113 status = bf_get(lpfc_wcqe_c_status, wcqe);
114 result = wcqe->parameter;
115 if (!phba->targetport)
118 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
121 atomic_inc(&tgtp->xmt_ls_rsp_error);
123 atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
126 ctxp = cmdwqe->context2;
127 rsp = &ctxp->ctx.ls_req;
129 lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
130 ctxp->oxid, status, result);
132 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
133 "6038 %s: Entrypoint: ctx %p status %x/%x\n", __func__,
134 ctxp, status, result);
136 lpfc_nlp_put(cmdwqe->context1);
137 cmdwqe->context2 = NULL;
138 cmdwqe->context3 = NULL;
139 lpfc_sli_release_iocbq(phba, cmdwqe);
145 * lpfc_nvmet_rq_post - Repost a NVMET RQ DMA buffer and clean up context
146 * @phba: HBA buffer is associated with
147 * @ctxp: context to clean up
148 * @mp: Buffer to free
150 * Description: Frees the given DMA buffer by reposting it to its
151 * associated RQ so it can be reused.
153 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
158 lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp,
159 struct lpfc_dmabuf *mp)
163 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
164 "6314 rq_post ctx xri x%x flag x%x\n",
165 ctxp->oxid, ctxp->flag);
168 pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
171 ctxp->txrdy_phys = 0;
173 ctxp->state = LPFC_NVMET_STE_FREE;
175 lpfc_rq_buf_free(phba, mp);
178 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
180 lpfc_nvmet_ktime(struct lpfc_hba *phba,
181 struct lpfc_nvmet_rcv_ctx *ctxp)
183 uint64_t seg1, seg2, seg3, seg4, seg5;
184 uint64_t seg6, seg7, seg8, seg9, seg10;
189 if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
190 !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
191 !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
192 !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
193 !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
196 if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
198 if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
200 if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
202 if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
204 if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
206 if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
208 if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
210 if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
212 if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
215 * Segment 1 - Time from FCP command received by MSI-X ISR
216 * to FCP command is passed to NVME Layer.
217 * Segment 2 - Time from FCP command payload handed
218 * off to NVME Layer to Driver receives a Command op from NVME Layer.
220 * Segment 3 - Time from Driver receives a Command op
221 * from NVME Layer to Command is put on WQ.
222 * Segment 4 - Time from Driver WQ put is done
223 * to MSI-X ISR for Command cmpl.
224 * Segment 5 - Time from MSI-X ISR for Command cmpl to
225 * Command cmpl is passed to NVME Layer.
226 * Segment 6 - Time from Command cmpl is passed to NVME
227 * Layer to Driver receives a RSP op from NVME Layer.
228 * Segment 7 - Time from Driver receives a RSP op from
229 * NVME Layer to WQ put is done on TRSP FCP Status.
230 * Segment 8 - Time from Driver WQ put is done on TRSP
231 * FCP Status to MSI-X ISR for TRSP cmpl.
232 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
233 * TRSP cmpl is passed to NVME Layer.
234 * Segment 10 - Time from FCP command received by
235 * MSI-X ISR to command is completed on wire.
236 * (Segments 1 thru 8) for READDATA / WRITEDATA
237 * (Segments 1 thru 4) for READDATA_RSP
239 seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
240 seg2 = (ctxp->ts_nvme_data - ctxp->ts_isr_cmd) - seg1;
241 seg3 = (ctxp->ts_data_wqput - ctxp->ts_isr_cmd) -
243 seg4 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd) -
245 seg5 = (ctxp->ts_data_nvme - ctxp->ts_isr_cmd) -
246 seg1 - seg2 - seg3 - seg4;
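/*
 * Each segment is the cumulative delta from ts_isr_cmd minus all earlier
 * segments, so it telescopes to the delta between the two consecutive
 * timestamps bounding that stage.
 */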
248 /* For auto rsp commands seg6 thru seg10 will be 0 */
249 if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
250 seg6 = (ctxp->ts_nvme_status -
252 seg1 - seg2 - seg3 - seg4 - seg5;
253 seg7 = (ctxp->ts_status_wqput -
257 seg8 = (ctxp->ts_isr_status -
259 seg1 - seg2 - seg3 - seg4 -
261 seg9 = (ctxp->ts_status_nvme -
263 seg1 - seg2 - seg3 - seg4 -
264 seg5 - seg6 - seg7 - seg8;
265 seg10 = (ctxp->ts_isr_status -
272 seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
275 phba->ktime_seg1_total += seg1;
276 if (seg1 < phba->ktime_seg1_min)
277 phba->ktime_seg1_min = seg1;
278 else if (seg1 > phba->ktime_seg1_max)
279 phba->ktime_seg1_max = seg1;
281 phba->ktime_seg2_total += seg2;
282 if (seg2 < phba->ktime_seg2_min)
283 phba->ktime_seg2_min = seg2;
284 else if (seg2 > phba->ktime_seg2_max)
285 phba->ktime_seg2_max = seg2;
287 phba->ktime_seg3_total += seg3;
288 if (seg3 < phba->ktime_seg3_min)
289 phba->ktime_seg3_min = seg3;
290 else if (seg3 > phba->ktime_seg3_max)
291 phba->ktime_seg3_max = seg3;
293 phba->ktime_seg4_total += seg4;
294 if (seg4 < phba->ktime_seg4_min)
295 phba->ktime_seg4_min = seg4;
296 else if (seg4 > phba->ktime_seg4_max)
297 phba->ktime_seg4_max = seg4;
299 phba->ktime_seg5_total += seg5;
300 if (seg5 < phba->ktime_seg5_min)
301 phba->ktime_seg5_min = seg5;
302 else if (seg5 > phba->ktime_seg5_max)
303 phba->ktime_seg5_max = seg5;
305 phba->ktime_data_samples++;
309 phba->ktime_seg6_total += seg6;
310 if (seg6 < phba->ktime_seg6_min)
311 phba->ktime_seg6_min = seg6;
312 else if (seg6 > phba->ktime_seg6_max)
313 phba->ktime_seg6_max = seg6;
315 phba->ktime_seg7_total += seg7;
316 if (seg7 < phba->ktime_seg7_min)
317 phba->ktime_seg7_min = seg7;
318 else if (seg7 > phba->ktime_seg7_max)
319 phba->ktime_seg7_max = seg7;
321 phba->ktime_seg8_total += seg8;
322 if (seg8 < phba->ktime_seg8_min)
323 phba->ktime_seg8_min = seg8;
324 else if (seg8 > phba->ktime_seg8_max)
325 phba->ktime_seg8_max = seg8;
327 phba->ktime_seg9_total += seg9;
328 if (seg9 < phba->ktime_seg9_min)
329 phba->ktime_seg9_min = seg9;
330 else if (seg9 > phba->ktime_seg9_max)
331 phba->ktime_seg9_max = seg9;
333 phba->ktime_seg10_total += seg10;
334 if (seg10 < phba->ktime_seg10_min)
335 phba->ktime_seg10_min = seg10;
336 else if (seg10 > phba->ktime_seg10_max)
337 phba->ktime_seg10_max = seg10;
338 phba->ktime_status_samples++;
343 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
344 * @phba: Pointer to HBA context object.
345 * @cmdwqe: Pointer to driver command WQE object.
346 * @wcqe: Pointer to driver response CQE object.
348 * The function is called from the SLI ring event handler with no
349 * lock held. This function is the completion handler for NVME FCP commands.
350 * The function frees memory resources used for the NVME commands.
353 lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
354 struct lpfc_wcqe_complete *wcqe)
356 struct lpfc_nvmet_tgtport *tgtp;
357 struct nvmefc_tgt_fcp_req *rsp;
358 struct lpfc_nvmet_rcv_ctx *ctxp;
359 uint32_t status, result, op, start_clean;
360 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
364 ctxp = cmdwqe->context2;
365 ctxp->flag &= ~LPFC_NVMET_IO_INP;
367 rsp = &ctxp->ctx.fcp_req;
370 status = bf_get(lpfc_wcqe_c_status, wcqe);
371 result = wcqe->parameter;
373 if (phba->targetport)
374 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
378 lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
379 ctxp->oxid, op, status);
382 rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
383 rsp->transferred_length = 0;
385 atomic_inc(&tgtp->xmt_fcp_rsp_error);
387 /* pick up SLI4 exchange busy condition */
388 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
389 ctxp->flag |= LPFC_NVMET_XBUSY;
391 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
392 "6315 IO Cmpl XBUSY: xri x%x: %x/%x\n",
393 ctxp->oxid, status, result);
395 ctxp->flag &= ~LPFC_NVMET_XBUSY;
399 rsp->fcp_error = NVME_SC_SUCCESS;
400 if (op == NVMET_FCOP_RSP)
401 rsp->transferred_length = rsp->rsplen;
403 rsp->transferred_length = rsp->transfer_length;
405 atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
408 if ((op == NVMET_FCOP_READDATA_RSP) ||
409 (op == NVMET_FCOP_RSP)) {
411 ctxp->state = LPFC_NVMET_STE_DONE;
414 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
415 if (phba->ktime_on) {
416 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
418 cmdwqe->isr_timestamp;
421 ctxp->ts_nvme_status =
423 ctxp->ts_status_wqput =
425 ctxp->ts_isr_status =
427 ctxp->ts_status_nvme =
430 ctxp->ts_isr_status =
431 cmdwqe->isr_timestamp;
432 ctxp->ts_status_nvme =
436 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
437 id = smp_processor_id();
439 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
440 "6703 CPU Check cmpl: "
441 "cpu %d expect %d\n",
443 if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
444 phba->cpucheck_cmpl_io[id]++;
448 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
450 lpfc_nvmet_ktime(phba, ctxp);
452 /* lpfc_nvmet_xmt_fcp_release() will recycle the context */
455 start_clean = offsetof(struct lpfc_iocbq, wqe);
456 memset(((char *)cmdwqe) + start_clean, 0,
457 (sizeof(struct lpfc_iocbq) - start_clean));
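/*
 * Only the fields from the wqe member onward are cleared; the iocbq
 * bookkeeping that precedes it (list linkage, iotag, XRI tags) survives
 * so the WQE can be reused for the next phase of this exchange.
 */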
458 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
459 if (phba->ktime_on) {
460 ctxp->ts_isr_data = cmdwqe->isr_timestamp;
461 ctxp->ts_data_nvme = ktime_get_ns();
463 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
464 id = smp_processor_id();
466 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
467 "6704 CPU Check cmdcmpl: "
468 "cpu %d expect %d\n",
470 if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
471 phba->cpucheck_ccmpl_io[id]++;
479 lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
480 struct nvmefc_tgt_ls_req *rsp)
482 struct lpfc_nvmet_rcv_ctx *ctxp =
483 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
484 struct lpfc_hba *phba = ctxp->phba;
485 struct hbq_dmabuf *nvmebuf =
486 (struct hbq_dmabuf *)ctxp->rqb_buffer;
487 struct lpfc_iocbq *nvmewqeq;
488 struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
489 struct lpfc_dmabuf dmabuf;
490 struct ulp_bde64 bpl;
493 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
494 "6023 %s: Entrypoint ctx %p %p\n", __func__,
497 nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
499 if (nvmewqeq == NULL) {
500 atomic_inc(&nvmep->xmt_ls_drop);
501 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
502 "6150 LS Drop IO x%x: Prep\n",
504 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
505 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
506 ctxp->sid, ctxp->oxid);
510 /* Save numBdes for bpl2sgl */
512 nvmewqeq->hba_wqidx = 0;
513 nvmewqeq->context3 = &dmabuf;
515 bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
516 bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
517 bpl.tus.f.bdeSize = rsp->rsplen;
518 bpl.tus.f.bdeFlags = 0;
519 bpl.tus.w = le32_to_cpu(bpl.tus.w);
521 nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
522 nvmewqeq->iocb_cmpl = NULL;
523 nvmewqeq->context2 = ctxp;
525 lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n",
526 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);
528 rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
529 if (rc == WQE_SUCCESS) {
531 * Okay to repost buffer here, but wait till cmpl
532 * before freeing ctxp and iocbq.
534 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
535 ctxp->rqb_buffer = NULL;
536 atomic_inc(&nvmep->xmt_ls_rsp);
539 /* Give back resources */
540 atomic_inc(&nvmep->xmt_ls_drop);
541 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
542 "6151 LS Drop IO x%x: Issue %d\n",
545 lpfc_nlp_put(nvmewqeq->context1);
547 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
548 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
553 lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
554 struct nvmefc_tgt_fcp_req *rsp)
556 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
557 struct lpfc_nvmet_rcv_ctx *ctxp =
558 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
559 struct lpfc_hba *phba = ctxp->phba;
560 struct lpfc_iocbq *nvmewqeq;
563 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
564 if (phba->ktime_on) {
565 if (rsp->op == NVMET_FCOP_RSP)
566 ctxp->ts_nvme_status = ktime_get_ns();
568 ctxp->ts_nvme_data = ktime_get_ns();
570 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
571 int id = smp_processor_id();
573 if (id < LPFC_CHECK_CPU_CNT)
574 phba->cpucheck_xmt_io[id]++;
575 if (rsp->hwqid != id) {
576 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
577 "6705 CPU Check OP: "
578 "cpu %d expect %d\n",
580 ctxp->cpu = rsp->hwqid;
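/* If an ABTS was received or an abort is pending, drop this op. */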
586 if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
587 (ctxp->state == LPFC_NVMET_STE_ABORT)) {
588 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
589 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
590 "6102 IO xri x%x aborted\n",
596 nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
597 if (nvmewqeq == NULL) {
598 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
599 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
600 "6152 FCP Drop IO x%x: Prep\n",
606 nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
607 nvmewqeq->iocb_cmpl = NULL;
608 nvmewqeq->context2 = ctxp;
609 nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
610 ctxp->wqeq->hba_wqidx = rsp->hwqid;
612 lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
613 ctxp->oxid, rsp->op, rsp->rsplen);
615 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
616 if (rc == WQE_SUCCESS) {
617 ctxp->flag |= LPFC_NVMET_IO_INP;
618 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
621 if (rsp->op == NVMET_FCOP_RSP)
622 ctxp->ts_status_wqput = ktime_get_ns();
624 ctxp->ts_data_wqput = ktime_get_ns();
629 /* Give back resources */
630 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
631 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
632 "6153 FCP Drop IO x%x: Issue: %d\n",
635 ctxp->wqeq->hba_wqidx = 0;
636 nvmewqeq->context2 = NULL;
637 nvmewqeq->context3 = NULL;
644 lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
646 struct lpfc_nvmet_tgtport *tport = targetport->private;
648 /* release any threads waiting for the unreg to complete */
649 complete(&tport->tport_unreg_done);
653 lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
654 struct nvmefc_tgt_fcp_req *req)
656 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
657 struct lpfc_nvmet_rcv_ctx *ctxp =
658 container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
659 struct lpfc_hba *phba = ctxp->phba;
662 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
663 "6103 Abort op: oxri x%x flg x%x cnt %d\n",
664 ctxp->oxid, ctxp->flag, ctxp->entry_cnt);
666 lpfc_nvmeio_data(phba, "NVMET FCP ABRT: "
667 "xri x%x flg x%x cnt x%x\n",
668 ctxp->oxid, ctxp->flag, ctxp->entry_cnt);
670 atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
672 spin_lock_irqsave(&ctxp->ctxlock, flags);
674 /* Since iaab/iaar are NOT set, we need to check
675 * if the firmware is in the process of aborting this IO
677 if (ctxp->flag & LPFC_NVMET_XBUSY) {
678 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
681 ctxp->flag |= LPFC_NVMET_ABORT_OP;
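/*
 * If the IO is already active on a WQ (LPFC_NVMET_IO_INP), abort the
 * solicited exchange; otherwise issue an unsolicited abort for an
 * exchange the driver never started.
 */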
682 if (ctxp->flag & LPFC_NVMET_IO_INP)
683 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
686 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
688 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
692 lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
693 struct nvmefc_tgt_fcp_req *rsp)
695 struct lpfc_nvmet_rcv_ctx *ctxp =
696 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
697 struct lpfc_hba *phba = ctxp->phba;
699 bool aborting = false;
701 spin_lock_irqsave(&ctxp->ctxlock, flags);
702 if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
703 (ctxp->flag & LPFC_NVMET_XBUSY)) {
705 /* let the abort path do the real release */
706 lpfc_nvmet_defer_release(phba, ctxp);
708 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
710 lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid,
716 lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
719 static struct nvmet_fc_target_template lpfc_tgttemplate = {
720 .targetport_delete = lpfc_nvmet_targetport_delete,
721 .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
722 .fcp_op = lpfc_nvmet_xmt_fcp_op,
723 .fcp_abort = lpfc_nvmet_xmt_fcp_abort,
724 .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
727 .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
728 .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
729 .dma_boundary = 0xFFFFFFFF,
731 /* optional features */
732 .target_features = 0,
733 /* sizes of additional private data for data structures */
734 .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
738 lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
740 struct lpfc_vport *vport = phba->pport;
741 struct lpfc_nvmet_tgtport *tgtp;
742 struct nvmet_fc_port_info pinfo;
745 if (phba->targetport)
748 memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
749 pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
750 pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
751 pinfo.port_id = vport->fc_myDID;
753 /* Limit to LPFC_MAX_NVME_SEG_CNT.
754 * For now need + 1 to get around NVME transport logic.
756 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
757 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
758 "6400 Reducing sg segment cnt to %d\n",
759 LPFC_MAX_NVME_SEG_CNT);
760 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
762 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
764 lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
765 lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
766 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
767 NVMET_FCTGTFEAT_CMD_IN_ISR |
768 NVMET_FCTGTFEAT_OPDONE_IN_ISR;
770 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
771 error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
778 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
779 "6025 Cannot register NVME targetport "
781 phba->targetport = NULL;
783 tgtp = (struct lpfc_nvmet_tgtport *)
784 phba->targetport->private;
787 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
788 "6026 Registered NVME "
789 "targetport: %p, private %p "
790 "portnm %llx nodenm %llx\n",
791 phba->targetport, tgtp,
792 pinfo.port_name, pinfo.node_name);
794 atomic_set(&tgtp->rcv_ls_req_in, 0);
795 atomic_set(&tgtp->rcv_ls_req_out, 0);
796 atomic_set(&tgtp->rcv_ls_req_drop, 0);
797 atomic_set(&tgtp->xmt_ls_abort, 0);
798 atomic_set(&tgtp->xmt_ls_rsp, 0);
799 atomic_set(&tgtp->xmt_ls_drop, 0);
800 atomic_set(&tgtp->xmt_ls_rsp_error, 0);
801 atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
802 atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
803 atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
804 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
805 atomic_set(&tgtp->xmt_fcp_abort, 0);
806 atomic_set(&tgtp->xmt_fcp_drop, 0);
807 atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
808 atomic_set(&tgtp->xmt_fcp_read, 0);
809 atomic_set(&tgtp->xmt_fcp_write, 0);
810 atomic_set(&tgtp->xmt_fcp_rsp, 0);
811 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
812 atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
813 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
814 atomic_set(&tgtp->xmt_abort_rsp, 0);
815 atomic_set(&tgtp->xmt_abort_rsp_error, 0);
816 atomic_set(&tgtp->xmt_abort_cmpl, 0);
822 lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
824 struct lpfc_vport *vport = phba->pport;
826 if (!phba->targetport)
829 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
830 "6007 Update NVMET port %p did x%x\n",
831 phba->targetport, vport->fc_myDID);
833 phba->targetport->port_id = vport->fc_myDID;
838 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
839 * @phba: pointer to lpfc hba data structure.
840 * @axri: pointer to the nvmet xri abort wcqe structure.
842 * This routine is invoked by the worker thread to process a SLI4 fast-path
846 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
847 struct sli4_wcqe_xri_aborted *axri)
849 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
850 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
851 struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
852 struct lpfc_nodelist *ndlp;
853 unsigned long iflag = 0;
855 bool released = false;
857 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
858 "6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
860 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
862 spin_lock_irqsave(&phba->hbalock, iflag);
863 spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
864 list_for_each_entry_safe(ctxp, next_ctxp,
865 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
867 if (ctxp->rqb_buffer->sglq->sli4_xritag != xri)
870 /* Check if we already received a free context call
871 * and we have completed processing an abort situation.
873 if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
874 !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
875 list_del(&ctxp->list);
878 ctxp->flag &= ~LPFC_NVMET_XBUSY;
879 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
881 rrq_empty = list_empty(&phba->active_rrq_list);
882 spin_unlock_irqrestore(&phba->hbalock, iflag);
883 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
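/*
 * While the remote node is still logged in, park the aborted XRI on the
 * RRQ list so it is not reused with this node until the RRQ resolves.
 */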
884 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
885 (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
886 ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
887 lpfc_set_rrq_active(phba, ndlp,
888 ctxp->rqb_buffer->sglq->sli4_lxritag,
890 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
893 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
894 "6318 XB aborted %x flg x%x (%x)\n",
895 ctxp->oxid, ctxp->flag, released);
897 lpfc_nvmet_rq_post(phba, ctxp,
898 &ctxp->rqb_buffer->hbuf);
900 lpfc_worker_wake_up(phba);
903 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
904 spin_unlock_irqrestore(&phba->hbalock, iflag);
908 lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
909 struct fc_frame_header *fc_hdr)
912 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
913 struct lpfc_hba *phba = vport->phba;
914 struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
915 struct nvmefc_tgt_fcp_req *rsp;
917 unsigned long iflag = 0;
919 xri = be16_to_cpu(fc_hdr->fh_ox_id);
921 spin_lock_irqsave(&phba->hbalock, iflag);
922 spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
923 list_for_each_entry_safe(ctxp, next_ctxp,
924 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
926 if (ctxp->rqb_buffer->sglq->sli4_xritag != xri)
929 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
930 spin_unlock_irqrestore(&phba->hbalock, iflag);
932 spin_lock_irqsave(&ctxp->ctxlock, iflag);
933 ctxp->flag |= LPFC_NVMET_ABTS_RCV;
934 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
936 lpfc_nvmeio_data(phba,
937 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
938 xri, smp_processor_id(), 0);
940 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
941 "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
943 rsp = &ctxp->ctx.fcp_req;
944 nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
946 /* Respond with BA_ACC accordingly */
947 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
950 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
951 spin_unlock_irqrestore(&phba->hbalock, iflag);
953 lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
954 xri, smp_processor_id(), 1);
956 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
957 "6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
959 /* Respond with BA_RJT accordingly */
960 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
966 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
968 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
969 struct lpfc_nvmet_tgtport *tgtp;
971 if (phba->nvmet_support == 0)
973 if (phba->targetport) {
974 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
975 init_completion(&tgtp->tport_unreg_done);
976 nvmet_fc_unregister_targetport(phba->targetport);
977 wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
979 phba->targetport = NULL;
984 * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
985 * @phba: pointer to lpfc hba data structure.
986 * @pring: pointer to a SLI ring.
987 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
989 * This routine is used for processing the WQE associated with an unsolicited
990 * event. It first determines whether there is an existing ndlp that matches
991 * the DID from the unsolicited WQE. If not, it will create a new one with
992 * the DID from the unsolicited WQE. The NVME LS command from the unsolicited
993 * WQE is then used to invoke the proper routine and to set up proper state
994 * of the discovery state machine.
997 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
998 struct hbq_dmabuf *nvmebuf)
1000 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1001 struct lpfc_nvmet_tgtport *tgtp;
1002 struct fc_frame_header *fc_hdr;
1003 struct lpfc_nvmet_rcv_ctx *ctxp;
1005 uint32_t size, oxid, sid, rc;
1007 if (!nvmebuf || !phba->targetport) {
1008 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1009 "6154 LS Drop IO\n");
1016 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1017 payload = (uint32_t *)(nvmebuf->dbuf.virt);
1018 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1019 size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
1020 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1021 sid = sli4_sid_from_fc_hdr(fc_hdr);
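/*
 * LS contexts are allocated per request; GFP_ATOMIC since this can be
 * called from the unsolicited receive path.
 */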
1023 ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
1025 atomic_inc(&tgtp->rcv_ls_req_drop);
1026 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1027 "6155 LS Drop IO x%x: Alloc\n",
1030 lpfc_nvmeio_data(phba, "NVMET LS DROP: "
1031 "xri x%x sz %d from %06x\n",
1034 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1042 ctxp->state = LPFC_NVMET_STE_RCV;
1043 ctxp->rqb_buffer = (void *)nvmebuf;
1045 lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
1048 * The calling sequence should be:
1049 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp ->_req->done
1050 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
1052 atomic_inc(&tgtp->rcv_ls_req_in);
1053 rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
1056 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1057 "6037 %s: ctx %p sz %d rc %d: %08x %08x %08x "
1058 "%08x %08x %08x\n", __func__, ctxp, size, rc,
1059 *payload, *(payload+1), *(payload+2),
1060 *(payload+3), *(payload+4), *(payload+5));
1063 atomic_inc(&tgtp->rcv_ls_req_out);
1067 lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n",
1070 atomic_inc(&tgtp->rcv_ls_req_drop);
1071 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1072 "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
1075 /* We assume a rcv'ed cmd ALWAYS fits into 1 buffer */
1077 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1079 atomic_inc(&tgtp->xmt_ls_abort);
1080 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
1085 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
1086 * @phba: pointer to lpfc hba data structure.
1087 * @pring: pointer to a SLI ring.
1088 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1090 * This routine is used for processing the WQE associated with an unsolicited
1091 * event. It first determines whether there is an existing ndlp that matches
1092 * the DID from the unsolicited WQE. If not, it will create a new one with
1093 * the DID from the unsolicited WQE. The NVME FCP command from the unsolicited
1094 * WQE is then used to invoke the proper routine and to set up proper state
1095 * of the discovery state machine.
1098 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
1099 struct lpfc_sli_ring *pring,
1100 struct rqb_dmabuf *nvmebuf,
1101 uint64_t isr_timestamp)
1103 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1104 struct lpfc_nvmet_rcv_ctx *ctxp;
1105 struct lpfc_nvmet_tgtport *tgtp;
1106 struct fc_frame_header *fc_hdr;
1108 uint32_t size, oxid, sid, rc;
1109 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1113 if (!nvmebuf || !phba->targetport) {
1114 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1115 "6157 FCP Drop IO\n");
1123 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1124 payload = (uint32_t *)(nvmebuf->dbuf.virt);
1125 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1126 size = nvmebuf->bytes_recv;
1127 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1128 sid = sli4_sid_from_fc_hdr(fc_hdr);
1130 ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmebuf->context;
1132 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
1133 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1134 "6158 FCP Drop IO x%x: Alloc\n",
1136 lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
1137 /* Cannot send ABTS without context */
1140 memset(ctxp, 0, sizeof(ctxp->ctx));
1148 ctxp->state = LPFC_NVMET_STE_RCV;
1149 ctxp->rqb_buffer = nvmebuf;
1150 ctxp->entry_cnt = 1;
1152 spin_lock_init(&ctxp->ctxlock);
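/* Seed the ktime segments; only ts_isr_cmd and ts_cmd_nvme are known yet. */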
1154 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1155 if (phba->ktime_on) {
1156 ctxp->ts_isr_cmd = isr_timestamp;
1157 ctxp->ts_cmd_nvme = ktime_get_ns();
1158 ctxp->ts_nvme_data = 0;
1159 ctxp->ts_data_wqput = 0;
1160 ctxp->ts_isr_data = 0;
1161 ctxp->ts_data_nvme = 0;
1162 ctxp->ts_nvme_status = 0;
1163 ctxp->ts_status_wqput = 0;
1164 ctxp->ts_isr_status = 0;
1165 ctxp->ts_status_nvme = 0;
1168 if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
1169 id = smp_processor_id();
1170 if (id < LPFC_CHECK_CPU_CNT)
1171 phba->cpucheck_rcv_io[id]++;
1175 lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
1176 oxid, size, smp_processor_id());
1178 atomic_inc(&tgtp->rcv_fcp_cmd_in);
1180 * The calling sequence should be:
1181 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
1182 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
1184 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
1187 /* Process FCP command */
1189 atomic_inc(&tgtp->rcv_fcp_cmd_out);
1193 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
1194 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1195 "6159 FCP Drop IO x%x: err x%x\n",
1198 lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
1201 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
1206 nvmebuf->iocbq->hba_wqidx = 0;
1207 /* We assume a rcv'ed cmd ALWAYS fits into 1 buffer */
1208 lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
1214 * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
1215 * @phba: pointer to lpfc hba data structure.
1216 * @pring: pointer to a SLI ring.
1217 * @nvmebuf: pointer to received nvme data structure.
1219 * This routine is used to process an unsolicited event received from a SLI
1220 * (Service Level Interface) ring. The actual processing of the data buffer
1221 * associated with the unsolicited event is done by invoking the routine
1222 * lpfc_nvmet_unsol_ls_buffer() after properly set up the buffer from the
1223 * SLI RQ on which the unsolicited event was received.
1226 lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1227 struct lpfc_iocbq *piocb)
1229 struct lpfc_dmabuf *d_buf;
1230 struct hbq_dmabuf *nvmebuf;
1232 d_buf = piocb->context2;
1233 nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
1235 if (phba->nvmet_support == 0) {
1236 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1239 lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
1243 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
1244 * @phba: pointer to lpfc hba data structure.
1245 * @pring: pointer to a SLI ring.
1246 * @nvmebuf: pointer to received nvme data structure.
1248 * This routine is used to process an unsolicited event received from a SLI
1249 * (Service Level Interface) ring. The actual processing of the data buffer
1250 * associated with the unsolicited event is done by invoking the routine
1251 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
1252 * SLI RQ on which the unsolicited event was received.
1255 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
1256 struct lpfc_sli_ring *pring,
1257 struct rqb_dmabuf *nvmebuf,
1258 uint64_t isr_timestamp)
1260 if (phba->nvmet_support == 0) {
1261 lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
1264 lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf,
1269 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
1270 * @phba: pointer to a host N_Port data structure.
1271 * @ctxp: Context info for NVME LS Request
1272 * @rspbuf: DMA buffer of NVME command.
1273 * @rspsize: size of the NVME command.
1275 * This routine is used for allocating a lpfc-WQE data structure from
1276 * the driver lpfc-WQE free-list and prepares the WQE with the parameters
1277 * passed into the routine for the discovery state machine to issue an
1278 * Extended Link Service (NVME) command. It is a generic lpfc-WQE
1279 * allocation and preparation routine that is used by all the discovery
1280 * state machine routines; the NVME command-specific fields are set up
1281 * later by the individual discovery machine routines after this routine
1282 * allocates and prepares a generic WQE data structure. It fills in the
1283 * Buffer Descriptor Entries (BDEs), allocates buffers for both command
1284 * payload and response payload (if expected). The reference count on the
1285 * ndlp is incremented by 1 and the reference to the ndlp is put into
1286 * context1 of the WQE data structure for this WQE to hold the ndlp
1287 * reference for the command's callback function to access later.
1290 * Pointer to the newly allocated/prepared nvme wqe data structure
1291 * NULL - when nvme wqe data structure allocation/preparation failed
1293 static struct lpfc_iocbq *
1294 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
1295 struct lpfc_nvmet_rcv_ctx *ctxp,
1296 dma_addr_t rspbuf, uint16_t rspsize)
1298 struct lpfc_nodelist *ndlp;
1299 struct lpfc_iocbq *nvmewqe;
1300 union lpfc_wqe *wqe;
1302 if (!lpfc_is_link_up(phba)) {
1303 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1304 "6104 lpfc_nvmet_prep_ls_wqe: link err: "
1305 "NPORT x%x oxid:x%x\n",
1306 ctxp->sid, ctxp->oxid);
1310 /* Allocate buffer for command wqe */
1311 nvmewqe = lpfc_sli_get_iocbq(phba);
1312 if (nvmewqe == NULL) {
1313 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1314 "6105 lpfc_nvmet_prep_ls_wqe: No WQE: "
1315 "NPORT x%x oxid:x%x\n",
1316 ctxp->sid, ctxp->oxid);
1320 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1321 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
1322 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
1323 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
1324 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1325 "6106 lpfc_nvmet_prep_ls_wqe: No ndlp: "
1326 "NPORT x%x oxid:x%x\n",
1327 ctxp->sid, ctxp->oxid);
1328 goto nvme_wqe_free_wqeq_exit;
1330 ctxp->wqeq = nvmewqe;
1332 /* prevent preparing wqe with NULL ndlp reference */
1333 nvmewqe->context1 = lpfc_nlp_get(ndlp);
1334 if (nvmewqe->context1 == NULL)
1335 goto nvme_wqe_free_wqeq_exit;
1336 nvmewqe->context2 = ctxp;
1338 wqe = &nvmewqe->wqe;
1339 memset(wqe, 0, sizeof(union lpfc_wqe));
1342 wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1343 wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
1344 wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
1345 wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
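/* Words 0 - 2: single 64-bit BDE covering the LS response payload */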
1352 bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
1353 bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
1354 bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
1355 bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
1356 bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
1359 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
1360 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
1361 bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
1364 bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
1365 CMD_XMIT_SEQUENCE64_WQE);
1366 bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
1367 bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
1368 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
1371 wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
1374 bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
1375 /* Needs to be set by caller */
1376 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
1379 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
1380 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
1381 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
1382 LPFC_WQE_LENLOC_WORD12);
1383 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
1386 bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
1387 LPFC_WQE_CQ_ID_DEFAULT);
1388 bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
1392 wqe->xmit_sequence.xmit_len = rspsize;
1395 nvmewqe->vport = phba->pport;
1396 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
1397 nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
1399 /* Xmit NVME response to remote NPORT <did> */
1400 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1401 "6039 Xmit NVME LS response to remote "
1402 "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
1403 ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
1407 nvme_wqe_free_wqeq_exit:
1408 nvmewqe->context2 = NULL;
1409 nvmewqe->context3 = NULL;
1410 lpfc_sli_release_iocbq(phba, nvmewqe);
1415 static struct lpfc_iocbq *
1416 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
1417 struct lpfc_nvmet_rcv_ctx *ctxp)
1419 struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
1420 struct lpfc_nvmet_tgtport *tgtp;
1421 struct sli4_sge *sgl;
1422 struct lpfc_nodelist *ndlp;
1423 struct lpfc_iocbq *nvmewqe;
1424 struct scatterlist *sgel;
1425 union lpfc_wqe128 *wqe;
1427 dma_addr_t physaddr;
1431 if (!lpfc_is_link_up(phba)) {
1432 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1433 "6107 lpfc_nvmet_prep_fcp_wqe: link err:"
1434 "NPORT x%x oxid:x%x\n", ctxp->sid,
1439 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1440 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
1441 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
1442 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
1443 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1444 "6108 lpfc_nvmet_prep_fcp_wqe: no ndlp: "
1445 "NPORT x%x oxid:x%x\n",
1446 ctxp->sid, ctxp->oxid);
1450 if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) {
1451 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1452 "6109 lpfc_nvmet_prep_fcp_wqe: seg cnt err: "
1453 "NPORT x%x oxid:x%x cnt %d\n",
1454 ctxp->sid, ctxp->oxid, phba->cfg_nvme_seg_cnt);
1458 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1459 nvmewqe = ctxp->wqeq;
1460 if (nvmewqe == NULL) {
1461 /* Allocate buffer for command wqe */
1462 nvmewqe = ctxp->rqb_buffer->iocbq;
1463 if (nvmewqe == NULL) {
1464 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1465 "6110 lpfc_nvmet_prep_fcp_wqe: No "
1466 "WQE: NPORT x%x oxid:x%x\n",
1467 ctxp->sid, ctxp->oxid);
1470 ctxp->wqeq = nvmewqe;
1471 xc = 0; /* create new XRI */
1472 nvmewqe->sli4_lxritag = NO_XRI;
1473 nvmewqe->sli4_xritag = NO_XRI;
1477 if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
1478 (ctxp->entry_cnt == 1)) ||
1479 ((ctxp->state == LPFC_NVMET_STE_DATA) &&
1480 (ctxp->entry_cnt > 1))) {
1481 wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
1483 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1484 "6111 Wrong state %s: %d cnt %d\n",
1485 __func__, ctxp->state, ctxp->entry_cnt);
1489 sgl = (struct sli4_sge *)ctxp->rqb_buffer->sglq->sgl;
1491 case NVMET_FCOP_READDATA:
1492 case NVMET_FCOP_READDATA_RSP:
1493 /* Words 0 - 2 : The first sg segment */
1495 physaddr = sg_dma_address(sgel);
1496 wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1497 wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
1498 wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
1499 wqe->fcp_tsend.bde.addrHigh =
1500 cpu_to_le32(putPaddrHigh(physaddr));
1503 wqe->fcp_tsend.payload_offset_len = 0;
1506 wqe->fcp_tsend.relative_offset = ctxp->offset;
1511 bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
1512 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
1513 bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
1514 nvmewqe->sli4_xritag);
1517 bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
1520 wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
1523 bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
1524 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
1527 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
1528 bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
1529 bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
1530 bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com,
1531 LPFC_WQE_LENLOC_WORD12);
1532 bf_set(wqe_ebde_cnt, &wqe->fcp_tsend.wqe_com, 0);
1533 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, xc);
1534 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
1535 if (phba->cfg_nvme_oas)
1536 bf_set(wqe_oas, &wqe->fcp_tsend.wqe_com, 1);
1539 bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com,
1540 LPFC_WQE_CQ_ID_DEFAULT);
1541 bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com,
1545 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
1547 /* Setup 2 SKIP SGEs */
1551 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
1552 sgl->word2 = cpu_to_le32(sgl->word2);
1558 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
1559 sgl->word2 = cpu_to_le32(sgl->word2);
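/*
 * TSEND carries no command or response buffers of its own, so SGE slots
 * 0 and 1 are marked SKIP; the data SGEs from the scatterlist are filled
 * in by the common loop below.
 */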
1562 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
1563 atomic_inc(&tgtp->xmt_fcp_read_rsp);
1564 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
1565 if ((ndlp->nlp_flag & NLP_SUPPRESS_RSP) &&
1566 (rsp->rsplen == 12)) {
1567 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1);
1568 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
1569 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
1570 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
1572 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
1573 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
1574 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
1575 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
1576 ((rsp->rsplen >> 2) - 1));
1577 memcpy(&wqe->words[16], rsp->rspaddr,
1581 atomic_inc(&tgtp->xmt_fcp_read);
1583 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
1584 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
1585 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
1586 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
1587 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
1589 ctxp->state = LPFC_NVMET_STE_DATA;
1592 case NVMET_FCOP_WRITEDATA:
1593 /* Words 0 - 2 : The first sg segment */
1594 txrdy = pci_pool_alloc(phba->txrdy_payload_pool,
1595 GFP_KERNEL, &physaddr);
1597 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1598 "6041 Bad txrdy buffer: oxid x%x\n",
1602 ctxp->txrdy = txrdy;
1603 ctxp->txrdy_phys = physaddr;
1604 wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1605 wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
1606 wqe->fcp_treceive.bde.addrLow =
1607 cpu_to_le32(putPaddrLow(physaddr));
1608 wqe->fcp_treceive.bde.addrHigh =
1609 cpu_to_le32(putPaddrHigh(physaddr));
1612 wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
1615 wqe->fcp_treceive.relative_offset = ctxp->offset;
1620 bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
1621 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
1622 bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
1623 nvmewqe->sli4_xritag);
1626 bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
1627 bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com,
1628 CMD_FCP_TRECEIVE64_WQE);
1631 wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
1634 bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
1635 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
1638 bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
1639 bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
1640 bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
1641 bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com,
1642 LPFC_WQE_LENLOC_WORD12);
1643 bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, xc);
1644 bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
1645 bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
1646 bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
1647 bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
1648 if (phba->cfg_nvme_oas)
1649 bf_set(wqe_oas, &wqe->fcp_treceive.wqe_com, 1);
1652 bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com,
1653 LPFC_WQE_CQ_ID_DEFAULT);
1654 bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com,
1655 FCP_COMMAND_TRECEIVE);
1656 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
1659 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
1661 /* Setup 1 TXRDY and 1 SKIP SGE */
1663 txrdy[1] = cpu_to_be32(rsp->transfer_length);
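/* XFER_RDY word 1 carries the write burst length, big endian on the wire */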
1666 sgl->addr_hi = putPaddrHigh(physaddr);
1667 sgl->addr_lo = putPaddrLow(physaddr);
1669 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
1670 sgl->word2 = cpu_to_le32(sgl->word2);
1671 sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
1676 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
1677 sgl->word2 = cpu_to_le32(sgl->word2);
1680 ctxp->state = LPFC_NVMET_STE_DATA;
1681 atomic_inc(&tgtp->xmt_fcp_write);
1684 case NVMET_FCOP_RSP:
1686 physaddr = rsp->rspdma;
1687 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1688 wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
1689 wqe->fcp_trsp.bde.addrLow =
1690 cpu_to_le32(putPaddrLow(physaddr));
1691 wqe->fcp_trsp.bde.addrHigh =
1692 cpu_to_le32(putPaddrHigh(physaddr));
1695 wqe->fcp_trsp.response_len = rsp->rsplen;
1698 wqe->fcp_trsp.rsvd_4_5[0] = 0;
1704 bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
1705 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
1706 bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
1707 nvmewqe->sli4_xritag);
1710 bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1);
1711 bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
1714 wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
1717 bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
1718 bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
1721 bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
1722 bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 0);
1723 bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_WRITE);
1724 bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com,
1725 LPFC_WQE_LENLOC_WORD3);
1726 bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, xc);
1727 bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
1728 if (phba->cfg_nvme_oas)
1729 bf_set(wqe_oas, &wqe->fcp_trsp.wqe_com, 1);
1732 bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com,
1733 LPFC_WQE_CQ_ID_DEFAULT);
1734 bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com,
1736 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
1737 ctxp->state = LPFC_NVMET_STE_RSP;
1739 if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
1740 /* Good response - all zeros on the wire */
1741 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
1742 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
1743 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
1745 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
1746 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
1747 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
1748 ((rsp->rsplen >> 2) - 1));
1749 memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
1752 /* Use rspbuf, NOT sg list */
1755 atomic_inc(&tgtp->xmt_fcp_rsp);
1759 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1760 "6064 Unknown Rsp Op %d\n",
1766 nvmewqe->vport = phba->pport;
1767 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
1768 nvmewqe->context1 = ndlp;
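/*
 * Common tail for TSEND/TRECEIVE: convert the transport scatterlist into
 * data SGEs, advancing ctxp->offset so a multi-phase IO resumes where
 * the previous phase left off.
 */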
1770 for (i = 0; i < rsp->sg_cnt; i++) {
1772 physaddr = sg_dma_address(sgel);
1773 cnt = sg_dma_len(sgel);
1774 sgl->addr_hi = putPaddrHigh(physaddr);
1775 sgl->addr_lo = putPaddrLow(physaddr);
1777 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
1778 bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
1779 if ((i+1) == rsp->sg_cnt)
1780 bf_set(lpfc_sli4_sge_last, sgl, 1);
1781 sgl->word2 = cpu_to_le32(sgl->word2);
1782 sgl->sge_len = cpu_to_le32(cnt);
1784 ctxp->offset += cnt;
1790 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
1791 * @phba: Pointer to HBA context object.
1792 * @cmdwqe: Pointer to driver command WQE object.
1793 * @wcqe: Pointer to driver response CQE object.
1795 * The function is called from the SLI ring event handler with no
1796 * lock held. This function is the completion handler for NVME ABTS for FCP cmds.
1797 * The function frees memory resources used for the NVME commands.
1800 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1801 struct lpfc_wcqe_complete *wcqe)
1803 struct lpfc_nvmet_rcv_ctx *ctxp;
1804 struct lpfc_nvmet_tgtport *tgtp;
1805 uint32_t status, result;
1806 unsigned long flags;
1807 bool released = false;
1809 ctxp = cmdwqe->context2;
1810 status = bf_get(lpfc_wcqe_c_status, wcqe);
1811 result = wcqe->parameter;
1813 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1814 atomic_inc(&tgtp->xmt_abort_cmpl);
1816 ctxp->state = LPFC_NVMET_STE_DONE;
1818 /* Check if we already received a free context call
1819 * and we have completed processing an abort situation.
1821 spin_lock_irqsave(&ctxp->ctxlock, flags);
1822 if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
1823 !(ctxp->flag & LPFC_NVMET_XBUSY)) {
1824 list_del(&ctxp->list);
1827 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
1828 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1830 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
1831 "6165 ABORT cmpl: xri x%x flg x%x (%d) "
1832 "WCQE: %08x %08x %08x %08x\n",
1833 ctxp->oxid, ctxp->flag, released,
1834 wcqe->word0, wcqe->total_data_placed,
1835 result, wcqe->word3);
1838 * If the transport has released the ctx, then it can be reused here.
1839 * Otherwise it will be recycled by the transport release call.
1842 lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
1844 cmdwqe->context2 = NULL;
1845 cmdwqe->context3 = NULL;
1846 lpfc_sli_release_iocbq(phba, cmdwqe);
1848 /* Since iaab/iaar are NOT set, there is no work left.
1849 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
1850 * should have been called already.
1855 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
1856 * @phba: Pointer to HBA context object.
1857 * @cmdwqe: Pointer to driver command WQE object.
1858 * @wcqe: Pointer to driver response CQE object.
1860 * The function is called from the SLI ring event handler with no
1861 * lock held. This function is the completion handler for NVME ABTS for FCP cmds.
1862 * The function frees memory resources used for the NVME commands.
1865 lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1866 struct lpfc_wcqe_complete *wcqe)
1868 struct lpfc_nvmet_rcv_ctx *ctxp;
1869 struct lpfc_nvmet_tgtport *tgtp;
1870 unsigned long flags;
1871 uint32_t status, result;
1872 bool released = false;
1874 ctxp = cmdwqe->context2;
1875 status = bf_get(lpfc_wcqe_c_status, wcqe);
1876 result = wcqe->parameter;
1878 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1879 atomic_inc(&tgtp->xmt_abort_cmpl);
1882 /* if the context is clear, the related IO is already complete */
1883 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1884 "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
1885 wcqe->word0, wcqe->total_data_placed,
1886 result, wcqe->word3);
1891 if (ctxp->state != LPFC_NVMET_STE_ABORT) {
1892 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
1893 "6112 ABTS Wrong state:%d oxid x%x\n",
1894 ctxp->state, ctxp->oxid);
1897 /* Check if we already received a free context call
1898 * and we have completed processing an abort situation.
1900 ctxp->state = LPFC_NVMET_STE_DONE;
1901 spin_lock_irqsave(&ctxp->ctxlock, flags);
1902 if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
1903 !(ctxp->flag & LPFC_NVMET_XBUSY)) {
1904 list_del(&ctxp->list);
1907 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
1908 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1910 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1911 "6316 ABTS cmpl xri x%x flg x%x (%x) "
1912 "WCQE: %08x %08x %08x %08x\n",
1913 ctxp->oxid, ctxp->flag, released,
1914 wcqe->word0, wcqe->total_data_placed,
1915 result, wcqe->word3);
1917 * If the transport has released the ctx, then it can be reused here.
1918 * Otherwise it will be recycled by the transport release call.
1921 lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
1923 cmdwqe->context2 = NULL;
1924 cmdwqe->context3 = NULL;
1926 /* Since iaab/iaar are NOT set, there is no work left.
1927 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
1928 * should have been called already.
1933 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
1934 * @phba: Pointer to HBA context object.
1935 * @cmdwqe: Pointer to driver command WQE object.
1936 * @wcqe: Pointer to driver response CQE object.
1938 * The function is called from the SLI ring event handler with no
1939 * lock held. This function is the completion handler for NVME ABTS for LS cmds.
1940 * The function frees memory resources used for the NVME commands.
1943 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1944 struct lpfc_wcqe_complete *wcqe)
1946 struct lpfc_nvmet_rcv_ctx *ctxp;
1947 struct lpfc_nvmet_tgtport *tgtp;
1948 uint32_t status, result;
1950 ctxp = cmdwqe->context2;
1951 status = bf_get(lpfc_wcqe_c_status, wcqe);
1952 result = wcqe->parameter;
1954 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1955 atomic_inc(&tgtp->xmt_abort_cmpl);
1957 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1958 "6083 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n",
1959 ctxp, wcqe->word0, wcqe->total_data_placed,
1960 result, wcqe->word3);
1963 cmdwqe->context2 = NULL;
1964 cmdwqe->context3 = NULL;
1965 lpfc_sli_release_iocbq(phba, cmdwqe);
1968 lpfc_sli_release_iocbq(phba, cmdwqe);
1972 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
1973 struct lpfc_nvmet_rcv_ctx *ctxp,
1974 uint32_t sid, uint16_t xri)
1976 struct lpfc_nvmet_tgtport *tgtp;
1977 struct lpfc_iocbq *abts_wqeq;
1978 union lpfc_wqe *wqe_abts;
1979 struct lpfc_nodelist *ndlp;
1981 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1982 "6067 ABTS: sid %x xri x%x/x%x\n",
1983 sid, xri, ctxp->wqeq->sli4_xritag);
1985 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1987 ctxp->wqeq = ctxp->rqb_buffer->iocbq;
1988 ctxp->wqeq->hba_wqidx = 0;

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
				"6134 Drop ABTS - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		return 0;
	}

	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;
	ctxp->state = LPFC_NVMET_STE_ABORT;

	/*
	 * Since we zero the whole WQE, we need to ensure we set the WQE fields
	 * that were initialized in lpfc_sli4_nvmet_alloc.
	 */
	memset(wqe_abts, 0, sizeof(union lpfc_wqe));

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
	bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
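
	/* Note: for an exchange the remote port originated there is no local
	 * WQE to kill, so the abort goes out on the wire as a BLS ABTS frame
	 * (FC_RCTL_BA_ABTS/FC_TYPE_BLS above) carried by an XMIT_SEQUENCE64
	 * WQE, rather than as an internal ABORT_XRI command.
	 */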

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
	       abts_wqeq->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
	bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	abts_wqeq->vport = phba->pport;
	abts_wqeq->context1 = ndlp;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->context3 = NULL;
	abts_wqeq->rsvd2 = 0;
	/* hba_wqidx should already be setup from command we are aborting */
	abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
	abts_wqeq->iocb.ulpLe = 1;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6069 Issue ABTS to xri x%x reqtag x%x\n",
			xri, abts_wqeq->iotag);
	return 1;
}

static int
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
			       struct lpfc_nvmet_rcv_ctx *ctxp,
			       uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe *abts_wqe;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->rqb_buffer->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
				"6160 Drop ABORT - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}

	/* Issue ABTS for this WQE based on iotag */
	ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
	if (!ctxp->abort_wqeq) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
				"6161 ABORT failed: No wqeqs: "
				"xri: x%x\n", ctxp->oxid);
		/* No failure to an ABTS request. */
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}
	abts_wqeq = ctxp->abort_wqeq;
	abts_wqe = &abts_wqeq->wqe;
	ctxp->state = LPFC_NVMET_STE_ABORT;

	/* Announce entry to new IO submit field. */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6162 ABORT Request to rport DID x%06x "
			"for xri x%x x%x\n",
			ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);

	/* If the hba is getting reset, this flag is set. It is
	 * cleared when the reset is complete and rings reestablished.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6163 Driver in reset cleanup - flushing "
				"NVME Req now. hba_flag x%x oxid x%x\n",
				phba->hba_flag, ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}

	/* Outstanding abort is in progress */
	if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6164 Outstanding NVME I/O Abort Request "
				"still pending on oxid x%x\n",
				ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}

	/* Ready - mark outstanding as aborted by driver. */
	abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
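
	/* Setting LPFC_DRIVER_ABORTED here is what the duplicate-abort check
	 * above tests for, so a second abort attempt on the same iocbq is
	 * rejected while this one is outstanding.
	 */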

	/* WQEs are reused. Clear stale data and set key fields to
	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
	 */
	memset(abts_wqe, 0, sizeof(union lpfc_wqe));

	/* word 3 */
	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
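
	/* T_XRI_TAG presumably selects abort-by-XRI-tag as the criteria the
	 * firmware uses to locate the exchange; the tag itself is supplied
	 * in abort_tag below.
	 */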

	/* word 7 */
	bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);

	/* word 8 - tell the FW to abort the IO associated with this
	 * outstanding exchange ID.
	 */
	abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;

	/* word 9 - this is the iotag for the abts_wqe completion. */
	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
	       abts_wqeq->iotag);

	/* word 10 */
	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

	/* word 11 */
	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
	abts_wqeq->context2 = ctxp;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
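
	/* The flush and duplicate-abort checks plus the WQE submission all
	 * run under hbalock, so a reset cannot slip in between the checks
	 * above and lpfc_sli4_issue_wqe().
	 */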
	if (rc == WQE_SUCCESS)
		return 0;

	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6166 Failed ABORT issue_wqe with status x%x "
			"for oxid x%x.\n",
			rc, ctxp->oxid);
	return 1;
}

static int
lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
				 struct lpfc_nvmet_rcv_ctx *ctxp,
				 uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->rqb_buffer->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}
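
	/* For an unsolicited command the iocbq embedded in the RQ buffer is
	 * reused to carry the abort, so no new iocbq allocation is needed on
	 * this path.
	 */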
	rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
	if (rc == 0)
		goto aerr;

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq = ctxp->wqeq;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_rsp);
		return 0;
	}

aerr:
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	atomic_inc(&tgtp->xmt_abort_rsp_error);
	lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
			"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
			ctxp->oxid, rc);
	return 1;
}

static int
lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
				struct lpfc_nvmet_rcv_ctx *ctxp,
				uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe *wqe_abts;
	unsigned long flags;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		/* Issue ABTS for this WQE based on iotag */
		ctxp->wqeq = lpfc_sli_get_iocbq(phba);
		if (!ctxp->wqeq) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
					"6068 Abort failed: No wqeqs: "
					"xri: x%x\n", xri);
			/* No failure to an ABTS request. */
			return 0;
		}
	}

	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;
	lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
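
	/* Note: unlike the FCP path, the helper's return status is not
	 * checked here; the submission below proceeds regardless and the
	 * failure path after it handles cleanup.
	 */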

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_rsp);
		return 0;
	}

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	abts_wqeq->context2 = NULL;
	abts_wqeq->context3 = NULL;
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
			"6056 Failed to Issue ABTS. Status x%x\n", rc);
	return 0;
}