/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
						 struct lpfc_async_xchg_ctx *,
						 dma_addr_t rspbuf,
						 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
						  struct lpfc_async_xchg_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
					  struct lpfc_async_xchg_ctx *,
					  uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
					    struct lpfc_async_xchg_ctx *,
					    uint32_t, uint16_t);
static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
				    struct lpfc_async_xchg_ctx *);
static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);

static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);
static union lpfc_wqe128 lpfc_tsend_cmd_template;
static union lpfc_wqe128 lpfc_treceive_cmd_template;
static union lpfc_wqe128 lpfc_trsp_cmd_template;
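
/*
 * The three templates above are built once by lpfc_nvmet_cmd_template()
 * below. At I/O time the WQE prep routines copy the matching template
 * into the command WQE and patch only the words marked "variable" for
 * that request, rather than rebuilding the whole 128-byte WQE per I/O.
 */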
/* Setup WQE templates for NVME IOs */
static void
lpfc_nvmet_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* TSEND template */
	wqe = &lpfc_tsend_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is zero */

	/* Word 4 - relative_offset is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 - wqe_ar is variable */
	bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
	bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag, rcvoxid is variable */

	/* Word 10 - wqes, xc is variable */
	bf_set(wqe_xchg, &wqe->fcp_tsend.wqe_com, LPFC_NVME_XCHG);
	bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);

	/* Word 11 - sup, irsp, irsplen is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
	bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);

	/* Word 12 - fcp_data_len is variable */

	/* Word 13, 14, 15 - PBDE is zero */

	/* TRECEIVE template */
	wqe = &lpfc_treceive_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 */
	wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;

	/* Word 4 - relative_offset is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
	bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag, rcvoxid is variable */

	/* Word 10 - xc is variable */
	bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_xchg, &wqe->fcp_treceive.wqe_com, LPFC_NVME_XCHG);
	bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
	/* was set via wqe->fcp_tsend.wqe_com; same union word, fixed for
	 * readability
	 */
	bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
	bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);

	/* Word 12 - fcp_data_len is variable */

	/* Word 13, 14, 15 - PBDE is variable */

	/* TRSP template */
	wqe = &lpfc_trsp_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - response_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
	bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
	bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - wqes, xc is variable */
	bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
	bf_set(wqe_xchg, &wqe->fcp_trsp.wqe_com, LPFC_NVME_XCHG);
	bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);

	/* Word 11 - irsp, irsplen is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
	bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
static struct lpfc_async_xchg_ctx *
lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
{
	struct lpfc_async_xchg_ctx *ctxp;
	unsigned long iflag;
	bool found = false;

	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		found = true;
		break;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	if (found)
		return ctxp;
	return NULL;
}

static struct lpfc_async_xchg_ctx *
lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
{
	struct lpfc_async_xchg_ctx *ctxp;
	unsigned long iflag;
	bool found = false;

	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
		if (ctxp->oxid != oxid || ctxp->sid != sid)
			continue;

		found = true;
		break;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	if (found)
		return ctxp;
	return NULL;
}
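
/*
 * Both lookup helpers above walk t_active_ctx_list under
 * t_active_list_lock; the ABTS handling paths below use them to map a
 * firmware-reported XRI, or an OX_ID/S_ID pair from an ABTS frame, back
 * to the exchange context that owns it.
 */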
static void
lpfc_nvmet_defer_release(struct lpfc_hba *phba,
			 struct lpfc_async_xchg_ctx *ctxp)
{
	lockdep_assert_held(&ctxp->ctxlock);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6313 NVMET Defer ctx release oxid x%x flg x%x\n",
			ctxp->oxid, ctxp->flag);

	if (ctxp->flag & LPFC_NVME_CTX_RLS)
		return;

	ctxp->flag |= LPFC_NVME_CTX_RLS;
	spin_lock(&phba->sli4_hba.t_active_list_lock);
	list_del(&ctxp->list);
	spin_unlock(&phba->sli4_hba.t_active_list_lock);
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
}
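
/*
 * Once LPFC_NVME_CTX_RLS is set the context has moved from the active
 * list to lpfc_abts_nvmet_ctx_list; the abort completion path (see
 * lpfc_sli4_nvmet_xri_aborted()) then owns the final
 * lpfc_nvmet_ctxbuf_post() that recycles it.
 */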

/**
 * __lpfc_nvme_xmt_ls_rsp_cmp - Generic completion handler for the
 *         transmission of an NVME LS response.
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. The function frees memory resources used for the command
 * used to send the NVME LS RSP.
 **/
void
__lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			   struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_async_xchg_ctx *axchg = cmdwqe->context2;
	struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp;
	uint32_t status, result;

	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
	result = wcqe->parameter;

	if (axchg->state != LPFC_NVME_STE_LS_RSP || axchg->entry_cnt != 2) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6410 NVMEx LS cmpl state mismatch IO x%x: "
				"%d %d\n",
				axchg->oxid, axchg->state, axchg->entry_cnt);
	}

	lpfc_nvmeio_data(phba, "NVMEx LS  CMPL: xri x%x stat x%x result x%x\n",
			 axchg->oxid, status, result);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6038 NVMEx LS rsp cmpl: %d %d oxid x%x\n",
			status, result, axchg->oxid);

	lpfc_nlp_put(cmdwqe->context1);
	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	ls_rsp->done(ls_rsp);
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6200 NVMEx LS rsp cmpl done status %d oxid x%x\n",
			status, axchg->oxid);
	kfree(axchg);
}

/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. This function is the completion handler for NVME LS commands.
 * The function updates any states and statistics, then calls the
 * generic completion handler to free resources.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;

	if (!phba->targetport)
		goto finish;

	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (tgtp) {
		if (status) {
			atomic_inc(&tgtp->xmt_ls_rsp_error);
			if (result == IOERR_ABORT_REQUESTED)
				atomic_inc(&tgtp->xmt_ls_rsp_aborted);
			if (bf_get(lpfc_wcqe_c_xb, wcqe))
				atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
		} else {
			atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
		}
	}

finish:
	__lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, wcqe);
}

/**
 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA buffer is associated with
 * @ctx_buf: ctx buffer context
 *
 * Description: Frees the given DMA buffer in the appropriate way given by
 * reposting it to its associated RQ so it can be reused.
 *
 * Notes: Takes phba->hbalock.  Can be called with or without other locks held.
 *
 * Returns: None
 **/
void
lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct rqb_dmabuf *nvmebuf;
	struct lpfc_nvmet_ctx_info *infop;
	uint32_t size, oxid, sid;
	int cpu;
	unsigned long iflag;

	if (ctxp->state == LPFC_NVME_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6411 NVMET free, already free IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	if (ctxp->rqb_buffer) {
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		nvmebuf = ctxp->rqb_buffer;
		/* check if freed in another path whilst acquiring lock */
		if (nvmebuf) {
			ctxp->rqb_buffer = NULL;
			if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
				ctxp->flag &= ~LPFC_NVME_CTX_REUSE_WQ;
				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
				nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
								    nvmebuf);
			} else {
				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
				/* repost */
				lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
			}
		} else {
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
		}
	}
	ctxp->state = LPFC_NVME_STE_FREE;

	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
	if (phba->sli4_hba.nvmet_io_wait_cnt) {
		list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
				 nvmebuf, struct rqb_dmabuf,
				 hbuf.list);
		phba->sli4_hba.nvmet_io_wait_cnt--;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
		oxid = be16_to_cpu(fc_hdr->fh_ox_id);
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		size = nvmebuf->bytes_recv;
		sid = sli4_sid_from_fc_hdr(fc_hdr);

		ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
		ctxp->wqeq = NULL;
		ctxp->offset = 0;
		ctxp->phba = phba;
		ctxp->size = size;
		ctxp->oxid = oxid;
		ctxp->sid = sid;
		ctxp->state = LPFC_NVME_STE_RCV;
		ctxp->entry_cnt = 1;
		ctxp->flag = 0;
		ctxp->ctxbuf = ctx_buf;
		ctxp->rqb_buffer = (void *)nvmebuf;
		spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		/* NOTE: isr time stamp is stale when context is re-assigned*/
		if (ctxp->ts_isr_cmd) {
			ctxp->ts_cmd_nvme = 0;
			ctxp->ts_nvme_data = 0;
			ctxp->ts_data_wqput = 0;
			ctxp->ts_isr_data = 0;
			ctxp->ts_data_nvme = 0;
			ctxp->ts_nvme_status = 0;
			ctxp->ts_status_wqput = 0;
			ctxp->ts_isr_status = 0;
			ctxp->ts_status_nvme = 0;
		}
#endif
		atomic_inc(&tgtp->rcv_fcp_cmd_in);

		/* Indicate that a replacement buffer has been posted */
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVME_CTX_REUSE_WQ;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
			atomic_inc(&tgtp->rcv_fcp_cmd_drop);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6181 Unable to queue deferred work "
					"for oxid x%x. "
					"FCP Drop IO [x%x x%x x%x]\n",
					ctxp->oxid,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));

			spin_lock_irqsave(&ctxp->ctxlock, iflag);
			lpfc_nvmet_defer_release(phba, ctxp);
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
			lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
		}
		return;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

	/*
	 * Use the CPU context list, from the MRQ the IO was received on
	 * (ctxp->idx), to save context structure.
	 */
	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_del_init(&ctxp->list);
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	cpu = raw_smp_processor_id();
	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
	list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
	infop->nvmet_ctx_list_cnt++;
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
#endif
}
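
/*
 * Two recycle paths exist above: if a received command is parked on the
 * nvmet_io_wait_list, the freed context is immediately re-armed with
 * that waiting buffer and the deferred receive work is queued; otherwise
 * the context returns to the per-CPU free list for the MRQ it came from.
 */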

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvmet_ktime(struct lpfc_hba *phba,
		 struct lpfc_async_xchg_ctx *ctxp)
{
	uint64_t seg1, seg2, seg3, seg4, seg5;
	uint64_t seg6, seg7, seg8, seg9, seg10;
	uint64_t segsum;

	if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
	    !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
	    !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
	    !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
	    !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
		return;

	if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
		return;
	if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
		return;
	if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
		return;
	if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
		return;
	if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
		return;
	if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
		return;
	if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
		return;
	if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
		return;
	if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
		return;
	if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
		return;
	/*
	 * Segment 1 - Time from FCP command received by MSI-X ISR
	 * to FCP command is passed to NVME Layer.
	 * Segment 2 - Time from FCP command payload handed
	 * off to NVME Layer to Driver receives a Command op
	 * from NVME Layer.
	 * Segment 3 - Time from Driver receives a Command op
	 * from NVME Layer to Command is put on WQ.
	 * Segment 4 - Time from Driver WQ put is done
	 * to MSI-X ISR for Command cmpl.
	 * Segment 5 - Time from MSI-X ISR for Command cmpl to
	 * Command cmpl is passed to NVME Layer.
	 * Segment 6 - Time from Command cmpl is passed to NVME
	 * Layer to Driver receives a RSP op from NVME Layer.
	 * Segment 7 - Time from Driver receives a RSP op from
	 * NVME Layer to WQ put is done on TRSP FCP Status.
	 * Segment 8 - Time from Driver WQ put is done on TRSP
	 * FCP Status to MSI-X ISR for TRSP cmpl.
	 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
	 * TRSP cmpl is passed to NVME Layer.
	 * Segment 10 - Time from FCP command received by
	 * MSI-X ISR to command is completed on wire.
	 * (Segments 1 thru 8) for READDATA / WRITEDATA
	 * (Segments 1 thru 4) for READDATA_RSP
	 */
	seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
	segsum = seg1;

	seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
	if (segsum > seg2)
		return;
	seg2 -= segsum;
	segsum += seg2;

	seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
	if (segsum > seg3)
		return;
	seg3 -= segsum;
	segsum += seg3;

	seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
	if (segsum > seg4)
		return;
	seg4 -= segsum;
	segsum += seg4;

	seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
	if (segsum > seg5)
		return;
	seg5 -= segsum;
	segsum += seg5;

	/* For auto rsp commands seg6 thru seg10 will be 0 */
	if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
		seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
		if (segsum > seg6)
			return;
		seg6 -= segsum;
		segsum += seg6;

		seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
		if (segsum > seg7)
			return;
		seg7 -= segsum;
		segsum += seg7;

		seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
		if (segsum > seg8)
			return;
		seg8 -= segsum;
		segsum += seg8;

		seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
		if (segsum > seg9)
			return;
		seg9 -= segsum;
		segsum += seg9;

		if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
			return;
		seg10 = (ctxp->ts_isr_status -
			 ctxp->ts_isr_cmd);
	} else {
		if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
			return;
		seg6 = 0;
		seg7 = 0;
		seg8 = 0;
		seg9 = 0;
		seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
	}

	phba->ktime_seg1_total += seg1;
	if (seg1 < phba->ktime_seg1_min)
		phba->ktime_seg1_min = seg1;
	else if (seg1 > phba->ktime_seg1_max)
		phba->ktime_seg1_max = seg1;

	phba->ktime_seg2_total += seg2;
	if (seg2 < phba->ktime_seg2_min)
		phba->ktime_seg2_min = seg2;
	else if (seg2 > phba->ktime_seg2_max)
		phba->ktime_seg2_max = seg2;

	phba->ktime_seg3_total += seg3;
	if (seg3 < phba->ktime_seg3_min)
		phba->ktime_seg3_min = seg3;
	else if (seg3 > phba->ktime_seg3_max)
		phba->ktime_seg3_max = seg3;

	phba->ktime_seg4_total += seg4;
	if (seg4 < phba->ktime_seg4_min)
		phba->ktime_seg4_min = seg4;
	else if (seg4 > phba->ktime_seg4_max)
		phba->ktime_seg4_max = seg4;

	phba->ktime_seg5_total += seg5;
	if (seg5 < phba->ktime_seg5_min)
		phba->ktime_seg5_min = seg5;
	else if (seg5 > phba->ktime_seg5_max)
		phba->ktime_seg5_max = seg5;

	phba->ktime_data_samples++;
	if (!seg6)
		return;

	phba->ktime_seg6_total += seg6;
	if (seg6 < phba->ktime_seg6_min)
		phba->ktime_seg6_min = seg6;
	else if (seg6 > phba->ktime_seg6_max)
		phba->ktime_seg6_max = seg6;

	phba->ktime_seg7_total += seg7;
	if (seg7 < phba->ktime_seg7_min)
		phba->ktime_seg7_min = seg7;
	else if (seg7 > phba->ktime_seg7_max)
		phba->ktime_seg7_max = seg7;

	phba->ktime_seg8_total += seg8;
	if (seg8 < phba->ktime_seg8_min)
		phba->ktime_seg8_min = seg8;
	else if (seg8 > phba->ktime_seg8_max)
		phba->ktime_seg8_max = seg8;

	phba->ktime_seg9_total += seg9;
	if (seg9 < phba->ktime_seg9_min)
		phba->ktime_seg9_min = seg9;
	else if (seg9 > phba->ktime_seg9_max)
		phba->ktime_seg9_max = seg9;

	phba->ktime_seg10_total += seg10;
	if (seg10 < phba->ktime_seg10_min)
		phba->ktime_seg10_min = seg10;
	else if (seg10 > phba->ktime_seg10_max)
		phba->ktime_seg10_max = seg10;
	phba->ktime_status_samples++;
}
#endif
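
/*
 * Each segN above starts as a delta from ts_isr_cmd and is then
 * normalized against the running sum (segsum) of the preceding
 * segments, so the buckets partition the total latency without
 * overlap; seg10 is the full ISR-to-wire-complete time for the
 * exchange.
 */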

/**
 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. This function is the completion handler for NVME FCP commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_fcp_req *rsp;
	struct lpfc_async_xchg_ctx *ctxp;
	uint32_t status, result, op, start_clean, logerr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	int id;
#endif

	ctxp = cmdwqe->context2;
	ctxp->flag &= ~LPFC_NVME_IO_INP;

	rsp = &ctxp->hdlrctx.fcp_req;
	op = rsp->op;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	if (phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	else
		tgtp = NULL;

	lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
			 ctxp->oxid, op, status);

	if (status) {
		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
		rsp->transferred_length = 0;
		if (tgtp) {
			atomic_inc(&tgtp->xmt_fcp_rsp_error);
			if (result == IOERR_ABORT_REQUESTED)
				atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
		}

		logerr = LOG_NVME_IOERR;

		/* pick up SLI4 exchange busy condition */
		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
			ctxp->flag |= LPFC_NVME_XBUSY;
			logerr |= LOG_NVME_ABTS;
			if (tgtp)
				atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
		} else {
			ctxp->flag &= ~LPFC_NVME_XBUSY;
		}

		lpfc_printf_log(phba, KERN_INFO, logerr,
				"6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
				"XBUSY:x%x\n",
				ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
				status, result, ctxp->flag);

	} else {
		rsp->fcp_error = NVME_SC_SUCCESS;
		if (op == NVMET_FCOP_RSP)
			rsp->transferred_length = rsp->rsplen;
		else
			rsp->transferred_length = rsp->transfer_length;
		if (tgtp)
			atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
	}

	if ((op == NVMET_FCOP_READDATA_RSP) ||
	    (op == NVMET_FCOP_RSP)) {
		/* Sanity check */
		ctxp->state = LPFC_NVME_STE_DONE;
		ctxp->entry_cnt++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme) {
			if (rsp->op == NVMET_FCOP_READDATA_RSP) {
				ctxp->ts_isr_data =
					cmdwqe->isr_timestamp;
				ctxp->ts_data_nvme =
					ktime_get_ns();
				ctxp->ts_nvme_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_wqput =
					ctxp->ts_data_nvme;
				ctxp->ts_isr_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_nvme =
					ctxp->ts_data_nvme;
			} else {
				ctxp->ts_isr_status =
					cmdwqe->isr_timestamp;
				ctxp->ts_status_nvme =
					ktime_get_ns();
			}
		}
#endif
		rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme)
			lpfc_nvmet_ktime(phba, ctxp);
#endif
		/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
	} else {
		ctxp->entry_cnt++;
		start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
		memset(((char *)cmdwqe) + start_clean, 0,
		       (sizeof(struct lpfc_iocbq) - start_clean));
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme) {
			ctxp->ts_isr_data = cmdwqe->isr_timestamp;
			ctxp->ts_data_nvme = ktime_get_ns();
		}
#endif
		rsp->done(rsp);
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
		id = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
		if (ctxp->cpu != id)
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
					"6704 CPU Check cmdcmpl: "
					"cpu %d expect %d\n",
					id, ctxp->cpu);
	}
#endif
}

/**
 * __lpfc_nvme_xmt_ls_rsp - Generic service routine to issue transmit
 *         an NVME LS rsp for a prior NVME LS request that was received.
 * @axchg: pointer to exchange context for the NVME LS request the response
 *         is for.
 * @ls_rsp: pointer to the transport LS RSP that is to be sent
 * @xmt_ls_rsp_cmp: completion routine to call upon RSP transmit done
 *
 * This routine is used to format and send a WQE to transmit a NVME LS
 * Response. The response is for a prior NVME LS request that was
 * received and posted to the transport.
 *
 * Returns:
 *  0 : if response successfully transmitted
 *  non-zero : if response failed to transmit, of the form -Exxx.
 **/
int
__lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
		       struct nvmefc_ls_rsp *ls_rsp,
		       void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
					      struct lpfc_iocbq *cmdwqe,
					      struct lpfc_wcqe_complete *wcqe))
{
	struct lpfc_hba *phba = axchg->phba;
	struct hbq_dmabuf *nvmebuf = (struct hbq_dmabuf *)axchg->rqb_buffer;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_dmabuf dmabuf;
	struct ulp_bde64 bpl;
	int rc;

	if (phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6023 NVMEx LS rsp oxid x%x\n", axchg->oxid);

	if (axchg->state != LPFC_NVME_STE_LS_RCV || axchg->entry_cnt != 1) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6412 NVMEx LS rsp state mismatch "
				"oxid x%x: %d %d\n",
				axchg->oxid, axchg->state, axchg->entry_cnt);
		return -EALREADY;
	}
	axchg->state = LPFC_NVME_STE_LS_RSP;
	axchg->entry_cnt++;

	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, axchg, ls_rsp->rspdma,
					  ls_rsp->rsplen);
	if (nvmewqeq == NULL) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6150 NVMEx LS Drop Rsp x%x: Prep\n",
				axchg->oxid);
		rc = -ENOMEM;
		goto out_free_buf;
	}

	/* Save numBdes for bpl2sgl */
	nvmewqeq->rsvd2 = 1;
	nvmewqeq->hba_wqidx = 0;
	nvmewqeq->context3 = &dmabuf;
	dmabuf.virt = &bpl;
	bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
	bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
	bpl.tus.f.bdeSize = ls_rsp->rsplen;
	bpl.tus.f.bdeFlags = 0;
	bpl.tus.w = le32_to_cpu(bpl.tus.w);
	/*
	 * Note: although we're using stack space for the dmabuf, the
	 * call to lpfc_sli4_issue_wqe is synchronous, so it will not
	 * be referenced after it returns back to this routine.
	 */

	nvmewqeq->wqe_cmpl = xmt_ls_rsp_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = axchg;

	lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n",
			 axchg->oxid, nvmewqeq->hba_wqidx, ls_rsp->rsplen);

	rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq);

	/* clear to be sure there's no reference */
	nvmewqeq->context3 = NULL;

	if (rc == WQE_SUCCESS) {
		/*
		 * Okay to repost buffer here, but wait till cmpl
		 * before freeing ctxp and iocbq.
		 */
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		return 0;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6151 NVMEx LS RSP x%x: failed to transmit %d\n",
			axchg->oxid, rc);

	rc = -ENXIO;

	lpfc_nlp_put(nvmewqeq->context1);

out_free_buf:
	/* Give back resources */
	lpfc_in_buf_free(phba, &nvmebuf->dbuf);

	/*
	 * As transport doesn't track completions of responses, if the rsp
	 * fails to send, the transport will effectively ignore the rsp
	 * and consider the LS done. However, the driver has an active
	 * exchange open for the LS - so be sure to abort the exchange
	 * if the response isn't sent.
	 */
	lpfc_nvme_unsol_ls_issue_abort(phba, axchg, axchg->sid, axchg->oxid);
	return rc;
}

/**
 * lpfc_nvmet_xmt_ls_rsp - Transmit NVME LS response
 * @tgtport: pointer to target port that NVME LS is to be transmit from.
 * @ls_rsp: pointer to the transport LS RSP that is to be sent
 *
 * Driver registers this routine to transmit responses for received NVME
 * LS requests.
 *
 * This routine is used to format and send a WQE to transmit a NVME LS
 * Response. The ls_rsp is used to reverse-map the LS to the original
 * NVME LS request sequence, which provides addressing information for
 * the remote port the LS is to be sent to, as well as the exchange id
 * that the LS is bound to.
 *
 * Returns:
 *  0 : if response successfully transmitted
 *  non-zero : if response failed to transmit, of the form -Exxx.
 **/
static int
lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_ls_rsp *ls_rsp)
{
	struct lpfc_async_xchg_ctx *axchg =
		container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
	struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
	int rc;

	if (axchg->phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;

	rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, lpfc_nvmet_xmt_ls_rsp_cmp);

	if (rc) {
		atomic_inc(&nvmep->xmt_ls_drop);
		/*
		 * unless the failure is due to having already sent
		 * the response, an abort will be generated for the
		 * exchange if the rsp can't be sent.
		 */
		if (rc != -EALREADY)
			atomic_inc(&nvmep->xmt_ls_abort);
		return rc;
	}

	atomic_inc(&nvmep->xmt_ls_rsp);
	return 0;
}

static int
lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_async_xchg_ctx *ctxp =
		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_queue *wq;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	int rc;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	int id;
#endif

	if (phba->pport->load_flag & FC_UNLOADING) {
		rc = -ENODEV;
		goto aerr;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (ctxp->ts_cmd_nvme) {
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_nvme_status = ktime_get_ns();
		else
			ctxp->ts_nvme_data = ktime_get_ns();
	}

	/* Setup the hdw queue if not already set */
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];

	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
		id = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
		if (rsp->hwqid != id)
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
					"6705 CPU Check OP: "
					"cpu %d expect %d\n",
					id, rsp->hwqid);
		ctxp->cpu = id; /* Setup cpu for cmpl check */
	}
#endif

	/* Sanity check */
	if ((ctxp->flag & LPFC_NVME_ABTS_RCV) ||
	    (ctxp->state == LPFC_NVME_STE_ABORT)) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6102 IO oxid x%x aborted\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
	if (nvmewqeq == NULL) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6152 FCP Drop IO x%x: Prep\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = ctxp;
	nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
	ctxp->wqeq->hba_wqidx = rsp->hwqid;

	lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
			 ctxp->oxid, rsp->op, rsp->rsplen);

	ctxp->flag |= LPFC_NVME_IO_INP;
	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
	if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (!ctxp->ts_cmd_nvme)
			return 0;
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_status_wqput = ktime_get_ns();
		else
			ctxp->ts_data_wqput = ktime_get_ns();
#endif
		return 0;
	}

	if (rc == -EBUSY) {
		/*
		 * WQ was full, so queue nvmewqeq to be sent after
		 * WQE release CQE
		 */
		ctxp->flag |= LPFC_NVME_DEFER_WQFULL;
		wq = ctxp->hdwq->io_wq;
		pring = wq->pring;
		spin_lock_irqsave(&pring->ring_lock, iflags);
		list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
		wq->q_flag |= HBA_NVMET_WQFULL;
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		atomic_inc(&lpfc_nvmep->defer_wqfull);
		return 0;
	}

	/* Give back resources */
	atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6153 FCP Drop IO x%x: Issue: %d\n",
			ctxp->oxid, rc);

	ctxp->wqeq->hba_wqidx = 0;
	nvmewqeq->context2 = NULL;
	nvmewqeq->context3 = NULL;
	rc = -EBUSY;
aerr:
	return rc;
}
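
/*
 * lpfc_sli4_issue_wqe() returning -EBUSY means the hardware WQ is full;
 * the prepared WQE is parked on the WQ's wqfull_list and re-driven
 * later by lpfc_nvmet_wqfull_process() when WQE release CQEs free up
 * slots, so the transport still sees a successful (deferred) submit.
 */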

static void
lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct lpfc_nvmet_tgtport *tport = targetport->private;

	/* release any threads waiting for the unreg to complete */
	if (tport->phba->targetport)
		complete(tport->tport_unreg_cmp);
}

static void
lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			 struct nvmefc_tgt_fcp_req *req)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_async_xchg_ctx *ctxp =
		container_of(req, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_queue *wq;
	unsigned long flags;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[0];

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
			ctxp->oxid, ctxp->flag, ctxp->state);

	lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
			 ctxp->oxid, ctxp->flag, ctxp->state);

	atomic_inc(&lpfc_nvmep->xmt_fcp_abort);

	spin_lock_irqsave(&ctxp->ctxlock, flags);

	/* Since iaab/iaar are NOT set, we need to check
	 * if the firmware is in process of aborting IO
	 */
	if (ctxp->flag & (LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP)) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return;
	}
	ctxp->flag |= LPFC_NVME_ABORT_OP;

	if (ctxp->flag & LPFC_NVME_DEFER_WQFULL) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
		wq = ctxp->hdwq->io_wq;
		lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
		return;
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	/* A state of LPFC_NVME_STE_RCV means we have just received
	 * the NVME command and have not started processing it.
	 * (by issuing any IO WQEs on this exchange yet)
	 */
	if (ctxp->state == LPFC_NVME_STE_RCV)
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
	else
		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
					       ctxp->oxid);
}

static void
lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
			   struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_async_xchg_ctx *ctxp =
		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long flags;
	bool aborting = false;

	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVME_XBUSY)
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6027 NVMET release with XBUSY flag x%x"
				" oxid x%x\n",
				ctxp->flag, ctxp->oxid);
	else if (ctxp->state != LPFC_NVME_STE_DONE &&
		 ctxp->state != LPFC_NVME_STE_ABORT)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6413 NVMET release bad state %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);

	if ((ctxp->flag & LPFC_NVME_ABORT_OP) ||
	    (ctxp->flag & LPFC_NVME_XBUSY)) {
		aborting = true;
		/* let the abort path do the real release */
		lpfc_nvmet_defer_release(phba, ctxp);
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n",
			 ctxp->oxid, ctxp->state, aborting);

	atomic_inc(&lpfc_nvmep->xmt_fcp_release);
	ctxp->flag &= ~LPFC_NVME_TNOTIFY;

	if (aborting)
		return;

	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}

static void
lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
		     struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_async_xchg_ctx *ctxp =
		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long iflag;

	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
			 ctxp->oxid, ctxp->size, raw_smp_processor_id());

	if (!nvmebuf) {
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6425 Defer rcv: no buffer oxid x%x: "
				"flg %x ste %x\n",
				ctxp->oxid, ctxp->flag, ctxp->state);
		return;
	}

	tgtp = phba->targetport->private;
	if (tgtp)
		atomic_inc(&tgtp->rcv_fcp_cmd_defer);

	/* Free the nvmebuf since a new buffer already replaced it */
	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
	spin_lock_irqsave(&ctxp->ctxlock, iflag);
	ctxp->rqb_buffer = NULL;
	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
}
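
/*
 * The transport calls defer_rcv once it is finished with a command
 * payload it had deferred; since a replacement DMA buffer was already
 * posted to the RQ when the command was handed up, the held rqb buffer
 * can simply be freed here rather than reposted.
 */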

/**
 * lpfc_nvmet_ls_req_cmp - completion handler for a nvme ls request
 * @phba: Pointer to HBA context object
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * This function is the completion handler for NVME LS requests.
 * The function updates any states and statistics, then calls the
 * generic completion handler to finish completion of the request.
 **/
static void
lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
		      struct lpfc_wcqe_complete *wcqe)
{
	__lpfc_nvme_ls_req_cmp(phba, cmdwqe->vport, cmdwqe, wcqe);
}

/**
 * lpfc_nvmet_ls_req - Issue a Link Service request
 * @targetport: pointer to target instance registered with nvmet transport.
 * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
 *              Driver sets this value to the ndlp pointer.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
 *
 * Driver registers this routine to handle any link service request
 * from the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
static int
lpfc_nvmet_ls_req(struct nvmet_fc_target_port *targetport,
		  void *hosthandle,
		  struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	int ret;
	u32 hstate;

	if (!lpfc_nvmet)
		return -EINVAL;

	phba = lpfc_nvmet->phba;
	if (phba->pport->load_flag & FC_UNLOADING)
		return -EINVAL;

	hstate = atomic_read(&lpfc_nvmet->state);
	if (hstate == LPFC_NVMET_INV_HOST_ACTIVE)
		return -EACCES;

	ndlp = (struct lpfc_nodelist *)hosthandle;

	ret = __lpfc_nvme_ls_req(phba->pport, ndlp, pnvme_lsreq,
				 lpfc_nvmet_ls_req_cmp);

	return ret;
}

/**
 * lpfc_nvmet_ls_abort - Abort a prior NVME LS request
 * @targetport: Transport targetport, that LS was issued from.
 * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
 *              Driver sets this value to the ndlp pointer.
 * @pnvme_lsreq: the transport nvme_ls_req structure for LS to be aborted
 *
 * Driver registers this routine to abort an NVME LS request that is
 * in progress (from the transport's perspective).
 **/
static void
lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport,
		    void *hosthandle,
		    struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	int ret;

	phba = lpfc_nvmet->phba;
	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	ndlp = (struct lpfc_nodelist *)hosthandle;

	ret = __lpfc_nvme_ls_abort(phba->pport, ndlp, pnvme_lsreq);
	if (!ret)
		atomic_inc(&lpfc_nvmet->xmt_ls_abort);
}

static void
lpfc_nvmet_host_release(void *hosthandle)
{
	struct lpfc_nodelist *ndlp = hosthandle;
	struct lpfc_hba *phba = ndlp->phba;
	struct lpfc_nvmet_tgtport *tgtp;

	if (!phba->targetport || !phba->targetport->private)
		return;

	lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
			"6202 NVMET XPT releasing hosthandle x%px "
			"DID x%x xflags x%x refcnt %d\n",
			hosthandle, ndlp->nlp_DID, ndlp->fc4_xpt_flags,
			kref_read(&ndlp->kref));
	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	spin_lock_irq(&ndlp->lock);
	ndlp->fc4_xpt_flags &= ~NLP_XPT_HAS_HH;
	spin_unlock_irq(&ndlp->lock);

	atomic_set(&tgtp->state, 0);
}

static void
lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_hba *phba;
	uint32_t rc;

	tgtp = tgtport->private;
	phba = tgtp->phba;

	rc = lpfc_issue_els_rscn(phba->pport, 0);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6420 NVMET subsystem change: Notification %s\n",
			(rc) ? "Failed" : "Sent");
}

static struct nvmet_fc_target_template lpfc_tgttemplate = {
	.targetport_delete = lpfc_nvmet_targetport_delete,
	.xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
	.fcp_op         = lpfc_nvmet_xmt_fcp_op,
	.fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
	.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
	.defer_rcv	= lpfc_nvmet_defer_rcv,
	.discovery_event = lpfc_nvmet_discovery_event,
	.ls_req         = lpfc_nvmet_ls_req,
	.ls_abort       = lpfc_nvmet_ls_abort,
	.host_release   = lpfc_nvmet_host_release,

	.max_hw_queues  = 1,
	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* optional features */
	.target_features = 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
	.lsrqst_priv_sz = 0,
};
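
/*
 * Note: max_hw_queues and max_sgl_segments above are placeholders; they
 * are overwritten with the HBA's actual limits (cfg_hdw_queue and
 * cfg_nvme_seg_cnt + 1) in lpfc_nvmet_create_targetport() before the
 * template is registered with the nvmet-fc transport.
 */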

static void
__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
			      struct lpfc_nvmet_ctx_info *infop)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
	unsigned long flags;

	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
				 &infop->nvmet_ctx_list, list) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctx_buf->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);

		spin_lock(&phba->hbalock);
		__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
		spin_unlock(&phba->hbalock);

		ctx_buf->sglq->state = SGL_FREED;
		ctx_buf->sglq->ndlp = NULL;

		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_add_tail(&ctx_buf->sglq->list,
			      &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);

		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
		kfree(ctx_buf->context);
	}
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
}

static void
lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctx_info *infop;
	int i, j;

	/* The first context list, MRQ 0 CPU 0 */
	infop = phba->sli4_hba.nvmet_ctx_info;
	if (!infop)
		return;

	/* Cycle the entire CPU context list for every MRQ */
	for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
		for_each_present_cpu(j) {
			infop = lpfc_get_ctx_list(phba, j, i);
			__lpfc_nvmet_clean_io_for_cpu(phba, infop);
		}
	}
	kfree(phba->sli4_hba.nvmet_ctx_info);
	phba->sli4_hba.nvmet_ctx_info = NULL;
}

static int
lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe128 *wqe;
	struct lpfc_nvmet_ctx_info *last_infop;
	struct lpfc_nvmet_ctx_info *infop;
	int i, j, idx, cpu;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6403 Allocate NVMET resources for %d XRIs\n",
			phba->sli4_hba.nvmet_xri_cnt);

	phba->sli4_hba.nvmet_ctx_info = kcalloc(
		phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
		sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
	if (!phba->sli4_hba.nvmet_ctx_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6419 Failed allocate memory for "
				"nvmet context lists\n");
		return -ENOMEM;
	}

	/*
	 * Assuming X CPUs in the system, and Y MRQs, allocate some
	 * lpfc_nvmet_ctx_info structures as follows:
	 *
	 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
	 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
	 * ...
	 * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
	 *
	 * Each line represents a MRQ "silo" containing an entry for
	 * every CPU.
	 *
	 * MRQ X is initially assumed to be associated with CPU X, thus
	 * contexts are initially distributed across all MRQs using
	 * the MRQ index (N) as follows cpuN/mrqN. When contexts are
	 * freed, they are freed to the MRQ silo based on the CPU number
	 * of the IO completion. Thus a context that was allocated for MRQ A
	 * whose IO completed on CPU B will be freed to cpuB/mrqA.
	 */
	for_each_possible_cpu(i) {
		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
			infop = lpfc_get_ctx_list(phba, i, j);
			INIT_LIST_HEAD(&infop->nvmet_ctx_list);
			spin_lock_init(&infop->nvmet_ctx_list_lock);
			infop->nvmet_ctx_list_cnt = 0;
		}
	}

	/*
	 * Setup the next CPU context info ptr for each MRQ.
	 * MRQ 0 will cycle thru CPUs 0 - X separately from
	 * MRQ 1 cycling thru CPUs 0 - X, and so on.
	 */
	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
		last_infop = lpfc_get_ctx_list(phba,
					       cpumask_first(cpu_present_mask),
					       j);
		for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) {
			infop = lpfc_get_ctx_list(phba, i, j);
			infop->nvmet_ctx_next_cpu = last_infop;
			last_infop = infop;
		}
	}

	/* For all nvmet xris, allocate resources needed to process a
	 * received command on a per xri basis.
	 */
	idx = 0;
	cpu = cpumask_first(cpu_present_mask);
	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
		if (!ctx_buf) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6404 Ran out of memory for NVMET\n");
			return -ENOMEM;
		}

		ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
					   GFP_KERNEL);
		if (!ctx_buf->context) {
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6405 Ran out of NVMET "
					"context memory\n");
			return -ENOMEM;
		}
		ctx_buf->context->ctxbuf = ctx_buf;
		ctx_buf->context->state = LPFC_NVME_STE_FREE;

		ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
		if (!ctx_buf->iocbq) {
			kfree(ctx_buf->context);
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6406 Ran out of NVMET iocb/WQEs\n");
			return -ENOMEM;
		}
		ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
		nvmewqe = ctx_buf->iocbq;
		wqe = &nvmewqe->wqe;

		/* Initialize WQE */
		memset(wqe, 0, sizeof(union lpfc_wqe));

		ctx_buf->iocbq->context1 = NULL;
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		if (!ctx_buf->sglq) {
			lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
			kfree(ctx_buf->context);
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6407 Ran out of NVMET XRIs\n");
			return -ENOMEM;
		}
		INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);

		/*
		 * Add ctx to MRQidx context list. Our initial assumption
		 * is MRQidx will be associated with CPUidx. This association
		 * can change on the fly.
		 */
		infop = lpfc_get_ctx_list(phba, cpu, idx);
		spin_lock(&infop->nvmet_ctx_list_lock);
		list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
		infop->nvmet_ctx_list_cnt++;
		spin_unlock(&infop->nvmet_ctx_list_lock);

		/* Spread ctx structures evenly across all MRQs */
		idx++;
		if (idx >= phba->cfg_nvmet_mrq) {
			idx = 0;
			cpu = cpumask_first(cpu_present_mask);
			continue;
		}
		cpu = cpumask_next(cpu, cpu_present_mask);
		if (cpu == nr_cpu_ids)
			cpu = cpumask_first(cpu_present_mask);
	}

	for_each_present_cpu(i) {
		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
			infop = lpfc_get_ctx_list(phba, i, j);
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
					"6408 TOTAL NVMET ctx for CPU %d "
					"MRQ %d: cnt %d nextcpu x%px\n",
					i, j, infop->nvmet_ctx_list_cnt,
					infop->nvmet_ctx_next_cpu);
		}
	}
	return 0;
}
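
/*
 * Worked example of the initial spread: with 2 MRQs the loop above
 * seeds contexts at (cpu0, mrq0), (cpu1, mrq1), (cpu0, mrq0),
 * (cpu1, mrq1), ... - only the cpuN/mrqN entries, per the rule in the
 * comment block earlier. Contexts then migrate to other silos at run
 * time when lpfc_nvmet_ctxbuf_post() frees them on the completing CPU.
 */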

int
lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmet_fc_port_info pinfo;
	int error;

	if (phba->targetport)
		return 0;

	error = lpfc_nvmet_setup_io_context(phba);
	if (error)
		return error;

	memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
	pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
	pinfo.port_id = vport->fc_myDID;

	/* We need to tell the transport layer + 1 because it takes page
	 * alignment into account. When space for the SGL is allocated we
	 * allocate + 3, one for cmd, one for rsp and one for this alignment
	 */
	lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
	lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
					     &phba->pcidev->dev,
					     &phba->targetport);
#else
	error = -ENOENT;
#endif
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6025 Cannot register NVME targetport x%x: "
				"portnm %llx nodenm %llx segs %d qs %d\n",
				error,
				pinfo.port_name, pinfo.node_name,
				lpfc_tgttemplate.max_sgl_segments,
				lpfc_tgttemplate.max_hw_queues);
		phba->targetport = NULL;
		phba->nvmet_support = 0;

		lpfc_nvmet_cleanup_io_context(phba);

	} else {
		tgtp = (struct lpfc_nvmet_tgtport *)
			phba->targetport->private;
		tgtp->phba = phba;

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
				"6026 Registered NVME "
				"targetport: x%px, private x%px "
				"portnm %llx nodenm %llx segs %d qs %d\n",
				phba->targetport, tgtp,
				pinfo.port_name, pinfo.node_name,
				lpfc_tgttemplate.max_sgl_segments,
				lpfc_tgttemplate.max_hw_queues);

		atomic_set(&tgtp->rcv_ls_req_in, 0);
		atomic_set(&tgtp->rcv_ls_req_out, 0);
		atomic_set(&tgtp->rcv_ls_req_drop, 0);
		atomic_set(&tgtp->xmt_ls_abort, 0);
		atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
		atomic_set(&tgtp->xmt_ls_rsp, 0);
		atomic_set(&tgtp->xmt_ls_drop, 0);
		atomic_set(&tgtp->xmt_ls_rsp_error, 0);
		atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
		atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
		atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
		atomic_set(&tgtp->xmt_fcp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_read, 0);
		atomic_set(&tgtp->xmt_fcp_write, 0);
		atomic_set(&tgtp->xmt_fcp_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_release, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
		atomic_set(&tgtp->xmt_fcp_abort, 0);
		atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
		atomic_set(&tgtp->xmt_abort_unsol, 0);
		atomic_set(&tgtp->xmt_abort_sol, 0);
		atomic_set(&tgtp->xmt_abort_rsp, 0);
		atomic_set(&tgtp->xmt_abort_rsp_error, 0);
		atomic_set(&tgtp->defer_ctx, 0);
		atomic_set(&tgtp->defer_fod, 0);
		atomic_set(&tgtp->defer_wqfull, 0);
	}
	return error;
}

int
lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	if (!phba->targetport)
		return 0;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6007 Update NVMET port x%px did x%x\n",
			 phba->targetport, vport->fc_myDID);

	phba->targetport->port_id = vport->fc_myDID;
	return 0;
}

/**
 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the nvmet xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVMET aborted xri.
 **/
void
lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
			    struct sli4_wcqe_xri_aborted *axri)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_fcp_req *req = NULL;
	struct lpfc_nodelist *ndlp;
	unsigned long iflag = 0;
	int rrq_empty = 0;
	bool released = false;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6317 XB aborted xri x%x rxid x%x\n", xri, rxid);

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
		return;

	if (phba->targetport) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
	}

	spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
	list_for_each_entry_safe(ctxp, next_ctxp,
				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 list) {
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		spin_lock(&ctxp->ctxlock);
		/* Check if we already received a free context call
		 * and we have completed processing an abort situation.
		 */
		if (ctxp->flag & LPFC_NVME_CTX_RLS &&
		    !(ctxp->flag & LPFC_NVME_ABORT_OP)) {
			list_del_init(&ctxp->list);
			released = true;
		}
		ctxp->flag &= ~LPFC_NVME_XBUSY;
		spin_unlock(&ctxp->ctxlock);
		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
				       iflag);

		rrq_empty = list_empty(&phba->active_rrq_list);
		ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
		if (ndlp &&
		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
		     ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
			lpfc_set_rrq_active(phba, ndlp,
				ctxp->ctxbuf->sglq->sli4_lxritag,
				rxid, 1);
			lpfc_sli4_abts_err_handler(phba, ndlp, axri);
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6318 XB aborted oxid x%x flg x%x (%x)\n",
				ctxp->oxid, ctxp->flag, released);
		if (released)
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

		if (rrq_empty)
			lpfc_worker_wake_up(phba);
		return;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
	ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
	if (ctxp) {
		/*
		 * Abort already done by FW, so BA_ACC sent.
		 * However, the transport may be unaware.
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
				"flag x%x oxid x%x rxid x%x\n",
				xri, ctxp->state, ctxp->flag, ctxp->oxid,
				rxid);

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVME_ABTS_RCV;
		ctxp->state = LPFC_NVME_STE_ABORT;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		lpfc_nvmeio_data(phba,
				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
				 xri, raw_smp_processor_id(), 0);

		req = &ctxp->hdlrctx.fcp_req;

		nvmet_fc_rcv_fcp_abort(phba->targetport, req);
	}
#endif
}
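
/*
 * Two lookups happen above: the context is normally found on
 * lpfc_abts_nvmet_ctx_list (a driver abort was in flight), in which
 * case RRQ bookkeeping and the deferred ctxbuf release are handled; if
 * not, a still-active exchange is located by XRI so the transport can
 * at least be told the exchange was aborted by the firmware.
 */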

int
lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
			   struct fc_frame_header *fc_hdr)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
	struct nvmefc_tgt_fcp_req *rsp;
	uint32_t sid;
	uint16_t oxid, xri;
	unsigned long iflag = 0;

	sid = sli4_sid_from_fc_hdr(fc_hdr);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);

	spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
	list_for_each_entry_safe(ctxp, next_ctxp,
				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 list) {
		if (ctxp->oxid != oxid || ctxp->sid != sid)
			continue;

		xri = ctxp->ctxbuf->sglq->sli4_xritag;

		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
				       iflag);
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVME_ABTS_RCV;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		lpfc_nvmeio_data(phba,
				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
				 xri, raw_smp_processor_id(), 0);

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);

		rsp = &ctxp->hdlrctx.fcp_req;
		nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);

		/* Respond with BA_ACC accordingly */
		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
		return 0;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);

	/* check the wait list */
	if (phba->sli4_hba.nvmet_io_wait_cnt) {
		struct rqb_dmabuf *nvmebuf;
		struct fc_frame_header *fc_hdr_tmp;
		u32 sid_tmp;
		u16 oxid_tmp;
		bool found = false;

		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

		/* match by oxid and s_id */
		list_for_each_entry(nvmebuf,
				    &phba->sli4_hba.lpfc_nvmet_io_wait_list,
				    hbuf.list) {
			fc_hdr_tmp = (struct fc_frame_header *)
					(nvmebuf->hbuf.virt);
			oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id);
			sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp);
			if (oxid_tmp != oxid || sid_tmp != sid)
				continue;

			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
					"6321 NVMET Rcv ABTS oxid x%x from x%x "
					"is waiting for a ctxp\n",
					oxid, sid);

			list_del_init(&nvmebuf->hbuf.list);
			phba->sli4_hba.nvmet_io_wait_cnt--;
			found = true;
			break;
		}
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		/* free buffer since already posted a new DMA buffer to RQ */
		if (found) {
			nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
			/* Respond with BA_ACC accordingly */
			lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
			return 0;
		}
	}

	/* check active list */
	ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
	if (ctxp) {
		xri = ctxp->ctxbuf->sglq->sli4_xritag;

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= (LPFC_NVME_ABTS_RCV | LPFC_NVME_ABORT_OP);
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		lpfc_nvmeio_data(phba,
				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
				 xri, raw_smp_processor_id(), 0);

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6322 NVMET Rcv ABTS:acc oxid x%x xri x%x "
				"flag x%x state x%x\n",
				ctxp->oxid, xri, ctxp->flag, ctxp->state);

		if (ctxp->flag & LPFC_NVME_TNOTIFY) {
			/* Notify the transport */
			nvmet_fc_rcv_fcp_abort(phba->targetport,
					       &ctxp->hdlrctx.fcp_req);
		} else {
			cancel_work_sync(&ctxp->ctxbuf->defer_work);
			spin_lock_irqsave(&ctxp->ctxlock, iflag);
			lpfc_nvmet_defer_release(phba, ctxp);
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
		}
		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
					       ctxp->oxid);

		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
		return 0;
	}

	lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
			 oxid, raw_smp_processor_id(), 1);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);

	/* Respond with BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
#endif
	return 0;
}
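
/*
 * An unsolicited ABTS is matched against three places in order: the
 * abts context list (abort already in progress), the io_wait list
 * (command received but no free context yet), and the active exchange
 * list. A match in any of them is answered with BA_ACC; only a
 * completely unknown OX_ID/S_ID pair is rejected with BA_RJT.
 */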
2000 lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
2001 struct lpfc_async_xchg_ctx *ctxp)
2003 struct lpfc_sli_ring *pring;
2004 struct lpfc_iocbq *nvmewqeq;
2005 struct lpfc_iocbq *next_nvmewqeq;
2006 unsigned long iflags;
2007 struct lpfc_wcqe_complete wcqe;
2008 struct lpfc_wcqe_complete *wcqep;
2013 /* Fake an ABORT error code back to cmpl routine */
2014 memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
2015 bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
2016 wcqep->parameter = IOERR_ABORT_REQUESTED;
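/* Each WQE flushed below is completed through its normal completion
 * handler with this synthesized LOCAL_REJECT/ABORT status, so the
 * upper layers observe an ordinary aborted IO.
 */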
2018 spin_lock_irqsave(&pring->ring_lock, iflags);
2019 list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
2020 &wq->wqfull_list, list) {
2021 if (ctxp) {
2022 /* Checking for a specific IO to flush */
2023 if (nvmewqeq->context2 == ctxp) {
2024 list_del(&nvmewqeq->list);
2025 spin_unlock_irqrestore(&pring->ring_lock,
2026 iflags);
2027 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
2028 wcqep);
2029 return;
2030 }
2031 continue;
2032 } else {
2033 /* Flush all IOs */
2034 list_del(&nvmewqeq->list);
2035 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2036 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
2037 spin_lock_irqsave(&pring->ring_lock, iflags);
2038 }
2039 }
2040 if (!ctxp)
2041 wq->q_flag &= ~HBA_NVMET_WQFULL;
2042 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2043 }
2045 void
2046 lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
2047 struct lpfc_queue *wq)
2048 {
2049 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2050 struct lpfc_sli_ring *pring;
2051 struct lpfc_iocbq *nvmewqeq;
2052 struct lpfc_async_xchg_ctx *ctxp;
2053 unsigned long iflags;
2054 int rc;
2056 /*
2057 * Some WQE slots are available, so try to re-issue anything
2058 * on the WQ wqfull_list.
2059 */
2060 pring = wq->pring;
2061 spin_lock_irqsave(&pring->ring_lock, iflags);
2062 while (!list_empty(&wq->wqfull_list)) {
2063 list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
2064 list);
2065 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2066 ctxp = (struct lpfc_async_xchg_ctx *)nvmewqeq->context2;
2067 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
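/* Re-take the ring lock before examining the list again; an -EBUSY
 * return below means the WQ filled up again and the WQE must go back
 * on the wqfull_list.
 */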
2068 spin_lock_irqsave(&pring->ring_lock, iflags);
2069 if (rc == -EBUSY) {
2070 /* WQ was full again, so put it back on the list */
2071 list_add(&nvmewqeq->list, &wq->wqfull_list);
2072 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2073 return;
2074 }
2075 if (rc == WQE_SUCCESS) {
2076 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2077 if (ctxp->ts_cmd_nvme) {
2078 if (ctxp->hdlrctx.fcp_req.op == NVMET_FCOP_RSP)
2079 ctxp->ts_status_wqput = ktime_get_ns();
2080 else
2081 ctxp->ts_data_wqput = ktime_get_ns();
2082 }
2083 #endif
2084 } else {
2085 WARN_ON(rc);
2086 }
2087 }
2088 wq->q_flag &= ~HBA_NVMET_WQFULL;
2089 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2090 #endif
2091 }
2094 void
2095 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
2096 {
2097 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2098 struct lpfc_nvmet_tgtport *tgtp;
2099 struct lpfc_queue *wq;
2100 uint32_t qidx;
2101 DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
2103 if (phba->nvmet_support == 0)
2104 return;
2105 if (phba->targetport) {
2106 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2107 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
2108 wq = phba->sli4_hba.hdwq[qidx].io_wq;
2109 lpfc_nvmet_wqfull_flush(phba, wq, NULL);
2110 }
2111 tgtp->tport_unreg_cmp = &tport_unreg_cmp;
2112 nvmet_fc_unregister_targetport(phba->targetport);
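/* The unregister is asynchronous: the transport signals completion
 * through the driver's targetport_delete callback, which completes
 * tport_unreg_cmp. Bound the wait so a stuck unregister cannot hang
 * the teardown path.
 */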
2113 if (!wait_for_completion_timeout(&tport_unreg_cmp,
2114 msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
2115 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2116 "6179 Unreg targetport x%px timeout "
2117 "reached.\n", phba->targetport);
2118 lpfc_nvmet_cleanup_io_context(phba);
2119 }
2120 phba->targetport = NULL;
2121 #endif
2122 }
2124 /**
2125 * lpfc_nvmet_handle_lsreq - Process an NVME LS request
2126 * @phba: pointer to lpfc hba data structure.
2127 * @axchg: pointer to exchange context for the NVME LS request
2129 * This routine is used for processing an asynchronously received NVME LS
2130 * request. Any remaining validation is done and the LS is then forwarded
2131 * to the nvmet-fc transport via nvmet_fc_rcv_ls_req().
2133 * The calling sequence should be: nvmet_fc_rcv_ls_req() -> (processing)
2134 * -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done.
2135 * lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg.
2137 * Returns 0 if LS was handled and delivered to the transport
2138 * Returns 1 if LS failed to be handled and should be dropped
2139 */
2140 static int
2141 lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba,
2142 struct lpfc_async_xchg_ctx *axchg)
2143 {
2144 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2145 struct lpfc_nvmet_tgtport *tgtp = phba->targetport->private;
2146 uint32_t *payload = axchg->payload;
2147 int rc;
2149 atomic_inc(&tgtp->rcv_ls_req_in);
2151 /*
2152 * Driver passes the ndlp as the hosthandle argument allowing
2153 * the transport to generate LS requests for any associations
2154 * assigned to the targetport.
2155 */
2156 rc = nvmet_fc_rcv_ls_req(phba->targetport, axchg->ndlp, &axchg->ls_rsp,
2157 axchg->payload, axchg->size);
2159 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2160 "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
2161 "%08x %08x %08x\n", axchg->size, rc,
2162 *payload, *(payload+1), *(payload+2),
2163 *(payload+3), *(payload+4), *(payload+5));
2165 if (!rc) {
2166 atomic_inc(&tgtp->rcv_ls_req_out);
2167 return 0;
2168 }
2170 atomic_inc(&tgtp->rcv_ls_req_drop);
2171 #endif
2172 return 1;
2173 }
2175 static void
2176 lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
2177 {
2178 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2179 struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
2180 struct lpfc_hba *phba = ctxp->phba;
2181 struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
2182 struct lpfc_nvmet_tgtport *tgtp;
2183 uint32_t *payload, qno;
2184 uint32_t rc;
2185 unsigned long iflags;
2187 if (!nvmebuf) {
2188 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2189 "6159 process_rcv_fcp_req, nvmebuf is NULL, "
2190 "oxid: x%x flg: x%x state: x%x\n",
2191 ctxp->oxid, ctxp->flag, ctxp->state);
2192 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2193 lpfc_nvmet_defer_release(phba, ctxp);
2194 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2195 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
2196 ctxp->oxid);
2197 return;
2198 }
2200 if (ctxp->flag & LPFC_NVME_ABTS_RCV) {
2201 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2202 "6324 IO oxid x%x aborted\n",
2207 payload = (uint32_t *)(nvmebuf->dbuf.virt);
2208 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2209 ctxp->flag |= LPFC_NVME_TNOTIFY;
2210 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2211 if (ctxp->ts_isr_cmd)
2212 ctxp->ts_cmd_nvme = ktime_get_ns();
2213 #endif
2214 /*
2215 * The calling sequence should be:
2216 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
2217 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
2218 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
2219 * from the NVME command / FC header has been stored.
2220 * A buffer has already been reposted for this IO, so just free
2221 * the nvmebuf.
2222 */
2223 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->hdlrctx.fcp_req,
2224 payload, ctxp->size);
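/* Three outcomes from the transport: 0 means the command was accepted
 * and is in progress; -EOVERFLOW means processing is deferred until a
 * .defer_rcv callback; anything else means the IO must be dropped and
 * the exchange aborted.
 */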
2225 /* Process FCP command */
2226 if (rc == 0) {
2227 atomic_inc(&tgtp->rcv_fcp_cmd_out);
2228 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2229 if ((ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) ||
2230 (nvmebuf != ctxp->rqb_buffer)) {
2231 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2232 return;
2233 }
2234 ctxp->rqb_buffer = NULL;
2235 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2236 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2237 return;
2238 }
2240 /* Processing of FCP command is deferred */
2241 if (rc == -EOVERFLOW) {
2242 lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
2243 "from %06x\n",
2244 ctxp->oxid, ctxp->size, ctxp->sid);
2245 atomic_inc(&tgtp->rcv_fcp_cmd_out);
2246 atomic_inc(&tgtp->defer_fod);
2247 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2248 if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
2249 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2250 return;
2251 }
2252 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2253 /*
2254 * Post a replacement DMA buffer to RQ and defer
2255 * freeing rcv buffer till .defer_rcv callback
2256 */
2257 qno = nvmebuf->idx;
2258 lpfc_post_rq_buffer(
2259 phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2260 phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2261 return;
2262 }
2263 ctxp->flag &= ~LPFC_NVME_TNOTIFY;
2264 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2265 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2266 "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
2268 atomic_read(&tgtp->rcv_fcp_cmd_in),
2269 atomic_read(&tgtp->rcv_fcp_cmd_out),
2270 atomic_read(&tgtp->xmt_fcp_release));
2271 lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
2272 ctxp->oxid, ctxp->size, ctxp->sid);
2273 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2274 lpfc_nvmet_defer_release(phba, ctxp);
2275 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2276 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
2277 #endif
2278 }
2280 static void
2281 lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
2282 {
2283 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2284 struct lpfc_nvmet_ctxbuf *ctx_buf =
2285 container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);
2287 lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2288 #endif
2289 }
2291 static struct lpfc_nvmet_ctxbuf *
2292 lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
2293 struct lpfc_nvmet_ctx_info *current_infop)
2294 {
2295 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2296 struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
2297 struct lpfc_nvmet_ctx_info *get_infop;
2298 int i;
2300 /*
2301 * The current_infop for the MRQ an NVME command IU was received
2302 * on is empty. Our goal is to replenish this MRQ's context
2303 * list from another CPU's list.
2304 *
2305 * First we need to pick a context list to start looking on.
2306 * nvmet_ctx_start_cpu had an available context the last time
2307 * we needed to replenish this CPU, whereas nvmet_ctx_next_cpu
2308 * is just the next sequential CPU for this MRQ.
2309 */
2310 if (current_infop->nvmet_ctx_start_cpu)
2311 get_infop = current_infop->nvmet_ctx_start_cpu;
2312 else
2313 get_infop = current_infop->nvmet_ctx_next_cpu;
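/* Walk at most one full round of the per-CPU lists for this MRQ,
 * skipping our own (already empty) list.
 */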
2315 for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
2316 if (get_infop == current_infop) {
2317 get_infop = get_infop->nvmet_ctx_next_cpu;
2318 continue;
2319 }
2320 spin_lock(&get_infop->nvmet_ctx_list_lock);
2322 /* Just take the entire context list, if there are any */
2323 if (get_infop->nvmet_ctx_list_cnt) {
2324 list_splice_init(&get_infop->nvmet_ctx_list,
2325 &current_infop->nvmet_ctx_list);
2326 current_infop->nvmet_ctx_list_cnt =
2327 get_infop->nvmet_ctx_list_cnt - 1;
2328 get_infop->nvmet_ctx_list_cnt = 0;
2329 spin_unlock(&get_infop->nvmet_ctx_list_lock);
2331 current_infop->nvmet_ctx_start_cpu = get_infop;
2332 list_remove_head(&current_infop->nvmet_ctx_list,
2333 ctx_buf, struct lpfc_nvmet_ctxbuf,
2334 list);
2335 return ctx_buf;
2336 }
2338 /* Otherwise, move on to the next CPU for this MRQ */
2339 spin_unlock(&get_infop->nvmet_ctx_list_lock);
2340 get_infop = get_infop->nvmet_ctx_next_cpu;
2341 }
2343 #endif
2344 /* Nothing found, all contexts for the MRQ are in-flight */
2345 return NULL;
2346 }
2348 /**
2349 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
2350 * @phba: pointer to lpfc hba data structure.
2351 * @idx: relative index of MRQ vector
2352 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
2353 * @isr_timestamp: in jiffies.
2354 * @cqflag: cq processing information regarding workload.
2356 * This routine is used for processing the WQE associated with an unsolicited
2357 * event. It first determines whether there is an existing ndlp that matches
2358 * the DID from the unsolicited WQE. If not, it will create a new one with
2359 * the DID from the unsolicited WQE. The ELS command from the unsolicited
2360 * WQE is then used to invoke the proper routine and to set up proper state
2361 * of the discovery state machine.
2362 */
2363 static void
2364 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
2365 uint32_t idx,
2366 struct rqb_dmabuf *nvmebuf,
2367 uint64_t isr_timestamp,
2368 uint8_t cqflag)
2369 {
2370 struct lpfc_async_xchg_ctx *ctxp;
2371 struct lpfc_nvmet_tgtport *tgtp;
2372 struct fc_frame_header *fc_hdr;
2373 struct lpfc_nvmet_ctxbuf *ctx_buf;
2374 struct lpfc_nvmet_ctx_info *current_infop;
2375 uint32_t size, oxid, sid, qno;
2376 unsigned long iflag;
2377 int current_cpu;
2379 if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
2380 return;
2382 ctx_buf = NULL;
2383 if (!nvmebuf || !phba->targetport) {
2384 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2385 "6157 NVMET FCP Drop IO\n");
2386 if (nvmebuf)
2387 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2388 return;
2389 }
2391 /*
2392 * Get a pointer to the context list for this MRQ based on
2393 * the CPU this MRQ IRQ is associated with. If the CPU association
2394 * changes from our initial assumption, the context list could
2395 * be empty, thus it would need to be replenished with the
2396 * context list from another CPU for this MRQ.
2397 */
2398 current_cpu = raw_smp_processor_id();
2399 current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
2400 spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
2401 if (current_infop->nvmet_ctx_list_cnt) {
2402 list_remove_head(&current_infop->nvmet_ctx_list,
2403 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
2404 current_infop->nvmet_ctx_list_cnt--;
2405 } else {
2406 ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
2407 }
2408 spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
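/* If ctx_buf is still NULL here, every context for this MRQ is
 * in-flight; the command IU is parked on the io_wait list below until
 * a context is released.
 */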
2410 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
2411 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2412 size = nvmebuf->bytes_recv;
2414 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2415 if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
2416 this_cpu_inc(phba->sli4_hba.c_stat->rcv_io);
2417 if (idx != current_cpu)
2418 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2419 "6703 CPU Check rcv: "
2420 "cpu %d expect %d\n",
2425 lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
2426 oxid, size, raw_smp_processor_id());
2428 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2430 if (!ctx_buf) {
2431 /* Queue this NVME IO to process later */
2432 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
2433 list_add_tail(&nvmebuf->hbuf.list,
2434 &phba->sli4_hba.lpfc_nvmet_io_wait_list);
2435 phba->sli4_hba.nvmet_io_wait_cnt++;
2436 phba->sli4_hba.nvmet_io_wait_total++;
2437 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
2438 iflag);
2440 /* Post a brand new DMA buffer to RQ */
2441 qno = nvmebuf->idx;
2442 lpfc_post_rq_buffer(
2443 phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2444 phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
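/* The raw frame itself stays queued until a context frees up; freed
 * contexts re-drive the io_wait list (see lpfc_nvmet_ctxbuf_post()).
 */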
2446 atomic_inc(&tgtp->defer_ctx);
2447 return;
2448 }
2450 sid = sli4_sid_from_fc_hdr(fc_hdr);
2452 ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
2453 spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
2454 list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
2455 spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
2456 if (ctxp->state != LPFC_NVME_STE_FREE) {
2457 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2458 "6414 NVMET Context corrupt %d %d oxid x%x\n",
2459 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
2460 }
2461 ctxp->wqeq = NULL;
2462 ctxp->offset = 0;
2463 ctxp->phba = phba;
2464 ctxp->size = size;
2465 ctxp->oxid = oxid;
2466 ctxp->sid = sid;
2467 ctxp->idx = idx;
2468 ctxp->state = LPFC_NVME_STE_RCV;
2469 ctxp->entry_cnt = 1;
2470 ctxp->flag = 0;
2471 ctxp->ctxbuf = ctx_buf;
2472 ctxp->rqb_buffer = (void *)nvmebuf;
2473 ctxp->hdwq = NULL;
2474 spin_lock_init(&ctxp->ctxlock);
2476 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2477 if (isr_timestamp)
2478 ctxp->ts_isr_cmd = isr_timestamp;
2479 ctxp->ts_cmd_nvme = 0;
2480 ctxp->ts_nvme_data = 0;
2481 ctxp->ts_data_wqput = 0;
2482 ctxp->ts_isr_data = 0;
2483 ctxp->ts_data_nvme = 0;
2484 ctxp->ts_nvme_status = 0;
2485 ctxp->ts_status_wqput = 0;
2486 ctxp->ts_isr_status = 0;
2487 ctxp->ts_status_nvme = 0;
2488 #endif
2490 atomic_inc(&tgtp->rcv_fcp_cmd_in);
2491 /* check for cq processing load */
2492 if (!cqflag) {
2493 lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2494 return;
2495 }
2497 if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
2498 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2499 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2500 "6325 Unable to queue work for oxid x%x. "
2501 "FCP Drop IO [x%x x%x x%x]\n",
2503 atomic_read(&tgtp->rcv_fcp_cmd_in),
2504 atomic_read(&tgtp->rcv_fcp_cmd_out),
2505 atomic_read(&tgtp->xmt_fcp_release));
2507 spin_lock_irqsave(&ctxp->ctxlock, iflag);
2508 lpfc_nvmet_defer_release(phba, ctxp);
2509 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
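/* Work queue submission failed, so unwind: defer the context release
 * and abort the exchange so the initiator can recover the IO.
 */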
2510 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
2511 }
2512 }
2514 /**
2515 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
2516 * @phba: pointer to lpfc hba data structure.
2517 * @idx: relative index of MRQ vector
2518 * @nvmebuf: pointer to received nvme data structure.
2519 * @isr_timestamp: in jiffies.
2520 * @cqflag: cq processing information regarding workload.
2522 * This routine is used to process an unsolicited event received from a SLI
2523 * (Service Level Interface) ring. The actual processing of the data buffer
2524 * associated with the unsolicited event is done by invoking the routine
2525 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
2526 * SLI RQ on which the unsolicited event was received.
2527 */
2528 void
2529 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
2530 uint32_t idx,
2531 struct rqb_dmabuf *nvmebuf,
2532 uint64_t isr_timestamp,
2533 uint8_t cqflag)
2534 {
2535 if (!nvmebuf) {
2536 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2537 "3167 NVMET FCP Drop IO\n");
2540 if (phba->nvmet_support == 0) {
2541 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2542 return;
2543 }
2544 lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
2545 }
2547 /**
2548 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare an lpfc wqe data structure
2549 * @phba: pointer to a host N_Port data structure.
2550 * @ctxp: Context info for NVME LS Request
2551 * @rspbuf: DMA buffer of NVME command.
2552 * @rspsize: size of the NVME command.
2554 * This routine is used for allocating a lpfc-WQE data structure from
2555 * the driver lpfc-WQE free-list and prepare the WQE with the parameters
2556 * passed into the routine for the discovery state machine to issue an
2557 * Extended Link Service (NVME) command. It is a generic lpfc-WQE allocation
2558 * and preparation routine that is used by all the discovery state machine
2559 * routines and the NVME command-specific fields will be later set up by
2560 * the individual discovery machine routines after calling this routine
2561 * to allocate and prepare a generic WQE data structure. It fills in the
2562 * Buffer Descriptor Entries (BDEs), allocates buffers for both command
2563 * payload and response payload (if expected). The reference count on the
2564 * ndlp is incremented by 1 and the reference to the ndlp is put into
2565 * context1 of the WQE data structure for this WQE to hold the ndlp
2566 * reference for the command's callback function to access later.
2568 * Return code
2569 * Pointer to the newly allocated/prepared nvme wqe data structure
2570 * NULL - when nvme wqe data structure allocation/preparation failed
2571 */
2572 static struct lpfc_iocbq *
2573 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
2574 struct lpfc_async_xchg_ctx *ctxp,
2575 dma_addr_t rspbuf, uint16_t rspsize)
2576 {
2577 struct lpfc_nodelist *ndlp;
2578 struct lpfc_iocbq *nvmewqe;
2579 union lpfc_wqe128 *wqe;
2581 if (!lpfc_is_link_up(phba)) {
2582 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2583 "6104 NVMET prep LS wqe: link err: "
2584 "NPORT x%x oxid:x%x ste %d\n",
2585 ctxp->sid, ctxp->oxid, ctxp->state);
2586 return NULL;
2587 }
2589 /* Allocate buffer for command wqe */
2590 nvmewqe = lpfc_sli_get_iocbq(phba);
2591 if (nvmewqe == NULL) {
2592 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2593 "6105 NVMET prep LS wqe: No WQE: "
2594 "NPORT x%x oxid x%x ste %d\n",
2595 ctxp->sid, ctxp->oxid, ctxp->state);
2596 return NULL;
2597 }
2599 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2600 if (!ndlp ||
2601 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2602 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2603 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2604 "6106 NVMET prep LS wqe: No ndlp: "
2605 "NPORT x%x oxid x%x ste %d\n",
2606 ctxp->sid, ctxp->oxid, ctxp->state);
2607 goto nvme_wqe_free_wqeq_exit;
2608 }
2609 ctxp->wqeq = nvmewqe;
2611 /* prevent preparing wqe with NULL ndlp reference */
2612 nvmewqe->context1 = lpfc_nlp_get(ndlp);
2613 if (nvmewqe->context1 == NULL)
2614 goto nvme_wqe_free_wqeq_exit;
2615 nvmewqe->context2 = ctxp;
2617 wqe = &nvmewqe->wqe;
2618 memset(wqe, 0, sizeof(union lpfc_wqe));
2621 wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2622 wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
2623 wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
2624 wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
2631 bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
2632 bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
2633 bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
2634 bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
2635 bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
2638 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
2639 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2640 bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
2643 bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
2644 CMD_XMIT_SEQUENCE64_WQE);
2645 bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
2646 bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
2647 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
2650 wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
2653 bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
2654 /* Needs to be set by caller */
2655 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
2658 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
2659 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2660 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
2661 LPFC_WQE_LENLOC_WORD12);
2662 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
2665 bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
2666 LPFC_WQE_CQ_ID_DEFAULT);
2667 bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
2668 OTHER_COMMAND);
2671 wqe->xmit_sequence.xmit_len = rspsize;
2674 nvmewqe->vport = phba->pport;
2675 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
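/* Driver timeout: roughly three times R_A_TOV plus driver slack. */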
2676 nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
2678 /* Xmit NVMET response to remote NPORT <did> */
2679 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2680 "6039 Xmit NVMET LS response to remote "
2681 "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
2682 ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
2683 rspsize);
2684 return nvmewqe;
2686 nvme_wqe_free_wqeq_exit:
2687 nvmewqe->context2 = NULL;
2688 nvmewqe->context3 = NULL;
2689 lpfc_sli_release_iocbq(phba, nvmewqe);
2690 return NULL;
2691 }
2694 static struct lpfc_iocbq *
2695 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
2696 struct lpfc_async_xchg_ctx *ctxp)
2697 {
2698 struct nvmefc_tgt_fcp_req *rsp = &ctxp->hdlrctx.fcp_req;
2699 struct lpfc_nvmet_tgtport *tgtp;
2700 struct sli4_sge *sgl;
2701 struct lpfc_nodelist *ndlp;
2702 struct lpfc_iocbq *nvmewqe;
2703 struct scatterlist *sgel;
2704 union lpfc_wqe128 *wqe;
2705 struct ulp_bde64 *bde;
2706 dma_addr_t physaddr;
2707 int i, cnt, nsegs;
2708 bool use_pbde = false;
2709 int xc = 1;
2711 if (!lpfc_is_link_up(phba)) {
2712 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2713 "6107 NVMET prep FCP wqe: link err:"
2714 "NPORT x%x oxid x%x ste %d\n",
2715 ctxp->sid, ctxp->oxid, ctxp->state);
2716 return NULL;
2717 }
2719 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2720 if (!ndlp ||
2721 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2722 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2723 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2724 "6108 NVMET prep FCP wqe: no ndlp: "
2725 "NPORT x%x oxid x%x ste %d\n",
2726 ctxp->sid, ctxp->oxid, ctxp->state);
2727 return NULL;
2728 }
2730 if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
2731 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2732 "6109 NVMET prep FCP wqe: seg cnt err: "
2733 "NPORT x%x oxid x%x ste %d cnt %d\n",
2734 ctxp->sid, ctxp->oxid, ctxp->state,
2735 phba->cfg_nvme_seg_cnt);
2736 return NULL;
2737 }
2738 nsegs = rsp->sg_cnt;
2740 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2741 nvmewqe = ctxp->wqeq;
2742 if (nvmewqe == NULL) {
2743 /* Allocate buffer for command wqe */
2744 nvmewqe = ctxp->ctxbuf->iocbq;
2745 if (nvmewqe == NULL) {
2746 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2747 "6110 NVMET prep FCP wqe: No "
2748 "WQE: NPORT x%x oxid x%x ste %d\n",
2749 ctxp->sid, ctxp->oxid, ctxp->state);
2750 return NULL;
2751 }
2752 ctxp->wqeq = nvmewqe;
2753 xc = 0; /* create new XRI */
2754 nvmewqe->sli4_lxritag = NO_XRI;
2755 nvmewqe->sli4_xritag = NO_XRI;
2756 }
2758 /* Sanity check */
2759 if (((ctxp->state == LPFC_NVME_STE_RCV) &&
2760 (ctxp->entry_cnt == 1)) ||
2761 (ctxp->state == LPFC_NVME_STE_DATA)) {
2762 wqe = &nvmewqe->wqe;
2763 } else {
2764 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2765 "6111 Wrong state NVMET FCP: %d cnt %d\n",
2766 ctxp->state, ctxp->entry_cnt);
2767 return NULL;
2768 }
2770 sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
2771 switch (rsp->op) {
2772 case NVMET_FCOP_READDATA:
2773 case NVMET_FCOP_READDATA_RSP:
2774 /* From the tsend template, initialize words 7 - 11 */
2775 memcpy(&wqe->words[7],
2776 &lpfc_tsend_cmd_template.words[7],
2777 sizeof(uint32_t) * 5);
2779 /* Words 0 - 2 : The first sg segment */
2780 sgel = &rsp->sg[0];
2781 physaddr = sg_dma_address(sgel);
2782 wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2783 wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
2784 wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
2785 wqe->fcp_tsend.bde.addrHigh =
2786 cpu_to_le32(putPaddrHigh(physaddr));
2789 wqe->fcp_tsend.payload_offset_len = 0;
2792 wqe->fcp_tsend.relative_offset = ctxp->offset;
2795 wqe->fcp_tsend.reserved = 0;
2798 bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
2799 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2800 bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
2801 nvmewqe->sli4_xritag);
2803 /* Word 7 - set ar later */
2806 wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
2809 bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
2810 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
2812 /* Word 10 - set wqes later, in template xc=1 */
2814 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
2816 /* Word 11 - set sup, irsp, irsplen later */
2820 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2822 /* Setup 2 SKIP SGEs */
2823 sgl->addr_hi = 0;
2824 sgl->addr_lo = 0;
2825 sgl->word2 = 0;
2826 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2827 sgl->word2 = cpu_to_le32(sgl->word2);
2828 sgl->sge_len = 0;
2829 sgl++;
2830 sgl->addr_hi = 0;
2831 sgl->addr_lo = 0;
2832 sgl->word2 = 0;
2833 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2834 sgl->word2 = cpu_to_le32(sgl->word2);
2835 sgl->sge_len = 0;
2836 sgl++;
2837 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
2838 atomic_inc(&tgtp->xmt_fcp_read_rsp);
2840 /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2842 if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
2843 if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
2844 bf_set(wqe_sup,
2845 &wqe->fcp_tsend.wqe_com, 1);
2846 } else {
2847 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
2848 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
2849 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
2850 ((rsp->rsplen >> 2) - 1));
2851 memcpy(&wqe->words[16], rsp->rspaddr,
2852 rsp->rsplen);
2853 }
2854 } else {
2855 atomic_inc(&tgtp->xmt_fcp_read);
2857 /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2858 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
2859 }
2860 break;
2862 case NVMET_FCOP_WRITEDATA:
2863 /* From the treceive template, initialize words 3 - 11 */
2864 memcpy(&wqe->words[3],
2865 &lpfc_treceive_cmd_template.words[3],
2866 sizeof(uint32_t) * 9);
2868 /* Words 0 - 2 : First SGE is skipped, set invalid BDE type */
2869 wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP;
2870 wqe->fcp_treceive.bde.tus.f.bdeSize = 0;
2871 wqe->fcp_treceive.bde.addrLow = 0;
2872 wqe->fcp_treceive.bde.addrHigh = 0;
2875 wqe->fcp_treceive.relative_offset = ctxp->offset;
2878 bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
2879 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2880 bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
2881 nvmewqe->sli4_xritag);
2886 wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
2889 bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
2890 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
2892 /* Word 10 - in template xc=1 */
2894 bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
2896 /* Word 11 - set pbde later */
2897 if (phba->cfg_enable_pbde) {
2898 use_pbde = true;
2899 } else {
2900 bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
2901 }
2905 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2907 /* Setup 2 SKIP SGEs */
2908 sgl->addr_hi = 0;
2909 sgl->addr_lo = 0;
2910 sgl->word2 = 0;
2911 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2912 sgl->word2 = cpu_to_le32(sgl->word2);
2913 sgl->sge_len = 0;
2914 sgl++;
2915 sgl->addr_hi = 0;
2916 sgl->addr_lo = 0;
2917 sgl->word2 = 0;
2918 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2919 sgl->word2 = cpu_to_le32(sgl->word2);
2920 sgl->sge_len = 0;
2921 sgl++;
2922 atomic_inc(&tgtp->xmt_fcp_write);
2923 break;
2925 case NVMET_FCOP_RSP:
2926 /* From the treceive template, initialize words 4 - 11 */
2927 memcpy(&wqe->words[4],
2928 &lpfc_trsp_cmd_template.words[4],
2929 sizeof(uint32_t) * 8);
2932 physaddr = rsp->rspdma;
2933 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2934 wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
2935 wqe->fcp_trsp.bde.addrLow =
2936 cpu_to_le32(putPaddrLow(physaddr));
2937 wqe->fcp_trsp.bde.addrHigh =
2938 cpu_to_le32(putPaddrHigh(physaddr));
2941 wqe->fcp_trsp.response_len = rsp->rsplen;
2944 bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
2945 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2946 bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
2947 nvmewqe->sli4_xritag);
2952 wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
2955 bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
2956 bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
2959 if (xc)
2960 bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);
2963 /* In template wqes=0 irsp=0 irsplen=0 - good response */
2964 if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
2965 /* Bad response - embed it */
2966 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
2967 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
2968 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
2969 ((rsp->rsplen >> 2) - 1));
2970 memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
2971 }
2973 /* Word 12 */
2975 wqe->fcp_trsp.rsvd_12_15[0] = 0;
2977 /* Use rspbuf, NOT sg list */
2978 nsegs = 0;
2979 sgl += 2;
2980 atomic_inc(&tgtp->xmt_fcp_rsp);
2981 break;
2983 default:
2984 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2985 "6064 Unknown Rsp Op %d\n",
2991 nvmewqe->vport = phba->pport;
2992 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2993 nvmewqe->context1 = ndlp;
2995 for_each_sg(rsp->sg, sgel, nsegs, i) {
2996 physaddr = sg_dma_address(sgel);
2997 cnt = sg_dma_len(sgel);
2998 sgl->addr_hi = putPaddrHigh(physaddr);
2999 sgl->addr_lo = putPaddrLow(physaddr);
3000 sgl->word2 = 0;
3001 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
3002 bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
3003 if ((i+1) == rsp->sg_cnt)
3004 bf_set(lpfc_sli4_sge_last, sgl, 1);
3005 sgl->word2 = cpu_to_le32(sgl->word2);
3006 sgl->sge_len = cpu_to_le32(cnt);
3007 if (i == 0) {
3008 bde = (struct ulp_bde64 *)&wqe->words[13];
3009 if (use_pbde) {
3010 /* Words 13-15 (PBDE) */
3011 bde->addrLow = sgl->addr_lo;
3012 bde->addrHigh = sgl->addr_hi;
3013 bde->tus.f.bdeSize =
3014 le32_to_cpu(sgl->sge_len);
3015 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3016 bde->tus.w = cpu_to_le32(bde->tus.w);
3017 } else {
3018 memset(bde, 0, sizeof(struct ulp_bde64));
3019 }
3020 }
3021 sgl++;
3022 ctxp->offset += cnt;
3023 }
3024 ctxp->state = LPFC_NVME_STE_DATA;
3026 return nvmewqe;
3027 }
3029 /**
3030 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
3031 * @phba: Pointer to HBA context object.
3032 * @cmdwqe: Pointer to driver command WQE object.
3033 * @wcqe: Pointer to driver response CQE object.
3035 * The function is called from SLI ring event handler with no
3036 * lock held. This function is the completion handler for NVME ABTS for FCP cmds.
3037 * The function frees memory resources used for the NVME commands.
3038 */
3039 static void
3040 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3041 struct lpfc_wcqe_complete *wcqe)
3042 {
3043 struct lpfc_async_xchg_ctx *ctxp;
3044 struct lpfc_nvmet_tgtport *tgtp;
3045 uint32_t result;
3046 unsigned long flags;
3047 bool released = false;
3049 ctxp = cmdwqe->context2;
3050 result = wcqe->parameter;
3052 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3053 if (ctxp->flag & LPFC_NVME_ABORT_OP)
3054 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
3056 spin_lock_irqsave(&ctxp->ctxlock, flags);
3057 ctxp->state = LPFC_NVME_STE_DONE;
3059 /* Check if we already received a free context call
3060 * and we have completed processing an abort situation.
3062 if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
3063 !(ctxp->flag & LPFC_NVME_XBUSY)) {
3064 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3065 list_del_init(&ctxp->list);
3066 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3067 released = true;
3068 }
3069 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3070 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3071 atomic_inc(&tgtp->xmt_abort_rsp);
3073 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3074 "6165 ABORT cmpl: oxid x%x flg x%x (%d) "
3075 "WCQE: %08x %08x %08x %08x\n",
3076 ctxp->oxid, ctxp->flag, released,
3077 wcqe->word0, wcqe->total_data_placed,
3078 result, wcqe->word3);
3080 cmdwqe->context2 = NULL;
3081 cmdwqe->context3 = NULL;
3082 /*
3083 * if transport has released ctx, then can reuse it. Otherwise,
3084 * will be recycled by transport release call.
3085 */
3086 if (released)
3087 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3089 /* This is the iocbq for the abort, not the command */
3090 lpfc_sli_release_iocbq(phba, cmdwqe);
3092 /* Since iaab/iaar are NOT set, there is no work left.
3093 * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
3094 * should have been called already.
3095 */
3096 }
3098 /**
3099 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
3100 * @phba: Pointer to HBA context object.
3101 * @cmdwqe: Pointer to driver command WQE object.
3102 * @wcqe: Pointer to driver response CQE object.
3104 * The function is called from SLI ring event handler with no
3105 * lock held. This function is the completion handler for NVME ABTS for FCP cmds.
3106 * The function frees memory resources used for the NVME commands.
3107 */
3108 static void
3109 lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3110 struct lpfc_wcqe_complete *wcqe)
3111 {
3112 struct lpfc_async_xchg_ctx *ctxp;
3113 struct lpfc_nvmet_tgtport *tgtp;
3114 unsigned long flags;
3115 uint32_t result;
3116 bool released = false;
3118 ctxp = cmdwqe->context2;
3119 result = wcqe->parameter;
3121 if (!ctxp) {
3122 /* if context is clear, related io already complete */
3123 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3124 "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
3125 wcqe->word0, wcqe->total_data_placed,
3126 result, wcqe->word3);
3127 return;
3128 }
3130 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3131 spin_lock_irqsave(&ctxp->ctxlock, flags);
3132 if (ctxp->flag & LPFC_NVME_ABORT_OP)
3133 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
3135 /* Sanity check */
3136 if (ctxp->state != LPFC_NVME_STE_ABORT) {
3137 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3138 "6112 ABTS Wrong state:%d oxid x%x\n",
3139 ctxp->state, ctxp->oxid);
3140 }
3142 /* Check if we already received a free context call
3143 * and we have completed processing an abort situation.
3145 ctxp->state = LPFC_NVME_STE_DONE;
3146 if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
3147 !(ctxp->flag & LPFC_NVME_XBUSY)) {
3148 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3149 list_del_init(&ctxp->list);
3150 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3151 released = true;
3152 }
3153 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3154 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3155 atomic_inc(&tgtp->xmt_abort_rsp);
3157 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3158 "6316 ABTS cmpl oxid x%x flg x%x (%x) "
3159 "WCQE: %08x %08x %08x %08x\n",
3160 ctxp->oxid, ctxp->flag, released,
3161 wcqe->word0, wcqe->total_data_placed,
3162 result, wcqe->word3);
3164 cmdwqe->context2 = NULL;
3165 cmdwqe->context3 = NULL;
3166 /*
3167 * if transport has released ctx, then can reuse it. Otherwise,
3168 * will be recycled by transport release call.
3169 */
3170 if (released)
3171 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3173 /* Since iaab/iaar are NOT set, there is no work left.
3174 * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
3175 * should have been called already.
3176 */
3177 }
3179 /**
3180 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
3181 * @phba: Pointer to HBA context object.
3182 * @cmdwqe: Pointer to driver command WQE object.
3183 * @wcqe: Pointer to driver response CQE object.
3185 * The function is called from SLI ring event handler with no
3186 * lock held. This function is the completion handler for NVME ABTS for LS cmds.
3187 * The function frees memory resources used for the NVME commands.
3188 */
3189 static void
3190 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3191 struct lpfc_wcqe_complete *wcqe)
3192 {
3193 struct lpfc_async_xchg_ctx *ctxp;
3194 struct lpfc_nvmet_tgtport *tgtp;
3195 uint32_t result;
3197 ctxp = cmdwqe->context2;
3198 result = wcqe->parameter;
3200 if (phba->nvmet_support) {
3201 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3202 atomic_inc(&tgtp->xmt_ls_abort_cmpl);
3203 }
3205 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3206 "6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
3207 ctxp, wcqe->word0, wcqe->total_data_placed,
3208 result, wcqe->word3);
3210 if (!ctxp) {
3211 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3212 "6415 NVMET LS Abort No ctx: WCQE: "
3213 "%08x %08x %08x %08x\n",
3214 wcqe->word0, wcqe->total_data_placed,
3215 result, wcqe->word3);
3217 lpfc_sli_release_iocbq(phba, cmdwqe);
3218 return;
3219 }
3221 if (ctxp->state != LPFC_NVME_STE_LS_ABORT) {
3222 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3223 "6416 NVMET LS abort cmpl state mismatch: "
3224 "oxid x%x: %d %d\n",
3225 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3226 }
3228 cmdwqe->context2 = NULL;
3229 cmdwqe->context3 = NULL;
3230 lpfc_sli_release_iocbq(phba, cmdwqe);
3231 kfree(ctxp);
3232 }
3234 static int
3235 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
3236 struct lpfc_async_xchg_ctx *ctxp,
3237 uint32_t sid, uint16_t xri)
3238 {
3239 struct lpfc_nvmet_tgtport *tgtp = NULL;
3240 struct lpfc_iocbq *abts_wqeq;
3241 union lpfc_wqe128 *wqe_abts;
3242 struct lpfc_nodelist *ndlp;
3244 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3245 "6067 ABTS: sid %x xri x%x/x%x\n",
3246 sid, xri, ctxp->wqeq->sli4_xritag);
3248 if (phba->nvmet_support && phba->targetport)
3249 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3251 ndlp = lpfc_findnode_did(phba->pport, sid);
3252 if (!ndlp ||
3253 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3254 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3255 if (tgtp)
3256 atomic_inc(&tgtp->xmt_abort_rsp_error);
3257 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3258 "6134 Drop ABTS - wrong NDLP state x%x.\n",
3259 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
3261 /* No failure to an ABTS request. */
3262 return 0;
3263 }
3265 abts_wqeq = ctxp->wqeq;
3266 wqe_abts = &abts_wqeq->wqe;
3268 /*
3269 * Since we zero the whole WQE, we need to ensure we set the WQE fields
3270 * that were initialized in lpfc_sli4_nvmet_alloc.
3271 */
3272 memset(wqe_abts, 0, sizeof(union lpfc_wqe));
3275 bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
3276 bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
3277 bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
3278 bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
3279 bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
3282 bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
3283 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
3284 bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
3285 abts_wqeq->sli4_xritag);
3288 bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
3289 CMD_XMIT_SEQUENCE64_WQE);
3290 bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
3291 bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
3292 bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
3295 wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
3298 bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
3299 /* Needs to be set by caller */
3300 bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
3303 bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
3304 bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
3305 LPFC_WQE_LENLOC_WORD12);
3306 bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
3307 bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
3310 bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
3311 LPFC_WQE_CQ_ID_DEFAULT);
3312 bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
3313 OTHER_COMMAND);
3315 abts_wqeq->vport = phba->pport;
3316 abts_wqeq->context1 = ndlp;
3317 abts_wqeq->context2 = ctxp;
3318 abts_wqeq->context3 = NULL;
3319 abts_wqeq->rsvd2 = 0;
3320 /* hba_wqidx should already be setup from command we are aborting */
3321 abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
3322 abts_wqeq->iocb.ulpLe = 1;
3324 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3325 "6069 Issue ABTS to xri x%x reqtag x%x\n",
3326 xri, abts_wqeq->iotag);
3327 return 1;
3328 }
3330 /**
3331 * lpfc_nvmet_prep_abort_wqe - set up 'abort' work queue entry.
3332 * @pwqeq: Pointer to command iocb.
3333 * @xritag: Tag that uniquely identifies the local exchange resource.
3334 * @opt: Option bits -
3335 * bit 0 = inhibit sending abts on the link
3337 * This function is called with hbalock held.
3338 */
3339 static void
3340 lpfc_nvmet_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt)
3341 {
3342 union lpfc_wqe128 *wqe = &pwqeq->wqe;
3344 /* WQEs are reused. Clear stale data and set key fields to
3345 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
3346 */
3347 memset(wqe, 0, sizeof(*wqe));
3349 if (opt & INHIBIT_ABORT)
3350 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
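/* With IA set the XRI is cleaned up locally and no ABTS goes on the
 * wire; used when the initiator already aborted the exchange itself.
 */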
3351 /* Abort specified xri tag, with the mask deliberately zeroed */
3352 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
3354 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
3356 /* Abort the I/O associated with this outstanding exchange ID. */
3357 wqe->abort_cmd.wqe_com.abort_tag = xritag;
3359 /* iotag for the wqe completion. */
3360 bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);
3362 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
3363 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
3365 bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
3366 bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
3367 bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
3368 }
3370 static int
3371 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
3372 struct lpfc_async_xchg_ctx *ctxp,
3373 uint32_t sid, uint16_t xri)
3374 {
3375 struct lpfc_nvmet_tgtport *tgtp;
3376 struct lpfc_iocbq *abts_wqeq;
3377 struct lpfc_nodelist *ndlp;
3378 unsigned long flags;
3379 u8 opt;
3380 int rc;
3382 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3383 if (!ctxp->wqeq) {
3384 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3385 ctxp->wqeq->hba_wqidx = 0;
3386 }
3388 ndlp = lpfc_findnode_did(phba->pport, sid);
3389 if (!ndlp ||
3390 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3391 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3392 atomic_inc(&tgtp->xmt_abort_rsp_error);
3393 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3394 "6160 Drop ABORT - wrong NDLP state x%x.\n",
3395 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
3397 /* No failure to an ABTS request. */
3398 spin_lock_irqsave(&ctxp->ctxlock, flags);
3399 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3400 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3404 /* Issue ABTS for this WQE based on iotag */
3405 ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
3406 spin_lock_irqsave(&ctxp->ctxlock, flags);
3407 if (!ctxp->abort_wqeq) {
3408 atomic_inc(&tgtp->xmt_abort_rsp_error);
3409 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3410 "6161 ABORT failed: No wqeqs: "
3411 "xri: x%x\n", ctxp->oxid);
3412 /* No failure to an ABTS request. */
3413 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3414 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3415 return 0;
3416 }
3417 abts_wqeq = ctxp->abort_wqeq;
3418 ctxp->state = LPFC_NVME_STE_ABORT;
3419 opt = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? INHIBIT_ABORT : 0;
3420 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3422 /* Announce entry to new IO submit field. */
3423 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3424 "6162 ABORT Request to rport DID x%06x "
3425 "for xri x%x x%x\n",
3426 ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
3428 /* If the hba is getting reset, this flag is set. It is
3429 * cleared when the reset is complete and rings reestablished.
3430 */
3431 spin_lock_irqsave(&phba->hbalock, flags);
3432 /* driver queued commands are in process of being flushed */
3433 if (phba->hba_flag & HBA_IOQ_FLUSH) {
3434 spin_unlock_irqrestore(&phba->hbalock, flags);
3435 atomic_inc(&tgtp->xmt_abort_rsp_error);
3436 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3437 "6163 Driver in reset cleanup - flushing "
3438 "NVME Req now. hba_flag x%x oxid x%x\n",
3439 phba->hba_flag, ctxp->oxid);
3440 lpfc_sli_release_iocbq(phba, abts_wqeq);
3441 spin_lock_irqsave(&ctxp->ctxlock, flags);
3442 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3443 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3444 return 0;
3445 }
3447 /* Outstanding abort is in progress */
3448 if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
3449 spin_unlock_irqrestore(&phba->hbalock, flags);
3450 atomic_inc(&tgtp->xmt_abort_rsp_error);
3451 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3452 "6164 Outstanding NVME I/O Abort Request "
3453 "still pending on oxid x%x\n",
3455 lpfc_sli_release_iocbq(phba, abts_wqeq);
3456 spin_lock_irqsave(&ctxp->ctxlock, flags);
3457 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3458 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3459 return 0;
3460 }
3462 /* Ready - mark outstanding as aborted by driver. */
3463 abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
3465 lpfc_nvmet_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);
3467 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
3468 abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
3469 abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
3470 abts_wqeq->iocb_cmpl = NULL;
3471 abts_wqeq->iocb_flag |= LPFC_IO_NVME;
3472 abts_wqeq->context2 = ctxp;
3473 abts_wqeq->vport = phba->pport;
3474 if (!ctxp->hdwq)
3475 ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3477 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3478 spin_unlock_irqrestore(&phba->hbalock, flags);
3479 if (rc == WQE_SUCCESS) {
3480 atomic_inc(&tgtp->xmt_abort_sol);
3481 return 0;
3482 }
3484 atomic_inc(&tgtp->xmt_abort_rsp_error);
3485 spin_lock_irqsave(&ctxp->ctxlock, flags);
3486 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3487 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3488 lpfc_sli_release_iocbq(phba, abts_wqeq);
3489 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3490 "6166 Failed ABORT issue_wqe with status x%x "
3497 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
3498 struct lpfc_async_xchg_ctx *ctxp,
3499 uint32_t sid, uint16_t xri)
3500 {
3501 struct lpfc_nvmet_tgtport *tgtp;
3502 struct lpfc_iocbq *abts_wqeq;
3503 unsigned long flags;
3504 bool released = false;
3505 int rc;
3507 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3508 if (!ctxp->wqeq) {
3509 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3510 ctxp->wqeq->hba_wqidx = 0;
3511 }
3513 if (ctxp->state == LPFC_NVME_STE_FREE) {
3514 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3515 "6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
3516 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
3517 rc = WQE_BUSY;
3518 goto aerr;
3519 }
3520 ctxp->state = LPFC_NVME_STE_ABORT;
3521 ctxp->entry_cnt++;
3522 rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
3523 if (rc == 0)
3524 goto aerr;
3526 spin_lock_irqsave(&phba->hbalock, flags);
3527 abts_wqeq = ctxp->wqeq;
3528 abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
3529 abts_wqeq->iocb_cmpl = NULL;
3530 abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
3531 if (!ctxp->hdwq)
3532 ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3534 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3535 spin_unlock_irqrestore(&phba->hbalock, flags);
3536 if (rc == WQE_SUCCESS) {
3537 return 0;
3538 }
3540 aerr:
3541 spin_lock_irqsave(&ctxp->ctxlock, flags);
3542 if (ctxp->flag & LPFC_NVME_CTX_RLS) {
3543 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3544 list_del_init(&ctxp->list);
3545 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3546 released = true;
3547 }
3548 ctxp->flag &= ~(LPFC_NVME_ABORT_OP | LPFC_NVME_CTX_RLS);
3549 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3551 atomic_inc(&tgtp->xmt_abort_rsp_error);
3552 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3553 "6135 Failed to Issue ABTS for oxid x%x. Status x%x "
3555 ctxp->oxid, rc, released);
3557 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3562 * lpfc_nvme_unsol_ls_issue_abort - issue ABTS on an exchange received
3563 * via async frame receive where the frame is not handled.
3564 * @phba: pointer to adapter structure
3565 * @ctxp: pointer to the asynchronously received sequence
3566 * @sid: address of the remote port to send the ABTS to
3567 * @xri: oxid value for the ABTS (other side's exchange id).
3568 */
3569 int
3570 lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
3571 struct lpfc_async_xchg_ctx *ctxp,
3572 uint32_t sid, uint16_t xri)
3573 {
3574 struct lpfc_nvmet_tgtport *tgtp = NULL;
3575 struct lpfc_iocbq *abts_wqeq;
3576 unsigned long flags;
3577 int rc;
3579 if ((ctxp->state == LPFC_NVME_STE_LS_RCV && ctxp->entry_cnt == 1) ||
3580 (ctxp->state == LPFC_NVME_STE_LS_RSP && ctxp->entry_cnt == 2)) {
3581 ctxp->state = LPFC_NVME_STE_LS_ABORT;
3582 ctxp->entry_cnt++;
3583 } else {
3584 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3585 "6418 NVMET LS abort state mismatch "
3587 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3588 ctxp->state = LPFC_NVME_STE_LS_ABORT;
3589 }
3591 if (phba->nvmet_support && phba->targetport)
3592 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3594 if (!ctxp->wqeq) {
3595 /* Issue ABTS for this WQE based on iotag */
3596 ctxp->wqeq = lpfc_sli_get_iocbq(phba);
3597 if (!ctxp->wqeq) {
3598 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3599 "6068 Abort failed: No wqeqs: "
3600 "xri: x%x\n", xri);
3601 /* No failure to an ABTS request. */
3602 kfree(ctxp);
3603 return 0;
3604 }
3605 }
3606 abts_wqeq = ctxp->wqeq;
3608 if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
3609 rc = WQE_BUSY;
3610 goto out;
3611 }
3613 spin_lock_irqsave(&phba->hbalock, flags);
3614 abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
3615 abts_wqeq->iocb_cmpl = NULL;
3616 abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
3617 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3618 spin_unlock_irqrestore(&phba->hbalock, flags);
3619 if (rc == WQE_SUCCESS) {
3620 if (tgtp)
3621 atomic_inc(&tgtp->xmt_abort_unsol);
3622 return 0;
3623 }
3624 out:
3625 if (tgtp)
3626 atomic_inc(&tgtp->xmt_abort_rsp_error);
3627 abts_wqeq->context2 = NULL;
3628 abts_wqeq->context3 = NULL;
3629 lpfc_sli_release_iocbq(phba, abts_wqeq);
3630 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3631 "6056 Failed to Issue ABTS. Status x%x\n", rc);
3636 * lpfc_nvmet_invalidate_host
3638 * @phba: pointer to the driver instance bound to an adapter port.
3639 * @ndlp: pointer to an lpfc_nodelist type
3641 * This routine upcalls the nvmet transport to invalidate an NVME
3642 * host to which this target instance had active connections.
3643 */
3644 void
3645 lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
3646 {
3647 u32 ndlp_has_hh;
3648 struct lpfc_nvmet_tgtport *tgtp;
3650 lpfc_printf_log(phba, KERN_INFO,
3651 LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC,
3652 "6203 Invalidating hosthandle x%px\n",
3655 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3656 atomic_set(&tgtp->state, LPFC_NVMET_INV_HOST_ACTIVE);
3658 spin_lock_irq(&ndlp->lock);
3659 ndlp_has_hh = ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH;
3660 spin_unlock_irq(&ndlp->lock);
3662 /* Do not invalidate any nodes that do not have a hosthandle.
3663 * The host_release callback will cause a node reference
3664 * count imbalance and a crash.
3665 */
3666 if (!ndlp_has_hh) {
3667 lpfc_printf_log(phba, KERN_INFO,
3668 LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC,
3669 "6204 Skip invalidate on node x%px DID x%x\n",
3670 ndlp, ndlp->nlp_DID);
3671 return;
3672 }
3674 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
3675 /* Need to get the nvmet_fc_target_port pointer here.*/
3676 nvmet_fc_invalidate_host(phba->targetport, ndlp);
3677 #endif
3678 }