1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 ********************************************************************/
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <asm/unaligned.h>
28 #include <linux/crc-t10dif.h>
29 #include <net/checksum.h>
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_eh.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_tcq.h>
36 #include <scsi/scsi_transport_fc.h>
37 #include <scsi/fc/fc_fs.h>
39 #include <linux/nvme.h>
40 #include <linux/nvme-fc-driver.h>
41 #include <linux/nvme-fc.h>
43 #include "lpfc_version.h"
47 #include "lpfc_sli4.h"
49 #include "lpfc_disc.h"
51 #include "lpfc_scsi.h"
52 #include "lpfc_nvme.h"
53 #include "lpfc_nvmet.h"
54 #include "lpfc_logmsg.h"
55 #include "lpfc_crtn.h"
56 #include "lpfc_vport.h"
57 #include "lpfc_debugfs.h"
59 static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
60 struct lpfc_nvmet_rcv_ctx *,
63 static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
64 struct lpfc_nvmet_rcv_ctx *);
65 static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
66 struct lpfc_nvmet_rcv_ctx *,
68 static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
69 struct lpfc_nvmet_rcv_ctx *,
71 static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
72 struct lpfc_nvmet_rcv_ctx *,
74 static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
75 struct lpfc_nvmet_rcv_ctx *);
77 static union lpfc_wqe128 lpfc_tsend_cmd_template;
78 static union lpfc_wqe128 lpfc_treceive_cmd_template;
79 static union lpfc_wqe128 lpfc_trsp_cmd_template;
81 /* Setup WQE templates for NVME IOs */
83 lpfc_nvmet_cmd_template(void)
85 union lpfc_wqe128 *wqe;
88 wqe = &lpfc_tsend_cmd_template;
89 memset(wqe, 0, sizeof(union lpfc_wqe128));
91 /* Word 0, 1, 2 - BDE is variable */
93 /* Word 3 - payload_offset_len is zero */
95 /* Word 4 - relative_offset is variable */
97 /* Word 5 - is zero */
99 /* Word 6 - ctxt_tag, xri_tag is variable */
101 /* Word 7 - wqe_ar is variable */
102 bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
103 bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
104 bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
105 bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
106 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
108 /* Word 8 - abort_tag is variable */
110 /* Word 9 - reqtag, rcvoxid is variable */
112 /* Word 10 - wqes, xc is variable */
113 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
114 bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
115 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
116 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
117 bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
118 bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);
120 /* Word 11 - sup, irsp, irsplen is variable */
121 bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
122 bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
123 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
124 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
125 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
126 bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);
128 /* Word 12 - fcp_data_len is variable */
130 /* Word 13, 14, 15 - PBDE is zero */
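/*
 * Note: these static templates are not posted directly. The prep routines
 * (e.g. lpfc_nvmet_prep_fcp_wqe) presumably copy the appropriate template
 * into the per-IO WQE and then fill in the words marked "variable" above
 * (BDE, ctxt/xri tags, request tag, offsets and lengths) before issue.
 */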
132 /* TRECEIVE template */
133 wqe = &lpfc_treceive_cmd_template;
134 memset(wqe, 0, sizeof(union lpfc_wqe128));
136 /* Word 0, 1, 2 - BDE is variable */
139 wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
141 /* Word 4 - relative_offset is variable */
143 /* Word 5 - is zero */
145 /* Word 6 - ctxt_tag, xri_tag is variable */
148 bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
149 bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
150 bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
151 bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
152 bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
154 /* Word 8 - abort_tag is variable */
156 /* Word 9 - reqtag, rcvoxid is variable */
158 /* Word 10 - xc is variable */
159 bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
160 bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
161 bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
162 bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
163 bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
164 bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 1);
166 /* Word 11 - pbde is variable */
167 bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
168 bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
169 bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
170 bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
171 bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
172 bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);
174 /* Word 12 - fcp_data_len is variable */
176 /* Word 13, 14, 15 - PBDE is variable */
179 wqe = &lpfc_trsp_cmd_template;
180 memset(wqe, 0, sizeof(union lpfc_wqe128));
182 /* Word 0, 1, 2 - BDE is variable */
184 /* Word 3 - response_len is variable */
186 /* Word 4, 5 - is zero */
188 /* Word 6 - ctxt_tag, xri_tag is variable */
191 bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
192 bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
193 bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
194 bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
195 bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */
197 /* Word 8 - abort_tag is variable */
199 /* Word 9 - reqtag is variable */
201 /* Word 10 wqes, xc is variable */
202 bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
203 bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
204 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
205 bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
206 bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
207 bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);
209 /* Word 11 irsp, irsplen is variable */
210 bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
211 bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
212 bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
213 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
214 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
215 bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);
217 /* Word 12, 13, 14, 15 - is zero */
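/*
 * lpfc_nvmet_defer_release - mark a receive context for deferred release.
 * Sets LPFC_NVMET_CTX_RLS on the context (if not already set) and queues it
 * on the aborted-context list so the abort / XRI-aborted path performs the
 * actual release later.
 */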
221 lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
225 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
226 "6313 NVMET Defer ctx release xri x%x flg x%x\n",
227 ctxp->oxid, ctxp->flag);
229 spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
230 if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
231 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
235 ctxp->flag |= LPFC_NVMET_CTX_RLS;
236 list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
237 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
241 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
242 * @phba: Pointer to HBA context object.
243 * @cmdwqe: Pointer to driver command WQE object.
244 * @wcqe: Pointer to driver response CQE object.
246 * The function is called from the SLI ring event handler with no
247 * lock held. This function is the completion handler for NVME LS commands.
248 * The function frees memory resources used for the NVME commands.
251 lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
252 struct lpfc_wcqe_complete *wcqe)
254 struct lpfc_nvmet_tgtport *tgtp;
255 struct nvmefc_tgt_ls_req *rsp;
256 struct lpfc_nvmet_rcv_ctx *ctxp;
257 uint32_t status, result;
259 status = bf_get(lpfc_wcqe_c_status, wcqe);
260 result = wcqe->parameter;
261 ctxp = cmdwqe->context2;
263 if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
264 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
265 "6410 NVMET LS cmpl state mismatch IO x%x: "
267 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
270 if (!phba->targetport)
273 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
277 atomic_inc(&tgtp->xmt_ls_rsp_error);
278 if (result == IOERR_ABORT_REQUESTED)
279 atomic_inc(&tgtp->xmt_ls_rsp_aborted);
280 if (bf_get(lpfc_wcqe_c_xb, wcqe))
281 atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
283 atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
288 rsp = &ctxp->ctx.ls_req;
290 lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
291 ctxp->oxid, status, result);
293 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
294 "6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
295 status, result, ctxp->oxid);
297 lpfc_nlp_put(cmdwqe->context1);
298 cmdwqe->context2 = NULL;
299 cmdwqe->context3 = NULL;
300 lpfc_sli_release_iocbq(phba, cmdwqe);
306 * lpfc_nvmet_ctxbuf_post - Repost an NVMET RQ DMA buffer and clean up context
307 * @phba: HBA the buffer is associated with
308 * @ctx_buf: NVMET receive context buffer to clean up and repost
311 * Description: Returns the receive context to the free context list, or
312 * reuses it immediately for a queued unsolicited command, so it can be reused.
314 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
319 lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
321 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
322 struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
323 struct lpfc_nvmet_tgtport *tgtp;
324 struct fc_frame_header *fc_hdr;
325 struct rqb_dmabuf *nvmebuf;
326 struct lpfc_nvmet_ctx_info *infop;
328 uint32_t size, oxid, sid, rc;
333 dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
336 ctxp->txrdy_phys = 0;
339 if (ctxp->state == LPFC_NVMET_STE_FREE) {
340 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
341 "6411 NVMET free, already free IO x%x: %d %d\n",
342 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
344 ctxp->state = LPFC_NVMET_STE_FREE;
346 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
347 if (phba->sli4_hba.nvmet_io_wait_cnt) {
348 list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
349 nvmebuf, struct rqb_dmabuf,
351 phba->sli4_hba.nvmet_io_wait_cnt--;
352 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
355 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
356 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
357 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
358 payload = (uint32_t *)(nvmebuf->dbuf.virt);
359 size = nvmebuf->bytes_recv;
360 sid = sli4_sid_from_fc_hdr(fc_hdr);
362 ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
370 ctxp->state = LPFC_NVMET_STE_RCV;
373 ctxp->ctxbuf = ctx_buf;
374 ctxp->rqb_buffer = (void *)nvmebuf;
375 spin_lock_init(&ctxp->ctxlock);
377 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
378 if (ctxp->ts_cmd_nvme) {
379 ctxp->ts_cmd_nvme = ktime_get_ns();
380 ctxp->ts_nvme_data = 0;
381 ctxp->ts_data_wqput = 0;
382 ctxp->ts_isr_data = 0;
383 ctxp->ts_data_nvme = 0;
384 ctxp->ts_nvme_status = 0;
385 ctxp->ts_status_wqput = 0;
386 ctxp->ts_isr_status = 0;
387 ctxp->ts_status_nvme = 0;
390 atomic_inc(&tgtp->rcv_fcp_cmd_in);
392 * The calling sequence should be:
393 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
394 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
395 * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
396 * the NVME command / FC header is stored.
397 * A buffer has already been reposted for this IO, so just free
400 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
403 /* Process FCP command */
405 ctxp->rqb_buffer = NULL;
406 atomic_inc(&tgtp->rcv_fcp_cmd_out);
407 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
411 /* Processing of FCP command is deferred */
412 if (rc == -EOVERFLOW) {
413 lpfc_nvmeio_data(phba,
414 "NVMET RCV BUSY: xri x%x sz %d "
417 atomic_inc(&tgtp->rcv_fcp_cmd_out);
420 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
421 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
422 "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
424 atomic_read(&tgtp->rcv_fcp_cmd_in),
425 atomic_read(&tgtp->rcv_fcp_cmd_out),
426 atomic_read(&tgtp->xmt_fcp_release));
428 lpfc_nvmet_defer_release(phba, ctxp);
429 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
430 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
433 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
436 * Use the CPU context list, from the MRQ the IO was received on
437 * (ctxp->idx), to save the context structure.
439 cpu = smp_processor_id();
440 infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
441 spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
442 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
443 infop->nvmet_ctx_list_cnt++;
444 spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
448 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
450 lpfc_nvmet_ktime(struct lpfc_hba *phba,
451 struct lpfc_nvmet_rcv_ctx *ctxp)
453 uint64_t seg1, seg2, seg3, seg4, seg5;
454 uint64_t seg6, seg7, seg8, seg9, seg10;
457 if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
458 !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
459 !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
460 !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
461 !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
464 if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
466 if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
468 if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
470 if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
472 if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
474 if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
476 if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
478 if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
480 if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
482 if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
485 * Segment 1 - Time from FCP command received by MSI-X ISR
486 * to FCP command is passed to NVME Layer.
487 * Segment 2 - Time from FCP command payload handed
488 * off to NVME Layer to Driver receives a Command op
490 * Segment 3 - Time from Driver receives a Command op
491 * from NVME Layer to Command is put on WQ.
492 * Segment 4 - Time from Driver WQ put is done
493 * to MSI-X ISR for Command cmpl.
494 * Segment 5 - Time from MSI-X ISR for Command cmpl to
495 * Command cmpl is passed to NVME Layer.
496 * Segment 6 - Time from Command cmpl is passed to NVME
497 * Layer to Driver receives a RSP op from NVME Layer.
498 * Segment 7 - Time from Driver receives a RSP op from
499 * NVME Layer to WQ put is done on TRSP FCP Status.
500 * Segment 8 - Time from Driver WQ put is done on TRSP
501 * FCP Status to MSI-X ISR for TRSP cmpl.
502 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
503 * TRSP cmpl is passed to NVME Layer.
504 * Segment 10 - Time from FCP command received by
505 * MSI-X ISR to command is completed on wire.
506 * (Segments 1 thru 8) for READDATA / WRITEDATA
507 * (Segments 1 thru 4) for READDATA_RSP
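/*
 * All ts_* fields are raw ktime_get_ns() timestamps, so each segN below is
 * first computed as a cumulative offset from ts_isr_cmd; the per-segment
 * durations described above then come from differencing consecutive values.
 */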
509 seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
512 seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
518 seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
524 seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
530 seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
537 /* For auto rsp commands seg6 thru seg10 will be 0 */
538 if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
539 seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
545 seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
551 seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
557 seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
563 if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
565 seg10 = (ctxp->ts_isr_status -
568 if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
574 seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
577 phba->ktime_seg1_total += seg1;
578 if (seg1 < phba->ktime_seg1_min)
579 phba->ktime_seg1_min = seg1;
580 else if (seg1 > phba->ktime_seg1_max)
581 phba->ktime_seg1_max = seg1;
583 phba->ktime_seg2_total += seg2;
584 if (seg2 < phba->ktime_seg2_min)
585 phba->ktime_seg2_min = seg2;
586 else if (seg2 > phba->ktime_seg2_max)
587 phba->ktime_seg2_max = seg2;
589 phba->ktime_seg3_total += seg3;
590 if (seg3 < phba->ktime_seg3_min)
591 phba->ktime_seg3_min = seg3;
592 else if (seg3 > phba->ktime_seg3_max)
593 phba->ktime_seg3_max = seg3;
595 phba->ktime_seg4_total += seg4;
596 if (seg4 < phba->ktime_seg4_min)
597 phba->ktime_seg4_min = seg4;
598 else if (seg4 > phba->ktime_seg4_max)
599 phba->ktime_seg4_max = seg4;
601 phba->ktime_seg5_total += seg5;
602 if (seg5 < phba->ktime_seg5_min)
603 phba->ktime_seg5_min = seg5;
604 else if (seg5 > phba->ktime_seg5_max)
605 phba->ktime_seg5_max = seg5;
607 phba->ktime_data_samples++;
611 phba->ktime_seg6_total += seg6;
612 if (seg6 < phba->ktime_seg6_min)
613 phba->ktime_seg6_min = seg6;
614 else if (seg6 > phba->ktime_seg6_max)
615 phba->ktime_seg6_max = seg6;
617 phba->ktime_seg7_total += seg7;
618 if (seg7 < phba->ktime_seg7_min)
619 phba->ktime_seg7_min = seg7;
620 else if (seg7 > phba->ktime_seg7_max)
621 phba->ktime_seg7_max = seg7;
623 phba->ktime_seg8_total += seg8;
624 if (seg8 < phba->ktime_seg8_min)
625 phba->ktime_seg8_min = seg8;
626 else if (seg8 > phba->ktime_seg8_max)
627 phba->ktime_seg8_max = seg8;
629 phba->ktime_seg9_total += seg9;
630 if (seg9 < phba->ktime_seg9_min)
631 phba->ktime_seg9_min = seg9;
632 else if (seg9 > phba->ktime_seg9_max)
633 phba->ktime_seg9_max = seg9;
635 phba->ktime_seg10_total += seg10;
636 if (seg10 < phba->ktime_seg10_min)
637 phba->ktime_seg10_min = seg10;
638 else if (seg10 > phba->ktime_seg10_max)
639 phba->ktime_seg10_max = seg10;
640 phba->ktime_status_samples++;
645 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
646 * @phba: Pointer to HBA context object.
647 * @cmdwqe: Pointer to driver command WQE object.
648 * @wcqe: Pointer to driver response CQE object.
650 * The function is called from the SLI ring event handler with no
651 * lock held. This function is the completion handler for NVME FCP commands.
652 * The function frees memory resources used for the NVME commands.
655 lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
656 struct lpfc_wcqe_complete *wcqe)
658 struct lpfc_nvmet_tgtport *tgtp;
659 struct nvmefc_tgt_fcp_req *rsp;
660 struct lpfc_nvmet_rcv_ctx *ctxp;
661 uint32_t status, result, op, start_clean, logerr;
662 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
666 ctxp = cmdwqe->context2;
667 ctxp->flag &= ~LPFC_NVMET_IO_INP;
669 rsp = &ctxp->ctx.fcp_req;
672 status = bf_get(lpfc_wcqe_c_status, wcqe);
673 result = wcqe->parameter;
675 if (phba->targetport)
676 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
680 lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
681 ctxp->oxid, op, status);
684 rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
685 rsp->transferred_length = 0;
687 atomic_inc(&tgtp->xmt_fcp_rsp_error);
688 if (result == IOERR_ABORT_REQUESTED)
689 atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
692 logerr = LOG_NVME_IOERR;
694 /* pick up SLI4 exchange busy condition */
695 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
696 ctxp->flag |= LPFC_NVMET_XBUSY;
697 logerr |= LOG_NVME_ABTS;
699 atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
702 ctxp->flag &= ~LPFC_NVMET_XBUSY;
705 lpfc_printf_log(phba, KERN_INFO, logerr,
706 "6315 IO Error Cmpl xri x%x: %x/%x XBUSY:x%x\n",
707 ctxp->oxid, status, result, ctxp->flag);
710 rsp->fcp_error = NVME_SC_SUCCESS;
711 if (op == NVMET_FCOP_RSP)
712 rsp->transferred_length = rsp->rsplen;
714 rsp->transferred_length = rsp->transfer_length;
716 atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
719 if ((op == NVMET_FCOP_READDATA_RSP) ||
720 (op == NVMET_FCOP_RSP)) {
722 ctxp->state = LPFC_NVMET_STE_DONE;
725 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
726 if (ctxp->ts_cmd_nvme) {
727 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
729 cmdwqe->isr_timestamp;
732 ctxp->ts_nvme_status =
734 ctxp->ts_status_wqput =
736 ctxp->ts_isr_status =
738 ctxp->ts_status_nvme =
741 ctxp->ts_isr_status =
742 cmdwqe->isr_timestamp;
743 ctxp->ts_status_nvme =
749 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
750 if (ctxp->ts_cmd_nvme)
751 lpfc_nvmet_ktime(phba, ctxp);
753 /* lpfc_nvmet_xmt_fcp_release() will recycle the context */
756 start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
757 memset(((char *)cmdwqe) + start_clean, 0,
758 (sizeof(struct lpfc_iocbq) - start_clean));
759 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
760 if (ctxp->ts_cmd_nvme) {
761 ctxp->ts_isr_data = cmdwqe->isr_timestamp;
762 ctxp->ts_data_nvme = ktime_get_ns();
767 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
768 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
769 id = smp_processor_id();
770 if (id < LPFC_CHECK_CPU_CNT) {
772 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
773 "6704 CPU Check cmdcmpl: "
774 "cpu %d expect %d\n",
776 phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_cmpl_io[id]++;
783 lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
784 struct nvmefc_tgt_ls_req *rsp)
786 struct lpfc_nvmet_rcv_ctx *ctxp =
787 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
788 struct lpfc_hba *phba = ctxp->phba;
789 struct hbq_dmabuf *nvmebuf =
790 (struct hbq_dmabuf *)ctxp->rqb_buffer;
791 struct lpfc_iocbq *nvmewqeq;
792 struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
793 struct lpfc_dmabuf dmabuf;
794 struct ulp_bde64 bpl;
797 if (phba->pport->load_flag & FC_UNLOADING)
800 if (phba->pport->load_flag & FC_UNLOADING)
803 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
804 "6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
806 if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
807 (ctxp->entry_cnt != 1)) {
808 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
809 "6412 NVMET LS rsp state mismatch "
811 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
813 ctxp->state = LPFC_NVMET_STE_LS_RSP;
816 nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
818 if (nvmewqeq == NULL) {
819 atomic_inc(&nvmep->xmt_ls_drop);
820 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
821 "6150 LS Drop IO x%x: Prep\n",
823 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
824 atomic_inc(&nvmep->xmt_ls_abort);
825 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
826 ctxp->sid, ctxp->oxid);
830 /* Save numBdes for bpl2sgl */
832 nvmewqeq->hba_wqidx = 0;
833 nvmewqeq->context3 = &dmabuf;
835 bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
836 bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
837 bpl.tus.f.bdeSize = rsp->rsplen;
838 bpl.tus.f.bdeFlags = 0;
839 bpl.tus.w = le32_to_cpu(bpl.tus.w);
841 nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
842 nvmewqeq->iocb_cmpl = NULL;
843 nvmewqeq->context2 = ctxp;
845 lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n",
846 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);
848 rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
849 if (rc == WQE_SUCCESS) {
851 * Okay to repost buffer here, but wait till cmpl
852 * before freeing ctxp and iocbq.
854 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
855 ctxp->rqb_buffer = NULL;
856 atomic_inc(&nvmep->xmt_ls_rsp);
859 /* Give back resources */
860 atomic_inc(&nvmep->xmt_ls_drop);
861 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
862 "6151 LS Drop IO x%x: Issue %d\n",
865 lpfc_nlp_put(nvmewqeq->context1);
867 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
868 atomic_inc(&nvmep->xmt_ls_abort);
869 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
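/*
 * lpfc_nvmet_xmt_fcp_op - transport fcp_op callback. Builds the
 * TSEND/TRECEIVE/TRSP WQE for this request via lpfc_nvmet_prep_fcp_wqe() and
 * issues it; if the WQ is full, the WQE is parked on the wqfull_list to be
 * re-driven later by lpfc_nvmet_wqfull_process().
 */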
874 lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
875 struct nvmefc_tgt_fcp_req *rsp)
877 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
878 struct lpfc_nvmet_rcv_ctx *ctxp =
879 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
880 struct lpfc_hba *phba = ctxp->phba;
881 struct lpfc_queue *wq;
882 struct lpfc_iocbq *nvmewqeq;
883 struct lpfc_sli_ring *pring;
884 unsigned long iflags;
887 if (phba->pport->load_flag & FC_UNLOADING) {
892 if (phba->pport->load_flag & FC_UNLOADING) {
897 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
898 if (ctxp->ts_cmd_nvme) {
899 if (rsp->op == NVMET_FCOP_RSP)
900 ctxp->ts_nvme_status = ktime_get_ns();
902 ctxp->ts_nvme_data = ktime_get_ns();
904 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
905 int id = smp_processor_id();
906 if (id < LPFC_CHECK_CPU_CNT) {
907 if (rsp->hwqid != id)
908 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
909 "6705 CPU Check OP: "
910 "cpu %d expect %d\n",
912 phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_xmt_io[id]++;
914 ctxp->cpu = id; /* Setup cpu for cmpl check */
919 if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
920 (ctxp->state == LPFC_NVMET_STE_ABORT)) {
921 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
922 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
923 "6102 IO xri x%x aborted\n",
929 nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
930 if (nvmewqeq == NULL) {
931 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
932 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
933 "6152 FCP Drop IO x%x: Prep\n",
939 nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
940 nvmewqeq->iocb_cmpl = NULL;
941 nvmewqeq->context2 = ctxp;
942 nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
943 ctxp->wqeq->hba_wqidx = rsp->hwqid;
945 lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
946 ctxp->oxid, rsp->op, rsp->rsplen);
948 ctxp->flag |= LPFC_NVMET_IO_INP;
949 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
950 if (rc == WQE_SUCCESS) {
951 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
952 if (!ctxp->ts_cmd_nvme)
954 if (rsp->op == NVMET_FCOP_RSP)
955 ctxp->ts_status_wqput = ktime_get_ns();
957 ctxp->ts_data_wqput = ktime_get_ns();
964 * WQ was full, so queue nvmewqeq to be sent after
967 ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
968 wq = phba->sli4_hba.hdwq[rsp->hwqid].nvme_wq;
970 spin_lock_irqsave(&pring->ring_lock, iflags);
971 list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
972 wq->q_flag |= HBA_NVMET_WQFULL;
973 spin_unlock_irqrestore(&pring->ring_lock, iflags);
974 atomic_inc(&lpfc_nvmep->defer_wqfull);
978 /* Give back resources */
979 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
980 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
981 "6153 FCP Drop IO x%x: Issue: %d\n",
984 ctxp->wqeq->hba_wqidx = 0;
985 nvmewqeq->context2 = NULL;
986 nvmewqeq->context3 = NULL;
993 lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
995 struct lpfc_nvmet_tgtport *tport = targetport->private;
997 /* release any threads waiting for the unreg to complete */
998 complete(&tport->tport_unreg_done);
1002 lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
1003 struct nvmefc_tgt_fcp_req *req)
1005 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1006 struct lpfc_nvmet_rcv_ctx *ctxp =
1007 container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1008 struct lpfc_hba *phba = ctxp->phba;
1009 struct lpfc_queue *wq;
1010 unsigned long flags;
1012 if (phba->pport->load_flag & FC_UNLOADING)
1015 if (phba->pport->load_flag & FC_UNLOADING)
1018 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1019 "6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
1020 ctxp->oxid, ctxp->flag, ctxp->state);
1022 lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
1023 ctxp->oxid, ctxp->flag, ctxp->state);
1025 atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
1027 spin_lock_irqsave(&ctxp->ctxlock, flags);
1028 ctxp->state = LPFC_NVMET_STE_ABORT;
1030 /* Since iaab/iaar are NOT set, we need to check
1031 * if the firmware is in the process of aborting this IO
1033 if (ctxp->flag & LPFC_NVMET_XBUSY) {
1034 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1037 ctxp->flag |= LPFC_NVMET_ABORT_OP;
1039 if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
1040 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1042 wq = phba->sli4_hba.hdwq[ctxp->wqeq->hba_wqidx].nvme_wq;
1043 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1044 lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
1048 /* A state of LPFC_NVMET_STE_RCV means we have just received
1049 * the NVME command and have not started processing it.
1050 * (by issuing any IO WQEs on this exchange yet)
1052 if (ctxp->state == LPFC_NVMET_STE_RCV)
1053 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1056 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1058 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
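/*
 * lpfc_nvmet_xmt_fcp_release - transport callback that releases an exchange
 * context. If an abort or SLI4 exchange-busy (XB) condition is still
 * outstanding, the real release is deferred to the abort path; otherwise the
 * context buffer is reposted immediately.
 */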
1062 lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
1063 struct nvmefc_tgt_fcp_req *rsp)
1065 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1066 struct lpfc_nvmet_rcv_ctx *ctxp =
1067 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1068 struct lpfc_hba *phba = ctxp->phba;
1069 unsigned long flags;
1070 bool aborting = false;
1072 if (ctxp->state != LPFC_NVMET_STE_DONE &&
1073 ctxp->state != LPFC_NVMET_STE_ABORT) {
1074 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1075 "6413 NVMET release bad state %d %d oxid x%x\n",
1076 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
1079 spin_lock_irqsave(&ctxp->ctxlock, flags);
1080 if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
1081 (ctxp->flag & LPFC_NVMET_XBUSY)) {
1083 /* let the abort path do the real release */
1084 lpfc_nvmet_defer_release(phba, ctxp);
1086 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1088 lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
1089 ctxp->state, aborting);
1091 atomic_inc(&lpfc_nvmep->xmt_fcp_release);
1096 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
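/*
 * lpfc_nvmet_defer_rcv - transport callback invoked once a command that was
 * deferred (nvmet_fc_rcv_fcp_req() returned -EOVERFLOW) has been accepted.
 * The receive buffer held for the deferral is returned to its RQ here.
 */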
1100 lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
1101 struct nvmefc_tgt_fcp_req *rsp)
1103 struct lpfc_nvmet_tgtport *tgtp;
1104 struct lpfc_nvmet_rcv_ctx *ctxp =
1105 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1106 struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
1107 struct lpfc_hba *phba = ctxp->phba;
1109 lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
1110 ctxp->oxid, ctxp->size, smp_processor_id());
1113 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1114 "6425 Defer rcv: no buffer xri x%x: "
1116 ctxp->oxid, ctxp->flag, ctxp->state);
1120 tgtp = phba->targetport->private;
1122 atomic_inc(&tgtp->rcv_fcp_cmd_defer);
1124 /* Free the nvmebuf since a new buffer already replaced it */
1125 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1128 static struct nvmet_fc_target_template lpfc_tgttemplate = {
1129 .targetport_delete = lpfc_nvmet_targetport_delete,
1130 .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
1131 .fcp_op = lpfc_nvmet_xmt_fcp_op,
1132 .fcp_abort = lpfc_nvmet_xmt_fcp_abort,
1133 .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
1134 .defer_rcv = lpfc_nvmet_defer_rcv,
1137 .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1138 .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1139 .dma_boundary = 0xFFFFFFFF,
1141 /* optional features */
1142 .target_features = 0,
1143 /* sizes of additional private data for data structures */
1144 .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
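/*
 * Note: max_sgl_segments and max_hw_queues are only defaults here; both are
 * overwritten from phba->cfg_nvme_seg_cnt and phba->cfg_hdw_queue in
 * lpfc_nvmet_create_targetport() before the targetport is registered.
 */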
1148 __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
1149 struct lpfc_nvmet_ctx_info *infop)
1151 struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
1152 unsigned long flags;
1154 spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
1155 list_for_each_entry_safe(ctx_buf, next_ctx_buf,
1156 &infop->nvmet_ctx_list, list) {
1157 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1158 list_del_init(&ctx_buf->list);
1159 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1161 __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
1162 ctx_buf->sglq->state = SGL_FREED;
1163 ctx_buf->sglq->ndlp = NULL;
1165 spin_lock(&phba->sli4_hba.sgl_list_lock);
1166 list_add_tail(&ctx_buf->sglq->list,
1167 &phba->sli4_hba.lpfc_nvmet_sgl_list);
1168 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1170 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1171 kfree(ctx_buf->context);
1173 spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
1177 lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
1179 struct lpfc_nvmet_ctx_info *infop;
1182 /* The first context list, MRQ 0 CPU 0 */
1183 infop = phba->sli4_hba.nvmet_ctx_info;
1187 /* Cycle the entire CPU context list for every MRQ */
1188 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
1189 for (j = 0; j < phba->sli4_hba.num_present_cpu; j++) {
1190 __lpfc_nvmet_clean_io_for_cpu(phba, infop);
1194 kfree(phba->sli4_hba.nvmet_ctx_info);
1195 phba->sli4_hba.nvmet_ctx_info = NULL;
1199 lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
1201 struct lpfc_nvmet_ctxbuf *ctx_buf;
1202 struct lpfc_iocbq *nvmewqe;
1203 union lpfc_wqe128 *wqe;
1204 struct lpfc_nvmet_ctx_info *last_infop;
1205 struct lpfc_nvmet_ctx_info *infop;
1208 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
1209 "6403 Allocate NVMET resources for %d XRIs\n",
1210 phba->sli4_hba.nvmet_xri_cnt);
1212 phba->sli4_hba.nvmet_ctx_info = kcalloc(
1213 phba->sli4_hba.num_present_cpu * phba->cfg_nvmet_mrq,
1214 sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
1215 if (!phba->sli4_hba.nvmet_ctx_info) {
1216 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1217 "6419 Failed allocate memory for "
1218 "nvmet context lists\n");
1223 * Assuming X CPUs in the system, and Y MRQs, allocate some
1224 * lpfc_nvmet_ctx_info structures as follows:
1226 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
1227 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
1229 * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
1231 * Each line represents an MRQ "silo" containing an entry for
1234 * MRQ X is initially assumed to be associated with CPU X, thus
1235 * contexts are initially distributed across all MRQs using
1236 * the MRQ index (N) as follows: cpuN/mrqN. When contexts are
1237 * freed, they are freed to the MRQ silo based on the CPU number
1238 * of the IO completion. Thus a context that was allocated for MRQ A
1239 * whose IO completed on CPU B will be freed to cpuB/mrqA.
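* (Each (cpu, mrq) pair presumably maps to flat index
*  cpu * phba->cfg_nvmet_mrq + mrq in the nvmet_ctx_info array allocated
*  above; lpfc_get_ctx_list() is assumed to wrap that lookup.)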
1241 infop = phba->sli4_hba.nvmet_ctx_info;
1242 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
1243 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1244 INIT_LIST_HEAD(&infop->nvmet_ctx_list);
1245 spin_lock_init(&infop->nvmet_ctx_list_lock);
1246 infop->nvmet_ctx_list_cnt = 0;
1252 * Setup the next CPU context info ptr for each MRQ.
1253 * MRQ 0 will cycle thru CPUs 0 - X separately from
1254 * MRQ 1 cycling thru CPUs 0 - X, and so on.
1256 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1257 last_infop = lpfc_get_ctx_list(phba, 0, j);
1258 for (i = phba->sli4_hba.num_present_cpu - 1; i >= 0; i--) {
1259 infop = lpfc_get_ctx_list(phba, i, j);
1260 infop->nvmet_ctx_next_cpu = last_infop;
1265 /* For all nvmet xris, allocate resources needed to process a
1266 * received command on a per xri basis.
1269 for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
1270 ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
1272 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1273 "6404 Ran out of memory for NVMET\n");
1277 ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
1279 if (!ctx_buf->context) {
1281 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1282 "6405 Ran out of NVMET "
1283 "context memory\n");
1286 ctx_buf->context->ctxbuf = ctx_buf;
1287 ctx_buf->context->state = LPFC_NVMET_STE_FREE;
1289 ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
1290 if (!ctx_buf->iocbq) {
1291 kfree(ctx_buf->context);
1293 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1294 "6406 Ran out of NVMET iocb/WQEs\n");
1297 ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
1298 nvmewqe = ctx_buf->iocbq;
1299 wqe = &nvmewqe->wqe;
1301 /* Initialize WQE */
1302 memset(wqe, 0, sizeof(union lpfc_wqe));
1304 ctx_buf->iocbq->context1 = NULL;
1305 spin_lock(&phba->sli4_hba.sgl_list_lock);
1306 ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
1307 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1308 if (!ctx_buf->sglq) {
1309 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1310 kfree(ctx_buf->context);
1312 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1313 "6407 Ran out of NVMET XRIs\n");
1318 * Add ctx to MRQidx context list. Our initial assumption
1319 * is MRQidx will be associated with CPUidx. This association
1320 * can change on the fly.
1322 infop = lpfc_get_ctx_list(phba, idx, idx);
1323 spin_lock(&infop->nvmet_ctx_list_lock);
1324 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
1325 infop->nvmet_ctx_list_cnt++;
1326 spin_unlock(&infop->nvmet_ctx_list_lock);
1328 /* Spread ctx structures evenly across all MRQs */
1330 if (idx >= phba->cfg_nvmet_mrq)
1334 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
1335 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1336 infop = lpfc_get_ctx_list(phba, i, j);
1337 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1338 "6408 TOTAL NVMET ctx for CPU %d "
1339 "MRQ %d: cnt %d nextcpu %p\n",
1340 i, j, infop->nvmet_ctx_list_cnt,
1341 infop->nvmet_ctx_next_cpu);
1348 lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
1350 struct lpfc_vport *vport = phba->pport;
1351 struct lpfc_nvmet_tgtport *tgtp;
1352 struct nvmet_fc_port_info pinfo;
1355 if (phba->targetport)
1358 error = lpfc_nvmet_setup_io_context(phba);
1362 memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
1363 pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
1364 pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
1365 pinfo.port_id = vport->fc_myDID;
1367 /* We need to tell the transport layer + 1 because it takes page
1368 * alignment into account. When space for the SGL is allocated we
1369 * allocate + 3, one for cmd, one for rsp and one for this alignment
1371 lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
1372 lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
1373 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
1375 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1376 error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
1383 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1384 "6025 Cannot register NVME targetport x%x: "
1385 "portnm %llx nodenm %llx segs %d qs %d\n",
1387 pinfo.port_name, pinfo.node_name,
1388 lpfc_tgttemplate.max_sgl_segments,
1389 lpfc_tgttemplate.max_hw_queues);
1390 phba->targetport = NULL;
1391 phba->nvmet_support = 0;
1393 lpfc_nvmet_cleanup_io_context(phba);
1396 tgtp = (struct lpfc_nvmet_tgtport *)
1397 phba->targetport->private;
1400 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1401 "6026 Registered NVME "
1402 "targetport: %p, private %p "
1403 "portnm %llx nodenm %llx segs %d qs %d\n",
1404 phba->targetport, tgtp,
1405 pinfo.port_name, pinfo.node_name,
1406 lpfc_tgttemplate.max_sgl_segments,
1407 lpfc_tgttemplate.max_hw_queues);
1409 atomic_set(&tgtp->rcv_ls_req_in, 0);
1410 atomic_set(&tgtp->rcv_ls_req_out, 0);
1411 atomic_set(&tgtp->rcv_ls_req_drop, 0);
1412 atomic_set(&tgtp->xmt_ls_abort, 0);
1413 atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
1414 atomic_set(&tgtp->xmt_ls_rsp, 0);
1415 atomic_set(&tgtp->xmt_ls_drop, 0);
1416 atomic_set(&tgtp->xmt_ls_rsp_error, 0);
1417 atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
1418 atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
1419 atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
1420 atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
1421 atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
1422 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
1423 atomic_set(&tgtp->xmt_fcp_drop, 0);
1424 atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
1425 atomic_set(&tgtp->xmt_fcp_read, 0);
1426 atomic_set(&tgtp->xmt_fcp_write, 0);
1427 atomic_set(&tgtp->xmt_fcp_rsp, 0);
1428 atomic_set(&tgtp->xmt_fcp_release, 0);
1429 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
1430 atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
1431 atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
1432 atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
1433 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1434 atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
1435 atomic_set(&tgtp->xmt_fcp_abort, 0);
1436 atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1437 atomic_set(&tgtp->xmt_abort_unsol, 0);
1438 atomic_set(&tgtp->xmt_abort_sol, 0);
1439 atomic_set(&tgtp->xmt_abort_rsp, 0);
1440 atomic_set(&tgtp->xmt_abort_rsp_error, 0);
1441 atomic_set(&tgtp->defer_ctx, 0);
1442 atomic_set(&tgtp->defer_fod, 0);
1443 atomic_set(&tgtp->defer_wqfull, 0);
1449 lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
1451 struct lpfc_vport *vport = phba->pport;
1453 if (!phba->targetport)
1456 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
1457 "6007 Update NVMET port %p did x%x\n",
1458 phba->targetport, vport->fc_myDID);
1460 phba->targetport->port_id = vport->fc_myDID;
1465 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
1466 * @phba: pointer to lpfc hba data structure.
1467 * @axri: pointer to the nvmet xri abort wcqe structure.
1469 * This routine is invoked by the worker thread to process a SLI4 fast-path
1470 * NVMET aborted xri.
1473 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
1474 struct sli4_wcqe_xri_aborted *axri)
1476 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
1477 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
1478 struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1479 struct lpfc_nvmet_tgtport *tgtp;
1480 struct lpfc_nodelist *ndlp;
1481 unsigned long iflag = 0;
1483 bool released = false;
1485 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1486 "6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
1488 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
1491 if (phba->targetport) {
1492 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1493 atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
1496 spin_lock_irqsave(&phba->hbalock, iflag);
1497 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1498 list_for_each_entry_safe(ctxp, next_ctxp,
1499 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1501 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1504 /* Check if we already received a free context call
1505 * and we have completed processing an abort situation.
1507 if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
1508 !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
1509 list_del(&ctxp->list);
1512 ctxp->flag &= ~LPFC_NVMET_XBUSY;
1513 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1515 rrq_empty = list_empty(&phba->active_rrq_list);
1516 spin_unlock_irqrestore(&phba->hbalock, iflag);
1517 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1518 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1519 (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
1520 ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
1521 lpfc_set_rrq_active(phba, ndlp,
1522 ctxp->ctxbuf->sglq->sli4_lxritag,
1524 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
1527 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1528 "6318 XB aborted oxid %x flg x%x (%x)\n",
1529 ctxp->oxid, ctxp->flag, released);
1531 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1534 lpfc_worker_wake_up(phba);
1537 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1538 spin_unlock_irqrestore(&phba->hbalock, iflag);
1542 lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
1543 struct fc_frame_header *fc_hdr)
1546 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1547 struct lpfc_hba *phba = vport->phba;
1548 struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1549 struct nvmefc_tgt_fcp_req *rsp;
1551 unsigned long iflag = 0;
1553 xri = be16_to_cpu(fc_hdr->fh_ox_id);
1555 spin_lock_irqsave(&phba->hbalock, iflag);
1556 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1557 list_for_each_entry_safe(ctxp, next_ctxp,
1558 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1560 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1563 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1564 spin_unlock_irqrestore(&phba->hbalock, iflag);
1566 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1567 ctxp->flag |= LPFC_NVMET_ABTS_RCV;
1568 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1570 lpfc_nvmeio_data(phba,
1571 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1572 xri, smp_processor_id(), 0);
1574 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1575 "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
1577 rsp = &ctxp->ctx.fcp_req;
1578 nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
1580 /* Respond with BA_ACC accordingly */
1581 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1584 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1585 spin_unlock_irqrestore(&phba->hbalock, iflag);
1587 lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1588 xri, smp_processor_id(), 1);
1590 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1591 "6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
1593 /* Respond with BA_RJT accordingly */
1594 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
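/*
 * lpfc_nvmet_wqfull_flush - fail back WQEs parked on a WQ's wqfull_list.
 * If ctxp is non-NULL, only the WQE for that context is flushed; otherwise
 * the whole list is drained. Each flushed WQE is completed through
 * lpfc_nvmet_xmt_fcp_op_cmp() with a faked IOERR_ABORT_REQUESTED status.
 */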
1600 lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
1601 struct lpfc_nvmet_rcv_ctx *ctxp)
1603 struct lpfc_sli_ring *pring;
1604 struct lpfc_iocbq *nvmewqeq;
1605 struct lpfc_iocbq *next_nvmewqeq;
1606 unsigned long iflags;
1607 struct lpfc_wcqe_complete wcqe;
1608 struct lpfc_wcqe_complete *wcqep;
1613 /* Fake an ABORT error code back to cmpl routine */
1614 memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
1615 bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
1616 wcqep->parameter = IOERR_ABORT_REQUESTED;
1618 spin_lock_irqsave(&pring->ring_lock, iflags);
1619 list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
1620 &wq->wqfull_list, list) {
1622 /* Checking for a specific IO to flush */
1623 if (nvmewqeq->context2 == ctxp) {
1624 list_del(&nvmewqeq->list);
1625 spin_unlock_irqrestore(&pring->ring_lock,
1627 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
1634 list_del(&nvmewqeq->list);
1635 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1636 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
1637 spin_lock_irqsave(&pring->ring_lock, iflags);
1641 wq->q_flag &= ~HBA_NVMET_WQFULL;
1642 spin_unlock_irqrestore(&pring->ring_lock, iflags);
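/*
 * lpfc_nvmet_wqfull_process - re-drive WQEs parked on a WQ's wqfull_list
 * once WQ slots are available again. Entries that still cannot be issued
 * are put back on the list and the routine exits.
 */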
1646 lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
1647 struct lpfc_queue *wq)
1649 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1650 struct lpfc_sli_ring *pring;
1651 struct lpfc_iocbq *nvmewqeq;
1652 unsigned long iflags;
1656 * Some WQE slots are available, so try to re-issue anything
1657 * on the WQ wqfull_list.
1660 spin_lock_irqsave(&pring->ring_lock, iflags);
1661 while (!list_empty(&wq->wqfull_list)) {
1662 list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
1664 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1665 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
1666 spin_lock_irqsave(&pring->ring_lock, iflags);
1668 /* WQ was full again, so put it back on the list */
1669 list_add(&nvmewqeq->list, &wq->wqfull_list);
1670 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1674 wq->q_flag &= ~HBA_NVMET_WQFULL;
1675 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1681 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
1683 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1684 struct lpfc_nvmet_tgtport *tgtp;
1685 struct lpfc_queue *wq;
1688 if (phba->nvmet_support == 0)
1690 if (phba->targetport) {
1691 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1692 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
1693 wq = phba->sli4_hba.hdwq[qidx].nvme_wq;
1694 lpfc_nvmet_wqfull_flush(phba, wq, NULL);
1696 init_completion(&tgtp->tport_unreg_done);
1697 nvmet_fc_unregister_targetport(phba->targetport);
1698 wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
1699 lpfc_nvmet_cleanup_io_context(phba);
1701 phba->targetport = NULL;
1706 * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
1707 * @phba: pointer to lpfc hba data structure.
1708 * @pring: pointer to a SLI ring.
1709 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1711 * This routine is used for processing an unsolicited NVME LS request
1712 * received on the LS RQ. It allocates a receive context for the exchange
1713 * and hands the LS payload to the NVME transport via nvmet_fc_rcv_ls_req().
1714 * If the transport cannot accept the request, the receive buffer is freed
1715 * and an abort is issued for the exchange.
1719 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1720 struct hbq_dmabuf *nvmebuf)
1722 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1723 struct lpfc_nvmet_tgtport *tgtp;
1724 struct fc_frame_header *fc_hdr;
1725 struct lpfc_nvmet_rcv_ctx *ctxp;
1727 uint32_t size, oxid, sid, rc;
1729 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1730 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1732 if (!phba->targetport) {
1733 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1734 "6154 LS Drop IO x%x\n", oxid);
1742 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1743 payload = (uint32_t *)(nvmebuf->dbuf.virt);
1744 size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
1745 sid = sli4_sid_from_fc_hdr(fc_hdr);
1747 ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
1749 atomic_inc(&tgtp->rcv_ls_req_drop);
1750 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1751 "6155 LS Drop IO x%x: Alloc\n",
1754 lpfc_nvmeio_data(phba, "NVMET LS DROP: "
1755 "xri x%x sz %d from %06x\n",
1757 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1765 ctxp->state = LPFC_NVMET_STE_LS_RCV;
1766 ctxp->entry_cnt = 1;
1767 ctxp->rqb_buffer = (void *)nvmebuf;
1769 lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
1772 * The calling sequence should be:
1773 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done
1774 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
1776 atomic_inc(&tgtp->rcv_ls_req_in);
1777 rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
1780 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1781 "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
1782 "%08x %08x %08x\n", size, rc,
1783 *payload, *(payload+1), *(payload+2),
1784 *(payload+3), *(payload+4), *(payload+5));
1787 atomic_inc(&tgtp->rcv_ls_req_out);
1791 lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n",
1794 atomic_inc(&tgtp->rcv_ls_req_drop);
1795 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1796 "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
1799 /* We assume a rcv'ed cmd ALWAYS fits into 1 buffer */
1800 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1802 atomic_inc(&tgtp->xmt_ls_abort);
1803 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
1807 static struct lpfc_nvmet_ctxbuf *
1808 lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
1809 struct lpfc_nvmet_ctx_info *current_infop)
1811 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1812 struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
1813 struct lpfc_nvmet_ctx_info *get_infop;
1817 * The current_infop for the MRQ an NVME command IU was received
1818 * on is empty. Our goal is to replenish this MRQ's context
1819 * list from another CPU's list.
1821 * First we need to pick a context list to start looking on.
1822 * nvmet_ctx_start_cpu is the CPU that had contexts available the
1823 * last time we needed to replenish this MRQ, while nvmet_ctx_next_cpu
1824 * is just the next sequential CPU for this MRQ.
1826 if (current_infop->nvmet_ctx_start_cpu)
1827 get_infop = current_infop->nvmet_ctx_start_cpu;
1829 get_infop = current_infop->nvmet_ctx_next_cpu;
1831 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
1832 if (get_infop == current_infop) {
1833 get_infop = get_infop->nvmet_ctx_next_cpu;
1836 spin_lock(&get_infop->nvmet_ctx_list_lock);
1838 /* Just take the entire context list, if there are any */
1839 if (get_infop->nvmet_ctx_list_cnt) {
1840 list_splice_init(&get_infop->nvmet_ctx_list,
1841 ¤t_infop->nvmet_ctx_list);
1842 current_infop->nvmet_ctx_list_cnt =
1843 get_infop->nvmet_ctx_list_cnt - 1;
1844 get_infop->nvmet_ctx_list_cnt = 0;
1845 spin_unlock(&get_infop->nvmet_ctx_list_lock);
1847 current_infop->nvmet_ctx_start_cpu = get_infop;
1848 list_remove_head(¤t_infop->nvmet_ctx_list,
1849 ctx_buf, struct lpfc_nvmet_ctxbuf,
1854 /* Otherwise, move on to the next CPU for this MRQ */
1855 spin_unlock(&get_infop->nvmet_ctx_list_lock);
1856 get_infop = get_infop->nvmet_ctx_next_cpu;
1860 /* Nothing found, all contexts for the MRQ are in-flight */
1865 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
1866 * @phba: pointer to lpfc hba data structure.
1867 * @idx: relative index of MRQ vector
1868 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1870 * This routine is used for processing an unsolicited NVME FCP command IU
1871 * received on one of the NVMET MRQs. It obtains a receive context from the
1872 * per-CPU context list for that MRQ (replenishing the list from another CPU
1873 * if necessary) and passes the command to the NVME transport via
1874 * nvmet_fc_rcv_fcp_req(). If the command cannot be accepted, the IO is
1875 * dropped, an abort is issued, and the RQ buffer is reposted.
1878 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
1880 struct rqb_dmabuf *nvmebuf,
1881 uint64_t isr_timestamp)
1883 struct lpfc_nvmet_rcv_ctx *ctxp;
1884 struct lpfc_nvmet_tgtport *tgtp;
1885 struct fc_frame_header *fc_hdr;
1886 struct lpfc_nvmet_ctxbuf *ctx_buf;
1887 struct lpfc_nvmet_ctx_info *current_infop;
1889 uint32_t size, oxid, sid, rc, qno;
1890 unsigned long iflag;
1893 if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
1897 if (!nvmebuf || !phba->targetport) {
1898 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1899 "6157 NVMET FCP Drop IO\n");
1908 * Get a pointer to the context list for this MRQ based on
1909 * the CPU this MRQ IRQ is associated with. If the CPU association
1910 * changes from our initial assumption, the context list could
1911 * be empty, thus it would need to be replenished with the
1912 * context list from another CPU for this MRQ.
1914 current_cpu = smp_processor_id();
1915 current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
1916 spin_lock_irqsave(¤t_infop->nvmet_ctx_list_lock, iflag);
1917 if (current_infop->nvmet_ctx_list_cnt) {
1918 list_remove_head(¤t_infop->nvmet_ctx_list,
1919 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
1920 current_infop->nvmet_ctx_list_cnt--;
1922 ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
1924 spin_unlock_irqrestore(¤t_infop->nvmet_ctx_list_lock, iflag);
1926 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1927 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1928 size = nvmebuf->bytes_recv;
1930 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1931 if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
1932 if (current_cpu < LPFC_CHECK_CPU_CNT) {
1933 if (idx != current_cpu)
1934 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1935 "6703 CPU Check rcv: "
1936 "cpu %d expect %d\n",
1938 phba->sli4_hba.hdwq[idx].cpucheck_rcv_io[current_cpu]++;
1943 lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
1944 oxid, size, smp_processor_id());
1946 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1949 /* Queue this NVME IO to process later */
1950 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
1951 list_add_tail(&nvmebuf->hbuf.list,
1952 &phba->sli4_hba.lpfc_nvmet_io_wait_list);
1953 phba->sli4_hba.nvmet_io_wait_cnt++;
1954 phba->sli4_hba.nvmet_io_wait_total++;
1955 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
1958 /* Post a brand new DMA buffer to RQ */
1960 lpfc_post_rq_buffer(
1961 phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
1962 phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
1964 atomic_inc(&tgtp->defer_ctx);
1968 payload = (uint32_t *)(nvmebuf->dbuf.virt);
1969 sid = sli4_sid_from_fc_hdr(fc_hdr);
1971 ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
1972 if (ctxp->state != LPFC_NVMET_STE_FREE) {
1973 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1974 "6414 NVMET Context corrupt %d %d oxid x%x\n",
1975 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
1985 ctxp->state = LPFC_NVMET_STE_RCV;
1986 ctxp->entry_cnt = 1;
1988 ctxp->ctxbuf = ctx_buf;
1989 ctxp->rqb_buffer = (void *)nvmebuf;
1990 spin_lock_init(&ctxp->ctxlock);
1992 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1993 if (isr_timestamp) {
1994 ctxp->ts_isr_cmd = isr_timestamp;
1995 ctxp->ts_cmd_nvme = ktime_get_ns();
1996 ctxp->ts_nvme_data = 0;
1997 ctxp->ts_data_wqput = 0;
1998 ctxp->ts_isr_data = 0;
1999 ctxp->ts_data_nvme = 0;
2000 ctxp->ts_nvme_status = 0;
2001 ctxp->ts_status_wqput = 0;
2002 ctxp->ts_isr_status = 0;
2003 ctxp->ts_status_nvme = 0;
2005 ctxp->ts_cmd_nvme = 0;
2009 atomic_inc(&tgtp->rcv_fcp_cmd_in);
2011 * The calling sequence should be:
2012 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
2013 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
2014 * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
2015 * the NVME command / FC header is stored, so we are free to repost
2018 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
2021 /* Process FCP command */
2023 ctxp->rqb_buffer = NULL;
2024 atomic_inc(&tgtp->rcv_fcp_cmd_out);
2025 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
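/*
 * Reading aid for the nvmet_fc_rcv_fcp_req() return handling in this
 * routine:
 *   0           command accepted; the rqb buffer was reposted above
 *   -EOVERFLOW  transport defers; the buffer is kept until .defer_rcv
 *   other       drop: defer the context release, issue an ABTS and
 *               repost the buffer
 */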
2029 /* Processing of FCP command is deferred */
2030 if (rc == -EOVERFLOW) {
2032 * Post a brand new DMA buffer to RQ and defer
2033 * freeing rcv buffer till .defer_rcv callback
2036 lpfc_post_rq_buffer(
2037 phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2038 phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2040 lpfc_nvmeio_data(phba,
2041 "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
2043 atomic_inc(&tgtp->rcv_fcp_cmd_out);
2044 atomic_inc(&tgtp->defer_fod);
2047 ctxp->rqb_buffer = nvmebuf;
2049 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2050 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2051 "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
2053 atomic_read(&tgtp->rcv_fcp_cmd_in),
2054 atomic_read(&tgtp->rcv_fcp_cmd_out),
2055 atomic_read(&tgtp->xmt_fcp_release));
2057 lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
2060 lpfc_nvmet_defer_release(phba, ctxp);
2061 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
2062 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2067 lpfc_nvmet_ctxbuf_post(phba, ctx_buf);
2070 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2074 * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
2075 * @phba: pointer to lpfc hba data structure.
2076 * @pring: pointer to a SLI ring.
2077 * @piocb: pointer to the driver iocb that wraps the received nvme buffer.
2079 * This routine is used to process an unsolicited event received from a SLI
2080 * (Service Level Interface) ring. The actual processing of the data buffer
2081 * associated with the unsolicited event is done by invoking the routine
2082 * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
2083 * SLI RQ on which the unsolicited event was received.
2086 lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2087 struct lpfc_iocbq *piocb)
2089 struct lpfc_dmabuf *d_buf;
2090 struct hbq_dmabuf *nvmebuf;
2092 d_buf = piocb->context2;
2093 nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2095 if (phba->nvmet_support == 0) {
2096 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2099 lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
2103 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
2104 * @phba: pointer to lpfc hba data structure.
2105 * @idx: relative index of MRQ vector
2106 * @nvmebuf: pointer to received nvme data structure.
* @isr_timestamp: timestamp captured when the interrupt was serviced
* (used for the debugfs latency accounting).
2108 * This routine is used to process an unsolicited event received from a SLI
2109 * (Service Level Interface) ring. The actual processing of the data buffer
2110 * associated with the unsolicited event is done by invoking the routine
2111 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
2112 * SLI RQ on which the unsolicited event was received.
2115 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
2117 struct rqb_dmabuf *nvmebuf,
2118 uint64_t isr_timestamp)
2120 if (phba->nvmet_support == 0) {
2121 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2124 lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf,
2129 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
2130 * @phba: pointer to lpfc hba data structure.
2131 * @ctxp: Context info for NVME LS Request
2132 * @rspbuf: DMA address of the NVME LS response buffer.
2133 * @rspsize: size of the NVME LS response.
2135 * This routine allocates an lpfc WQE data structure from the driver's
2136 * WQE free list and prepares it with the parameters passed in, so the
2137 * discovery state machine can issue an Extended Link Service (NVME)
2138 * command. It is a generic allocation and preparation routine used by
2139 * the discovery state machine routines; the NVME command-specific
2140 * fields are set up later by the individual routines after this one
2141 * returns a generic, prepared WQE. It fills in the Buffer Descriptor
2142 * Entry (BDE) for the response payload passed in. The reference count
2143 * on the ndlp is incremented by one and the ndlp reference is stored
2144 * in context1 of the WQE data structure, so the command's completion
2145 * handler can access the ndlp later.
2150 * Pointer to the newly allocated/prepared nvme wqe data structure
2151 * NULL - when nvme wqe data structure allocation/preparation failed
2153 static struct lpfc_iocbq *
2154 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
2155 struct lpfc_nvmet_rcv_ctx *ctxp,
2156 dma_addr_t rspbuf, uint16_t rspsize)
2158 struct lpfc_nodelist *ndlp;
2159 struct lpfc_iocbq *nvmewqe;
2160 union lpfc_wqe128 *wqe;
2162 if (!lpfc_is_link_up(phba)) {
2163 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2164 "6104 NVMET prep LS wqe: link err: "
2165 "NPORT x%x oxid:x%x ste %d\n",
2166 ctxp->sid, ctxp->oxid, ctxp->state);
2170 /* Allocate buffer for command wqe */
2171 nvmewqe = lpfc_sli_get_iocbq(phba);
2172 if (nvmewqe == NULL) {
2173 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2174 "6105 NVMET prep LS wqe: No WQE: "
2175 "NPORT x%x oxid x%x ste %d\n",
2176 ctxp->sid, ctxp->oxid, ctxp->state);
2180 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2181 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2182 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2183 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2184 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2185 "6106 NVMET prep LS wqe: No ndlp: "
2186 "NPORT x%x oxid x%x ste %d\n",
2187 ctxp->sid, ctxp->oxid, ctxp->state);
2188 goto nvme_wqe_free_wqeq_exit;
2190 ctxp->wqeq = nvmewqe;
2192 /* prevent preparing wqe with NULL ndlp reference */
2193 nvmewqe->context1 = lpfc_nlp_get(ndlp);
2194 if (nvmewqe->context1 == NULL)
2195 goto nvme_wqe_free_wqeq_exit;
2196 nvmewqe->context2 = ctxp;
2198 wqe = &nvmewqe->wqe;
2199 memset(wqe, 0, sizeof(union lpfc_wqe));
2202 wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2203 wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
2204 wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
2205 wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
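/* Words 0 - 2 form a single 64-bit BDE describing the DMA'd LS response
 * payload (rspbuf / rspsize) that this XMIT_SEQUENCE WQE will transmit.
 */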
2212 bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
2213 bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
2214 bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
2215 bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
2216 bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
2219 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
2220 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2221 bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
2224 bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
2225 CMD_XMIT_SEQUENCE64_WQE);
2226 bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
2227 bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
2228 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
2231 wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
2234 bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
2235 /* Needs to be set by caller */
2236 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
2239 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
2240 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2241 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
2242 LPFC_WQE_LENLOC_WORD12);
2243 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
2246 bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
2247 LPFC_WQE_CQ_ID_DEFAULT);
2248 bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
2252 wqe->xmit_sequence.xmit_len = rspsize;
2255 nvmewqe->vport = phba->pport;
2256 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2257 nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
2259 /* Xmit NVMET response to remote NPORT <did> */
2260 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2261 "6039 Xmit NVMET LS response to remote "
2262 "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
2263 ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
2267 nvme_wqe_free_wqeq_exit:
2268 nvmewqe->context2 = NULL;
2269 nvmewqe->context3 = NULL;
2270 lpfc_sli_release_iocbq(phba, nvmewqe);
2275 static struct lpfc_iocbq *
2276 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
2277 struct lpfc_nvmet_rcv_ctx *ctxp)
2279 struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
2280 struct lpfc_nvmet_tgtport *tgtp;
2281 struct sli4_sge *sgl;
2282 struct lpfc_nodelist *ndlp;
2283 struct lpfc_iocbq *nvmewqe;
2284 struct scatterlist *sgel;
2285 union lpfc_wqe128 *wqe;
2286 struct ulp_bde64 *bde;
2288 dma_addr_t physaddr;
2293 if (!lpfc_is_link_up(phba)) {
2294 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2295 "6107 NVMET prep FCP wqe: link err:"
2296 "NPORT x%x oxid x%x ste %d\n",
2297 ctxp->sid, ctxp->oxid, ctxp->state);
2301 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2302 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2303 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2304 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2305 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2306 "6108 NVMET prep FCP wqe: no ndlp: "
2307 "NPORT x%x oxid x%x ste %d\n",
2308 ctxp->sid, ctxp->oxid, ctxp->state);
2312 if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
2313 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2314 "6109 NVMET prep FCP wqe: seg cnt err: "
2315 "NPORT x%x oxid x%x ste %d cnt %d\n",
2316 ctxp->sid, ctxp->oxid, ctxp->state,
2317 phba->cfg_nvme_seg_cnt);
2321 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2322 nvmewqe = ctxp->wqeq;
2323 if (nvmewqe == NULL) {
2324 /* Allocate buffer for command wqe */
2325 nvmewqe = ctxp->ctxbuf->iocbq;
2326 if (nvmewqe == NULL) {
2327 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2328 "6110 NVMET prep FCP wqe: No "
2329 "WQE: NPORT x%x oxid x%x ste %d\n",
2330 ctxp->sid, ctxp->oxid, ctxp->state);
2333 ctxp->wqeq = nvmewqe;
2334 xc = 0; /* create new XRI */
2335 nvmewqe->sli4_lxritag = NO_XRI;
2336 nvmewqe->sli4_xritag = NO_XRI;
2340 if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
2341 (ctxp->entry_cnt == 1)) ||
2342 (ctxp->state == LPFC_NVMET_STE_DATA)) {
2343 wqe = &nvmewqe->wqe;
2345 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2346 "6111 Wrong state NVMET FCP: %d cnt %d\n",
2347 ctxp->state, ctxp->entry_cnt);
2351 sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
2353 case NVMET_FCOP_READDATA:
2354 case NVMET_FCOP_READDATA_RSP:
2355 /* From the tsend template, initialize words 7 - 11 */
2356 memcpy(&wqe->words[7],
2357 &lpfc_tsend_cmd_template.words[7],
2358 sizeof(uint32_t) * 5);
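/* Only the per-IO words are written below; the invariant words 7 - 11
 * were just copied from the prebuilt TSEND template.
 */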
2360 /* Words 0 - 2 : The first sg segment */
2362 physaddr = sg_dma_address(sgel);
2363 wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2364 wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
2365 wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
2366 wqe->fcp_tsend.bde.addrHigh =
2367 cpu_to_le32(putPaddrHigh(physaddr));
2370 wqe->fcp_tsend.payload_offset_len = 0;
2373 wqe->fcp_tsend.relative_offset = ctxp->offset;
2376 wqe->fcp_tsend.reserved = 0;
2379 bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
2380 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2381 bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
2382 nvmewqe->sli4_xritag);
2384 /* Word 7 - set ar later */
2387 wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
2390 bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
2391 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
2393 /* Word 10 - set wqes later, in template xc=1 */
2395 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
2397 /* Word 11 - set sup, irsp, irsplen later */
2401 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2403 /* Setup 2 SKIP SGEs */
2407 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2408 sgl->word2 = cpu_to_le32(sgl->word2);
2414 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2415 sgl->word2 = cpu_to_le32(sgl->word2);
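/* The two SKIP entries are placeholders ahead of the data SGEs, which
 * the common scatterlist loop at the end of this routine appends.
 */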
2418 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
2419 atomic_inc(&tgtp->xmt_fcp_read_rsp);
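/* READDATA_RSP piggybacks the FCP response on the final TSEND: with a
 * normal-length response it is either suppressed (NLP_SUPPRESS_RSP) or
 * embedded in WQE words 16 and up; plain READDATA below clears wqe_ar
 * (no automatic response), leaving the response to a later TRSP.
 */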
2421 /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2423 if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
2424 if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
2426 &wqe->fcp_tsend.wqe_com, 1);
2428 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
2429 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
2430 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
2431 ((rsp->rsplen >> 2) - 1));
2432 memcpy(&wqe->words[16], rsp->rspaddr,
2436 atomic_inc(&tgtp->xmt_fcp_read);
2438 /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2439 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
2443 case NVMET_FCOP_WRITEDATA:
2444 /* From the treceive template, initialize words 3 - 11 */
2445 memcpy(&wqe->words[3],
2446 &lpfc_treceive_cmd_template.words[3],
2447 sizeof(uint32_t) * 9);
2449 /* Words 0 - 2 : The first sg segment */
2450 txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
2451 GFP_KERNEL, &physaddr);
2453 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2454 "6041 Bad txrdy buffer: oxid x%x\n",
2458 ctxp->txrdy = txrdy;
2459 ctxp->txrdy_phys = physaddr;
2460 wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2461 wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
2462 wqe->fcp_treceive.bde.addrLow =
2463 cpu_to_le32(putPaddrLow(physaddr));
2464 wqe->fcp_treceive.bde.addrHigh =
2465 cpu_to_le32(putPaddrHigh(physaddr));
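/* For WRITEDATA the first BDE carries a transfer-ready payload drawn
 * from txrdy_payload_pool rather than user data; the data SGEs still
 * come from the scatterlist loop at the end of this routine.
 */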
2468 wqe->fcp_treceive.relative_offset = ctxp->offset;
2471 bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
2472 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2473 bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
2474 nvmewqe->sli4_xritag);
2479 wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
2482 bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
2483 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
2485 /* Word 10 - in template xc=1 */
2487 bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
2489 /* Word 11 - set pbde later */
2490 if (phba->cfg_enable_pbde) {
2493 bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
2498 wqe->fcp_treceive.fcp_data_len = rsp->transfer_length;
2500 /* Setup 1 TXRDY and 1 SKIP SGE */
2502 txrdy[1] = cpu_to_be32(rsp->transfer_length);
2505 sgl->addr_hi = putPaddrHigh(physaddr);
2506 sgl->addr_lo = putPaddrLow(physaddr);
2508 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2509 sgl->word2 = cpu_to_le32(sgl->word2);
2510 sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
2515 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2516 sgl->word2 = cpu_to_le32(sgl->word2);
2519 atomic_inc(&tgtp->xmt_fcp_write);
2522 case NVMET_FCOP_RSP:
2523 /* From the trsp template, initialize words 4 - 11 */
2524 memcpy(&wqe->words[4],
2525 &lpfc_trsp_cmd_template.words[4],
2526 sizeof(uint32_t) * 8);
2529 physaddr = rsp->rspdma;
2530 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2531 wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
2532 wqe->fcp_trsp.bde.addrLow =
2533 cpu_to_le32(putPaddrLow(physaddr));
2534 wqe->fcp_trsp.bde.addrHigh =
2535 cpu_to_le32(putPaddrHigh(physaddr));
2538 wqe->fcp_trsp.response_len = rsp->rsplen;
2541 bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
2542 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2543 bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
2544 nvmewqe->sli4_xritag);
2549 wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
2552 bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
2553 bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
2557 bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);
2560 /* In template wqes=0 irsp=0 irsplen=0 - good response */
2561 if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
2562 /* Bad response - embed it */
2563 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
2564 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
2565 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
2566 ((rsp->rsplen >> 2) - 1));
2567 memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
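/* A non-success response is embedded in the WQE itself (wqes / irsp /
 * irsplen plus words 16 and up); the good-status case relies on the
 * template defaults.
 */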
2572 wqe->fcp_trsp.rsvd_12_15[0] = 0;
2574 /* Use rspbuf, NOT sg list */
2577 atomic_inc(&tgtp->xmt_fcp_rsp);
2581 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2582 "6064 Unknown Rsp Op %d\n",
2588 nvmewqe->vport = phba->pport;
2589 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2590 nvmewqe->context1 = ndlp;
2592 for (i = 0; i < rsp->sg_cnt; i++) {
2594 physaddr = sg_dma_address(sgel);
2595 cnt = sg_dma_len(sgel);
2596 sgl->addr_hi = putPaddrHigh(physaddr);
2597 sgl->addr_lo = putPaddrLow(physaddr);
2599 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2600 bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
2601 if ((i+1) == rsp->sg_cnt)
2602 bf_set(lpfc_sli4_sge_last, sgl, 1);
2603 sgl->word2 = cpu_to_le32(sgl->word2);
2604 sgl->sge_len = cpu_to_le32(cnt);
2606 bde = (struct ulp_bde64 *)&wqe->words[13];
2608 /* Words 13-15 (PBDE) */
2609 bde->addrLow = sgl->addr_lo;
2610 bde->addrHigh = sgl->addr_hi;
2611 bde->tus.f.bdeSize =
2612 le32_to_cpu(sgl->sge_len);
2613 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2614 bde->tus.w = cpu_to_le32(bde->tus.w);
2616 memset(bde, 0, sizeof(struct ulp_bde64));
2620 ctxp->offset += cnt;
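/* ctxp->offset accumulates across calls so a follow-on TSEND or
 * TRECEIVE for this exchange starts at the correct relative offset.
 */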
2622 ctxp->state = LPFC_NVMET_STE_DATA;
2628 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
2629 * @phba: Pointer to HBA context object.
2630 * @cmdwqe: Pointer to driver command WQE object.
2631 * @wcqe: Pointer to driver response CQE object.
2633 * The function is called from the SLI ring event handler with no lock
2634 * held. It is the completion handler for an NVME ABTS of an FCP command
2635 * and frees the memory resources used for that command.
2638 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2639 struct lpfc_wcqe_complete *wcqe)
2641 struct lpfc_nvmet_rcv_ctx *ctxp;
2642 struct lpfc_nvmet_tgtport *tgtp;
2643 uint32_t status, result;
2644 unsigned long flags;
2645 bool released = false;
2647 ctxp = cmdwqe->context2;
2648 status = bf_get(lpfc_wcqe_c_status, wcqe);
2649 result = wcqe->parameter;
2651 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2652 if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2653 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2655 ctxp->state = LPFC_NVMET_STE_DONE;
2657 /* Check if we have already received a free-context call
2658 * and have completed processing an abort situation.
2660 spin_lock_irqsave(&ctxp->ctxlock, flags);
2661 if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2662 !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2663 list_del(&ctxp->list);
2666 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2667 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2668 atomic_inc(&tgtp->xmt_abort_rsp);
2670 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2671 "6165 ABORT cmpl: xri x%x flg x%x (%d) "
2672 "WCQE: %08x %08x %08x %08x\n",
2673 ctxp->oxid, ctxp->flag, released,
2674 wcqe->word0, wcqe->total_data_placed,
2675 result, wcqe->word3);
2677 cmdwqe->context2 = NULL;
2678 cmdwqe->context3 = NULL;
2680 * If the transport has released the ctx, we can reuse it now; otherwise
2681 * it will be recycled by the transport release call.
2684 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
2686 /* This is the iocbq for the abort, not the command */
2687 lpfc_sli_release_iocbq(phba, cmdwqe);
2689 /* Since iaab/iaar are NOT set, there is no work left.
2690 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
2691 * should have been called already.
2696 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
2697 * @phba: Pointer to HBA context object.
2698 * @cmdwqe: Pointer to driver command WQE object.
2699 * @wcqe: Pointer to driver response CQE object.
2701 * The function is called from the SLI ring event handler with no lock
2702 * held. It is the completion handler for an NVME ABTS of an FCP command
2703 * and frees the memory resources used for that command.
2706 lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2707 struct lpfc_wcqe_complete *wcqe)
2709 struct lpfc_nvmet_rcv_ctx *ctxp;
2710 struct lpfc_nvmet_tgtport *tgtp;
2711 unsigned long flags;
2712 uint32_t status, result;
2713 bool released = false;
2715 ctxp = cmdwqe->context2;
2716 status = bf_get(lpfc_wcqe_c_status, wcqe);
2717 result = wcqe->parameter;
2720 /* If the context is clear, the related IO has already completed */
2721 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2722 "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
2723 wcqe->word0, wcqe->total_data_placed,
2724 result, wcqe->word3);
2728 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2729 if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2730 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2733 if (ctxp->state != LPFC_NVMET_STE_ABORT) {
2734 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2735 "6112 ABTS Wrong state:%d oxid x%x\n",
2736 ctxp->state, ctxp->oxid);
2739 /* Check if we have already received a free-context call
2740 * and have completed processing an abort situation.
2742 ctxp->state = LPFC_NVMET_STE_DONE;
2743 spin_lock_irqsave(&ctxp->ctxlock, flags);
2744 if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2745 !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2746 list_del(&ctxp->list);
2749 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2750 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2751 atomic_inc(&tgtp->xmt_abort_rsp);
2753 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2754 "6316 ABTS cmpl xri x%x flg x%x (%x) "
2755 "WCQE: %08x %08x %08x %08x\n",
2756 ctxp->oxid, ctxp->flag, released,
2757 wcqe->word0, wcqe->total_data_placed,
2758 result, wcqe->word3);
2760 cmdwqe->context2 = NULL;
2761 cmdwqe->context3 = NULL;
2763 * If the transport has released the ctx, we can reuse it now; otherwise
2764 * it will be recycled by the transport release call.
2767 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
2769 /* Since iaab/iaar are NOT set, there is no work left.
2770 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
2771 * should have been called already.
2776 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
2777 * @phba: Pointer to HBA context object.
2778 * @cmdwqe: Pointer to driver command WQE object.
2779 * @wcqe: Pointer to driver response CQE object.
2781 * The function is called from the SLI ring event handler with no lock
2782 * held. It is the completion handler for an NVME ABTS of an LS command
2783 * and frees the memory resources used for that command.
2786 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2787 struct lpfc_wcqe_complete *wcqe)
2789 struct lpfc_nvmet_rcv_ctx *ctxp;
2790 struct lpfc_nvmet_tgtport *tgtp;
2791 uint32_t status, result;
2793 ctxp = cmdwqe->context2;
2794 status = bf_get(lpfc_wcqe_c_status, wcqe);
2795 result = wcqe->parameter;
2797 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2798 atomic_inc(&tgtp->xmt_ls_abort_cmpl);
2800 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2801 "6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
2802 ctxp, wcqe->word0, wcqe->total_data_placed,
2803 result, wcqe->word3);
2806 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2807 "6415 NVMET LS Abort No ctx: WCQE: "
2808 "%08x %08x %08x %08x\n",
2809 wcqe->word0, wcqe->total_data_placed,
2810 result, wcqe->word3);
2812 lpfc_sli_release_iocbq(phba, cmdwqe);
2816 if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
2817 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2818 "6416 NVMET LS abort cmpl state mismatch: "
2819 "oxid x%x: %d %d\n",
2820 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
2823 cmdwqe->context2 = NULL;
2824 cmdwqe->context3 = NULL;
2825 lpfc_sli_release_iocbq(phba, cmdwqe);
2830 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
2831 struct lpfc_nvmet_rcv_ctx *ctxp,
2832 uint32_t sid, uint16_t xri)
2834 struct lpfc_nvmet_tgtport *tgtp;
2835 struct lpfc_iocbq *abts_wqeq;
2836 union lpfc_wqe128 *wqe_abts;
2837 struct lpfc_nodelist *ndlp;
2839 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2840 "6067 ABTS: sid %x xri x%x/x%x\n",
2841 sid, xri, ctxp->wqeq->sli4_xritag);
2843 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2845 ndlp = lpfc_findnode_did(phba->pport, sid);
2846 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2847 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2848 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2849 atomic_inc(&tgtp->xmt_abort_rsp_error);
2850 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2851 "6134 Drop ABTS - wrong NDLP state x%x.\n",
2852 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
2854 /* No failure to an ABTS request. */
2858 abts_wqeq = ctxp->wqeq;
2859 wqe_abts = &abts_wqeq->wqe;
2862 * Since we zero the whole WQE, we need to ensure we set the WQE fields
2863 * that were initialized in lpfc_sli4_nvmet_alloc.
2865 memset(wqe_abts, 0, sizeof(union lpfc_wqe));
2868 bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
2869 bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
2870 bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
2871 bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
2872 bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
2875 bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
2876 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2877 bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
2878 abts_wqeq->sli4_xritag);
2881 bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
2882 CMD_XMIT_SEQUENCE64_WQE);
2883 bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
2884 bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
2885 bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
2888 wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
2891 bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
2892 /* Needs to be set by caller */
2893 bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
2896 bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
2897 bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2898 bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
2899 LPFC_WQE_LENLOC_WORD12);
2900 bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
2901 bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
2904 bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
2905 LPFC_WQE_CQ_ID_DEFAULT);
2906 bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
2909 abts_wqeq->vport = phba->pport;
2910 abts_wqeq->context1 = ndlp;
2911 abts_wqeq->context2 = ctxp;
2912 abts_wqeq->context3 = NULL;
2913 abts_wqeq->rsvd2 = 0;
2914 /* hba_wqidx should already be setup from command we are aborting */
2915 abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
2916 abts_wqeq->iocb.ulpLe = 1;
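/* This unsolicited abort is sent as an XMIT_SEQUENCE WQE carrying a
 * BA_ABTS basic link service frame (R_CTL FC_RCTL_BA_ABTS, type
 * FC_TYPE_BLS), presumably because no driver WQE is outstanding for
 * the exchange being aborted.
 */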
2918 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2919 "6069 Issue ABTS to xri x%x reqtag x%x\n",
2920 xri, abts_wqeq->iotag);
2925 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
2926 struct lpfc_nvmet_rcv_ctx *ctxp,
2927 uint32_t sid, uint16_t xri)
2929 struct lpfc_nvmet_tgtport *tgtp;
2930 struct lpfc_iocbq *abts_wqeq;
2931 union lpfc_wqe128 *abts_wqe;
2932 struct lpfc_nodelist *ndlp;
2933 unsigned long flags;
2936 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2938 ctxp->wqeq = ctxp->ctxbuf->iocbq;
2939 ctxp->wqeq->hba_wqidx = 0;
2942 ndlp = lpfc_findnode_did(phba->pport, sid);
2943 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2944 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2945 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2946 atomic_inc(&tgtp->xmt_abort_rsp_error);
2947 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2948 "6160 Drop ABORT - wrong NDLP state x%x.\n",
2949 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
2951 /* No failure to an ABTS request. */
2952 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2956 /* Issue ABTS for this WQE based on iotag */
2957 ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
2958 if (!ctxp->abort_wqeq) {
2959 atomic_inc(&tgtp->xmt_abort_rsp_error);
2960 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2961 "6161 ABORT failed: No wqeqs: "
2962 "xri: x%x\n", ctxp->oxid);
2963 /* No failure to an ABTS request. */
2964 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2967 abts_wqeq = ctxp->abort_wqeq;
2968 abts_wqe = &abts_wqeq->wqe;
2969 ctxp->state = LPFC_NVMET_STE_ABORT;
2971 /* Announce entry to the new IO submit path. */
2972 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2973 "6162 ABORT Request to rport DID x%06x "
2974 "for xri x%x x%x\n",
2975 ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
2977 /* If the hba is getting reset, this flag is set. It is
2978 * cleared when the reset is complete and rings reestablished.
2980 spin_lock_irqsave(&phba->hbalock, flags);
2981 /* driver-queued commands are in the process of being flushed */
2982 if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
2983 spin_unlock_irqrestore(&phba->hbalock, flags);
2984 atomic_inc(&tgtp->xmt_abort_rsp_error);
2985 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
2986 "6163 Driver in reset cleanup - flushing "
2987 "NVME Req now. hba_flag x%x oxid x%x\n",
2988 phba->hba_flag, ctxp->oxid);
2989 lpfc_sli_release_iocbq(phba, abts_wqeq);
2990 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2994 /* Outstanding abort is in progress */
2995 if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
2996 spin_unlock_irqrestore(&phba->hbalock, flags);
2997 atomic_inc(&tgtp->xmt_abort_rsp_error);
2998 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
2999 "6164 Outstanding NVME I/O Abort Request "
3000 "still pending on oxid x%x\n",
3002 lpfc_sli_release_iocbq(phba, abts_wqeq);
3003 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3007 /* Ready - mark outstanding as aborted by driver. */
3008 abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
3010 /* WQEs are reused. Clear stale data and set key fields to
3011 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
3013 memset(abts_wqe, 0, sizeof(union lpfc_wqe));
3016 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
3019 bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
3020 bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
3022 /* word 8 - tell the FW to abort the IO associated with this
3023 * outstanding exchange ID.
3025 abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;
3027 /* word 9 - this is the iotag for the abts_wqe completion. */
3028 bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
3032 bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
3033 bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
3036 bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
3037 bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
3038 bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
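/* Unlike the unsolicited path above, which transmits an explicit
 * BA_ABTS, this solicited path aborts a WQE the driver already issued,
 * so it uses the firmware ABORT_XRI_CX command keyed by the XRI in
 * word 8.
 */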
3040 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
3041 abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
3042 abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
3043 abts_wqeq->iocb_cmpl = NULL;
3044 abts_wqeq->iocb_flag |= LPFC_IO_NVME;
3045 abts_wqeq->context2 = ctxp;
3046 abts_wqeq->vport = phba->pport;
3047 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
3048 spin_unlock_irqrestore(&phba->hbalock, flags);
3049 if (rc == WQE_SUCCESS) {
3050 atomic_inc(&tgtp->xmt_abort_sol);
3054 atomic_inc(&tgtp->xmt_abort_rsp_error);
3055 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3056 lpfc_sli_release_iocbq(phba, abts_wqeq);
3057 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3058 "6166 Failed ABORT issue_wqe with status x%x "
3066 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
3067 struct lpfc_nvmet_rcv_ctx *ctxp,
3068 uint32_t sid, uint16_t xri)
3070 struct lpfc_nvmet_tgtport *tgtp;
3071 struct lpfc_iocbq *abts_wqeq;
3072 unsigned long flags;
3075 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3077 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3078 ctxp->wqeq->hba_wqidx = 0;
3081 if (ctxp->state == LPFC_NVMET_STE_FREE) {
3082 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3083 "6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
3084 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
3088 ctxp->state = LPFC_NVMET_STE_ABORT;
3090 rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
3094 spin_lock_irqsave(&phba->hbalock, flags);
3095 abts_wqeq = ctxp->wqeq;
3096 abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
3097 abts_wqeq->iocb_cmpl = NULL;
3098 abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
3099 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
3100 spin_unlock_irqrestore(&phba->hbalock, flags);
3101 if (rc == WQE_SUCCESS) {
3106 spin_lock_irqsave(&ctxp->ctxlock, flags);
3107 if (ctxp->flag & LPFC_NVMET_CTX_RLS)
3108 list_del(&ctxp->list);
3109 ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
3110 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3112 atomic_inc(&tgtp->xmt_abort_rsp_error);
3113 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3114 "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
3116 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3121 lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
3122 struct lpfc_nvmet_rcv_ctx *ctxp,
3123 uint32_t sid, uint16_t xri)
3125 struct lpfc_nvmet_tgtport *tgtp;
3126 struct lpfc_iocbq *abts_wqeq;
3127 union lpfc_wqe128 *wqe_abts;
3128 unsigned long flags;
3131 if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
3132 (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
3133 ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3136 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3137 "6418 NVMET LS abort state mismatch "
3139 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3140 ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3143 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3145 /* Issue ABTS for this WQE based on iotag */
3146 ctxp->wqeq = lpfc_sli_get_iocbq(phba);
3148 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3149 "6068 Abort failed: No wqeqs: "
3151 /* No failure to an ABTS request. */
3156 abts_wqeq = ctxp->wqeq;
3157 wqe_abts = &abts_wqeq->wqe;
3159 if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
3164 spin_lock_irqsave(&phba->hbalock, flags);
3165 abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
3166 abts_wqeq->iocb_cmpl = NULL;
3167 abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
3168 rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
3169 spin_unlock_irqrestore(&phba->hbalock, flags);
3170 if (rc == WQE_SUCCESS) {
3171 atomic_inc(&tgtp->xmt_abort_unsol);
3175 atomic_inc(&tgtp->xmt_abort_rsp_error);
3176 abts_wqeq->context2 = NULL;
3177 abts_wqeq->context3 = NULL;
3178 lpfc_sli_release_iocbq(phba, abts_wqeq);
3180 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3181 "6056 Failed to Issue ABTS. Status x%x\n", rc);