1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
5 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 ********************************************************************/
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <asm/unaligned.h>
28 #include <linux/crc-t10dif.h>
29 #include <net/checksum.h>
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_eh.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_tcq.h>
36 #include <scsi/scsi_transport_fc.h>
37 #include <scsi/fc/fc_fs.h>
39 #include <linux/nvme.h>
40 #include <linux/nvme-fc-driver.h>
41 #include <linux/nvme-fc.h>
43 #include "lpfc_version.h"
47 #include "lpfc_sli4.h"
49 #include "lpfc_disc.h"
51 #include "lpfc_scsi.h"
52 #include "lpfc_nvme.h"
53 #include "lpfc_nvmet.h"
54 #include "lpfc_logmsg.h"
55 #include "lpfc_crtn.h"
56 #include "lpfc_vport.h"
57 #include "lpfc_debugfs.h"
59 static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
60 struct lpfc_nvmet_rcv_ctx *,
63 static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
64 struct lpfc_nvmet_rcv_ctx *);
65 static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
66 struct lpfc_nvmet_rcv_ctx *,
68 static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
69 struct lpfc_nvmet_rcv_ctx *,
71 static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
72 struct lpfc_nvmet_rcv_ctx *,
74 static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
75 struct lpfc_nvmet_rcv_ctx *);
77 static union lpfc_wqe128 lpfc_tsend_cmd_template;
78 static union lpfc_wqe128 lpfc_treceive_cmd_template;
79 static union lpfc_wqe128 lpfc_trsp_cmd_template;
81 /* Setup WQE templates for NVME IOs */
83 lpfc_nvmet_cmd_template(void)
85 union lpfc_wqe128 *wqe;
88 wqe = &lpfc_tsend_cmd_template;
89 memset(wqe, 0, sizeof(union lpfc_wqe128));
91 /* Word 0, 1, 2 - BDE is variable */
93 /* Word 3 - payload_offset_len is zero */
95 /* Word 4 - relative_offset is variable */
97 /* Word 5 - is zero */
99 /* Word 6 - ctxt_tag, xri_tag is variable */
101 /* Word 7 - wqe_ar is variable */
102 bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
103 bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
104 bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
105 bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
106 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
108 /* Word 8 - abort_tag is variable */
110 /* Word 9 - reqtag, rcvoxid is variable */
112 /* Word 10 - wqes, xc is variable */
113 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
114 bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
115 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
116 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
117 bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
118 bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);
120 /* Word 11 - sup, irsp, irsplen is variable */
121 bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
122 bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
123 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
124 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
125 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
126 bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);
128 /* Word 12 - fcp_data_len is variable */
130 /* Word 13, 14, 15 - PBDE is zero */
132 /* TRECEIVE template */
133 wqe = &lpfc_treceive_cmd_template;
134 memset(wqe, 0, sizeof(union lpfc_wqe128));
136 /* Word 0, 1, 2 - BDE is variable */
139 wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
141 /* Word 4 - relative_offset is variable */
143 /* Word 5 - is zero */
145 /* Word 6 - ctxt_tag, xri_tag is variable */
148 bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
149 bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
150 bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
151 bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
152 bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
154 /* Word 8 - abort_tag is variable */
156 /* Word 9 - reqtag, rcvoxid is variable */
158 /* Word 10 - xc is variable */
159 bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
160 bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
161 bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
162 bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
163 bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
164 bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 1);
166 /* Word 11 - pbde is variable */
167 bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
168 bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
169 bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
170 bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
171 bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
172 bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);
174 /* Word 12 - fcp_data_len is variable */
176 /* Word 13, 14, 15 - PBDE is variable */
179 wqe = &lpfc_trsp_cmd_template;
180 memset(wqe, 0, sizeof(union lpfc_wqe128));
182 /* Word 0, 1, 2 - BDE is variable */
184 /* Word 3 - response_len is variable */
186 /* Word 4, 5 - is zero */
188 /* Word 6 - ctxt_tag, xri_tag is variable */
191 bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
192 bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
193 bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
194 bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
195 bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */
197 /* Word 8 - abort_tag is variable */
199 /* Word 9 - reqtag is variable */
201 /* Word 10 - wqes, xc is variable */
202 bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
203 bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
204 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
205 bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
206 bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
207 bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);
209 /* Word 11 - irsp, irsplen is variable */
210 bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
211 bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
212 bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
213 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
214 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
215 bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);
217 /* Word 12, 13, 14, 15 - is zero */
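/*
 * Usage sketch (illustrative, not authoritative -- the real variable-word
 * setup lives in the prep routines such as lpfc_nvmet_prep_fcp_wqe): a
 * transmit path copies the matching template into the exchange's WQE and
 * then fills in only the variable words, e.g.
 *
 *	memcpy(&wqe->fcp_trsp, &lpfc_trsp_cmd_template.fcp_trsp,
 *	       sizeof(struct fcp_trsp64_wqe));
 *	bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com, nvmewqe->sli4_xritag);
 */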
221 lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
225 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
226 "6313 NVMET Defer ctx release xri x%x flg x%x\n",
227 ctxp->oxid, ctxp->flag);
229 spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
230 if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
231 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
235 ctxp->flag |= LPFC_NVMET_CTX_RLS;
236 list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
237 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
241 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
242 * @phba: Pointer to HBA context object.
243 * @cmdwqe: Pointer to driver command WQE object.
244 * @wcqe: Pointer to driver response CQE object.
246 * The function is called from the SLI ring event handler with no
247 * lock held. This function is the completion handler for NVME LS commands.
248 * The function frees memory resources used for the NVME commands.
251 lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
252 struct lpfc_wcqe_complete *wcqe)
254 struct lpfc_nvmet_tgtport *tgtp;
255 struct nvmefc_tgt_ls_req *rsp;
256 struct lpfc_nvmet_rcv_ctx *ctxp;
257 uint32_t status, result;
259 status = bf_get(lpfc_wcqe_c_status, wcqe);
260 result = wcqe->parameter;
261 ctxp = cmdwqe->context2;
263 if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
264 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
265 "6410 NVMET LS cmpl state mismatch IO x%x: "
267 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
270 if (!phba->targetport)
273 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
277 atomic_inc(&tgtp->xmt_ls_rsp_error);
278 if (result == IOERR_ABORT_REQUESTED)
279 atomic_inc(&tgtp->xmt_ls_rsp_aborted);
280 if (bf_get(lpfc_wcqe_c_xb, wcqe))
281 atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
283 atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
288 rsp = &ctxp->ctx.ls_req;
290 lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
291 ctxp->oxid, status, result);
293 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
294 "6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
295 status, result, ctxp->oxid);
297 lpfc_nlp_put(cmdwqe->context1);
298 cmdwqe->context2 = NULL;
299 cmdwqe->context3 = NULL;
300 lpfc_sli_release_iocbq(phba, cmdwqe);
306 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
307 * @phba: HBA the buffer is associated with
308 * @ctx_buf: context buffer to clean up and repost
311 * Description: Reposts the RQ DMA buffer associated with the context, or
312 * uses it immediately to service a waiting command, then recycles the context.
314 * Notes: Takes internal driver locks. Can be called with or without other locks held.
319 lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
321 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
322 struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
323 struct lpfc_nvmet_tgtport *tgtp;
324 struct fc_frame_header *fc_hdr;
325 struct rqb_dmabuf *nvmebuf;
326 struct lpfc_nvmet_ctx_info *infop;
328 uint32_t size, oxid, sid, rc;
333 dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
336 ctxp->txrdy_phys = 0;
339 if (ctxp->state == LPFC_NVMET_STE_FREE) {
340 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
341 "6411 NVMET free, already free IO x%x: %d %d\n",
342 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
344 ctxp->state = LPFC_NVMET_STE_FREE;
346 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
347 if (phba->sli4_hba.nvmet_io_wait_cnt) {
348 list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
349 nvmebuf, struct rqb_dmabuf,
351 phba->sli4_hba.nvmet_io_wait_cnt--;
352 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
355 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
356 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
357 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
358 payload = (uint32_t *)(nvmebuf->dbuf.virt);
359 size = nvmebuf->bytes_recv;
360 sid = sli4_sid_from_fc_hdr(fc_hdr);
362 ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
370 ctxp->state = LPFC_NVMET_STE_RCV;
373 ctxp->ctxbuf = ctx_buf;
374 ctxp->rqb_buffer = (void *)nvmebuf;
375 spin_lock_init(&ctxp->ctxlock);
377 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
378 if (ctxp->ts_cmd_nvme) {
379 ctxp->ts_cmd_nvme = ktime_get_ns();
380 ctxp->ts_nvme_data = 0;
381 ctxp->ts_data_wqput = 0;
382 ctxp->ts_isr_data = 0;
383 ctxp->ts_data_nvme = 0;
384 ctxp->ts_nvme_status = 0;
385 ctxp->ts_status_wqput = 0;
386 ctxp->ts_isr_status = 0;
387 ctxp->ts_status_nvme = 0;
390 atomic_inc(&tgtp->rcv_fcp_cmd_in);
392 * The calling sequence should be:
393 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
394 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
395 * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
396 * the NVME command / FC header is stored.
397 * A buffer has already been reposted for this IO, so just free
400 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
403 /* Process FCP command */
405 ctxp->rqb_buffer = NULL;
406 atomic_inc(&tgtp->rcv_fcp_cmd_out);
407 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
411 /* Processing of FCP command is deferred */
412 if (rc == -EOVERFLOW) {
413 lpfc_nvmeio_data(phba,
414 "NVMET RCV BUSY: xri x%x sz %d "
417 atomic_inc(&tgtp->rcv_fcp_cmd_out);
420 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
421 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
422 "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
424 atomic_read(&tgtp->rcv_fcp_cmd_in),
425 atomic_read(&tgtp->rcv_fcp_cmd_out),
426 atomic_read(&tgtp->xmt_fcp_release));
428 lpfc_nvmet_defer_release(phba, ctxp);
429 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
430 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
433 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
436 * Use the CPU context list, from the MRQ the IO was received on
437 * (ctxp->idx), to save the context structure.
439 cpu = smp_processor_id();
440 infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
441 spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
442 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
443 infop->nvmet_ctx_list_cnt++;
444 spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
448 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
450 lpfc_nvmet_ktime(struct lpfc_hba *phba,
451 struct lpfc_nvmet_rcv_ctx *ctxp)
453 uint64_t seg1, seg2, seg3, seg4, seg5;
454 uint64_t seg6, seg7, seg8, seg9, seg10;
457 if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
458 !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
459 !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
460 !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
461 !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
464 if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
466 if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
468 if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
470 if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
472 if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
474 if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
476 if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
478 if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
480 if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
482 if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
485 * Segment 1 - Time from FCP command received by MSI-X ISR
486 * to FCP command is passed to NVME Layer.
487 * Segment 2 - Time from FCP command payload handed
488 * off to NVME Layer to Driver receives a Command op
490 * Segment 3 - Time from Driver receives a Command op
491 * from NVME Layer to Command is put on WQ.
492 * Segment 4 - Time from Driver WQ put is done
493 * to MSI-X ISR for Command cmpl.
494 * Segment 5 - Time from MSI-X ISR for Command cmpl to
495 * Command cmpl is passed to NVME Layer.
496 * Segment 6 - Time from Command cmpl is passed to NVME
497 * Layer to Driver receives a RSP op from NVME Layer.
498 * Segment 7 - Time from Driver receives a RSP op from
499 * NVME Layer to WQ put is done on TRSP FCP Status.
500 * Segment 8 - Time from Driver WQ put is done on TRSP
501 * FCP Status to MSI-X ISR for TRSP cmpl.
502 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
503 * TRSP cmpl is passed to NVME Layer.
504 * Segment 10 - Time from FCP command received by
505 * MSI-X ISR to command is completed on wire.
506 * (Segments 1 thru 8) for READDATA / WRITEDATA
507 * (Segments 1 thru 4) for READDATA_RSP
509 seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
512 seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
518 seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
524 seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
530 seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
537 /* For auto rsp commands seg6 thru seg10 will be 0 */
538 if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
539 seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
545 seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
551 seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
557 seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
563 if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
565 seg10 = (ctxp->ts_isr_status -
568 if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
574 seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
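/*
 * Illustrative arithmetic (made-up ns values, assuming a well-ordered
 * timeline): with ts_isr_cmd = 0, ts_cmd_nvme = 10, ts_nvme_data = 25,
 * ts_data_wqput = 30, ts_isr_data = 70 and ts_data_nvme = 80, the
 * assignments above give seg1 = 10, seg2 = 25, seg3 = 30, seg4 = 70 and
 * seg5 = 80, each measured from ts_isr_cmd; the per-segment deltas the
 * comment block describes (10, 15, 5, 40, 10) follow by subtracting the
 * running sum of the earlier segments.
 */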
577 phba->ktime_seg1_total += seg1;
578 if (seg1 < phba->ktime_seg1_min)
579 phba->ktime_seg1_min = seg1;
580 else if (seg1 > phba->ktime_seg1_max)
581 phba->ktime_seg1_max = seg1;
583 phba->ktime_seg2_total += seg2;
584 if (seg2 < phba->ktime_seg2_min)
585 phba->ktime_seg2_min = seg2;
586 else if (seg2 > phba->ktime_seg2_max)
587 phba->ktime_seg2_max = seg2;
589 phba->ktime_seg3_total += seg3;
590 if (seg3 < phba->ktime_seg3_min)
591 phba->ktime_seg3_min = seg3;
592 else if (seg3 > phba->ktime_seg3_max)
593 phba->ktime_seg3_max = seg3;
595 phba->ktime_seg4_total += seg4;
596 if (seg4 < phba->ktime_seg4_min)
597 phba->ktime_seg4_min = seg4;
598 else if (seg4 > phba->ktime_seg4_max)
599 phba->ktime_seg4_max = seg4;
601 phba->ktime_seg5_total += seg5;
602 if (seg5 < phba->ktime_seg5_min)
603 phba->ktime_seg5_min = seg5;
604 else if (seg5 > phba->ktime_seg5_max)
605 phba->ktime_seg5_max = seg5;
607 phba->ktime_data_samples++;
611 phba->ktime_seg6_total += seg6;
612 if (seg6 < phba->ktime_seg6_min)
613 phba->ktime_seg6_min = seg6;
614 else if (seg6 > phba->ktime_seg6_max)
615 phba->ktime_seg6_max = seg6;
617 phba->ktime_seg7_total += seg7;
618 if (seg7 < phba->ktime_seg7_min)
619 phba->ktime_seg7_min = seg7;
620 else if (seg7 > phba->ktime_seg7_max)
621 phba->ktime_seg7_max = seg7;
623 phba->ktime_seg8_total += seg8;
624 if (seg8 < phba->ktime_seg8_min)
625 phba->ktime_seg8_min = seg8;
626 else if (seg8 > phba->ktime_seg8_max)
627 phba->ktime_seg8_max = seg8;
629 phba->ktime_seg9_total += seg9;
630 if (seg9 < phba->ktime_seg9_min)
631 phba->ktime_seg9_min = seg9;
632 else if (seg9 > phba->ktime_seg9_max)
633 phba->ktime_seg9_max = seg9;
635 phba->ktime_seg10_total += seg10;
636 if (seg10 < phba->ktime_seg10_min)
637 phba->ktime_seg10_min = seg10;
638 else if (seg10 > phba->ktime_seg10_max)
639 phba->ktime_seg10_max = seg10;
640 phba->ktime_status_samples++;
645 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
646 * @phba: Pointer to HBA context object.
647 * @cmdwqe: Pointer to driver command WQE object.
648 * @wcqe: Pointer to driver response CQE object.
650 * The function is called from the SLI ring event handler with no
651 * lock held. This function is the completion handler for NVME FCP commands.
652 * The function frees memory resources used for the NVME commands.
655 lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
656 struct lpfc_wcqe_complete *wcqe)
658 struct lpfc_nvmet_tgtport *tgtp;
659 struct nvmefc_tgt_fcp_req *rsp;
660 struct lpfc_nvmet_rcv_ctx *ctxp;
661 uint32_t status, result, op, start_clean, logerr;
662 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
666 ctxp = cmdwqe->context2;
667 ctxp->flag &= ~LPFC_NVMET_IO_INP;
669 rsp = &ctxp->ctx.fcp_req;
672 status = bf_get(lpfc_wcqe_c_status, wcqe);
673 result = wcqe->parameter;
675 if (phba->targetport)
676 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
680 lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
681 ctxp->oxid, op, status);
684 rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
685 rsp->transferred_length = 0;
687 atomic_inc(&tgtp->xmt_fcp_rsp_error);
688 if (result == IOERR_ABORT_REQUESTED)
689 atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
692 logerr = LOG_NVME_IOERR;
694 /* pick up SLI4 exchange busy condition */
695 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
696 ctxp->flag |= LPFC_NVMET_XBUSY;
697 logerr |= LOG_NVME_ABTS;
699 atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
702 ctxp->flag &= ~LPFC_NVMET_XBUSY;
705 lpfc_printf_log(phba, KERN_INFO, logerr,
706 "6315 IO Error Cmpl xri x%x: %x/%x XBUSY:x%x\n",
707 ctxp->oxid, status, result, ctxp->flag);
710 rsp->fcp_error = NVME_SC_SUCCESS;
711 if (op == NVMET_FCOP_RSP)
712 rsp->transferred_length = rsp->rsplen;
714 rsp->transferred_length = rsp->transfer_length;
716 atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
719 if ((op == NVMET_FCOP_READDATA_RSP) ||
720 (op == NVMET_FCOP_RSP)) {
722 ctxp->state = LPFC_NVMET_STE_DONE;
725 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
726 if (ctxp->ts_cmd_nvme) {
727 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
729 cmdwqe->isr_timestamp;
732 ctxp->ts_nvme_status =
734 ctxp->ts_status_wqput =
736 ctxp->ts_isr_status =
738 ctxp->ts_status_nvme =
741 ctxp->ts_isr_status =
742 cmdwqe->isr_timestamp;
743 ctxp->ts_status_nvme =
747 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
748 id = smp_processor_id();
750 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
751 "6703 CPU Check cmpl: "
752 "cpu %d expect %d\n",
754 if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
755 phba->cpucheck_cmpl_io[id]++;
759 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
760 if (ctxp->ts_cmd_nvme)
761 lpfc_nvmet_ktime(phba, ctxp);
763 /* lpfc_nvmet_xmt_fcp_release() will recycle the context */
766 start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
767 memset(((char *)cmdwqe) + start_clean, 0,
768 (sizeof(struct lpfc_iocbq) - start_clean));
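/*
 * Note (an assumption based on the struct lpfc_iocbq layout): clearing
 * from offsetof(struct lpfc_iocbq, iocb_flag) onward wipes the flags,
 * completion handlers and context pointers while preserving everything
 * before it -- notably the iotag, the XRI tags and the WQE itself -- so
 * the same iocbq can be re-driven for the next operation on this exchange.
 */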
769 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
770 if (ctxp->ts_cmd_nvme) {
771 ctxp->ts_isr_data = cmdwqe->isr_timestamp;
772 ctxp->ts_data_nvme = ktime_get_ns();
774 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
775 id = smp_processor_id();
777 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
778 "6704 CPU Check cmdcmpl: "
779 "cpu %d expect %d\n",
781 if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
782 phba->cpucheck_ccmpl_io[id]++;
790 lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
791 struct nvmefc_tgt_ls_req *rsp)
793 struct lpfc_nvmet_rcv_ctx *ctxp =
794 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
795 struct lpfc_hba *phba = ctxp->phba;
796 struct hbq_dmabuf *nvmebuf =
797 (struct hbq_dmabuf *)ctxp->rqb_buffer;
798 struct lpfc_iocbq *nvmewqeq;
799 struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
800 struct lpfc_dmabuf dmabuf;
801 struct ulp_bde64 bpl;
804 if (phba->pport->load_flag & FC_UNLOADING)
807 if (phba->pport->load_flag & FC_UNLOADING)
810 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
811 "6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
813 if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
814 (ctxp->entry_cnt != 1)) {
815 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
816 "6412 NVMET LS rsp state mismatch "
818 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
820 ctxp->state = LPFC_NVMET_STE_LS_RSP;
823 nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
825 if (nvmewqeq == NULL) {
826 atomic_inc(&nvmep->xmt_ls_drop);
827 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
828 "6150 LS Drop IO x%x: Prep\n",
830 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
831 atomic_inc(&nvmep->xmt_ls_abort);
832 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
833 ctxp->sid, ctxp->oxid);
837 /* Save numBdes for bpl2sgl */
839 nvmewqeq->hba_wqidx = 0;
840 nvmewqeq->context3 = &dmabuf;
842 bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
843 bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
844 bpl.tus.f.bdeSize = rsp->rsplen;
845 bpl.tus.f.bdeFlags = 0;
846 bpl.tus.w = le32_to_cpu(bpl.tus.w);
848 nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
849 nvmewqeq->iocb_cmpl = NULL;
850 nvmewqeq->context2 = ctxp;
852 lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n",
853 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);
855 rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
856 if (rc == WQE_SUCCESS) {
858 * Okay to repost buffer here, but wait till cmpl
859 * before freeing ctxp and iocbq.
861 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
862 ctxp->rqb_buffer = NULL;
863 atomic_inc(&nvmep->xmt_ls_rsp);
866 /* Give back resources */
867 atomic_inc(&nvmep->xmt_ls_drop);
868 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
869 "6151 LS Drop IO x%x: Issue %d\n",
872 lpfc_nlp_put(nvmewqeq->context1);
874 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
875 atomic_inc(&nvmep->xmt_ls_abort);
876 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
881 lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
882 struct nvmefc_tgt_fcp_req *rsp)
884 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
885 struct lpfc_nvmet_rcv_ctx *ctxp =
886 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
887 struct lpfc_hba *phba = ctxp->phba;
888 struct lpfc_queue *wq;
889 struct lpfc_iocbq *nvmewqeq;
890 struct lpfc_sli_ring *pring;
891 unsigned long iflags;
894 if (phba->pport->load_flag & FC_UNLOADING) {
899 if (phba->pport->load_flag & FC_UNLOADING) {
904 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
905 if (ctxp->ts_cmd_nvme) {
906 if (rsp->op == NVMET_FCOP_RSP)
907 ctxp->ts_nvme_status = ktime_get_ns();
909 ctxp->ts_nvme_data = ktime_get_ns();
911 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
912 int id = smp_processor_id();
914 if (id < LPFC_CHECK_CPU_CNT)
915 phba->cpucheck_xmt_io[id]++;
916 if (rsp->hwqid != id) {
917 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
918 "6705 CPU Check OP: "
919 "cpu %d expect %d\n",
921 ctxp->cpu = rsp->hwqid;
927 if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
928 (ctxp->state == LPFC_NVMET_STE_ABORT)) {
929 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
930 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
931 "6102 IO xri x%x aborted\n",
937 nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
938 if (nvmewqeq == NULL) {
939 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
940 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
941 "6152 FCP Drop IO x%x: Prep\n",
947 nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
948 nvmewqeq->iocb_cmpl = NULL;
949 nvmewqeq->context2 = ctxp;
950 nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
951 ctxp->wqeq->hba_wqidx = rsp->hwqid;
953 lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
954 ctxp->oxid, rsp->op, rsp->rsplen);
956 ctxp->flag |= LPFC_NVMET_IO_INP;
957 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
958 if (rc == WQE_SUCCESS) {
959 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
960 if (!ctxp->ts_cmd_nvme)
962 if (rsp->op == NVMET_FCOP_RSP)
963 ctxp->ts_status_wqput = ktime_get_ns();
965 ctxp->ts_data_wqput = ktime_get_ns();
972 * WQ was full, so queue nvmewqeq to be sent after
975 ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
976 wq = phba->sli4_hba.hdwq[rsp->hwqid].nvme_wq;
978 spin_lock_irqsave(&pring->ring_lock, iflags);
979 list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
980 wq->q_flag |= HBA_NVMET_WQFULL;
981 spin_unlock_irqrestore(&pring->ring_lock, iflags);
982 atomic_inc(&lpfc_nvmep->defer_wqfull);
986 /* Give back resources */
987 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
988 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
989 "6153 FCP Drop IO x%x: Issue: %d\n",
992 ctxp->wqeq->hba_wqidx = 0;
993 nvmewqeq->context2 = NULL;
994 nvmewqeq->context3 = NULL;
1001 lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
1003 struct lpfc_nvmet_tgtport *tport = targetport->private;
1005 /* release any threads waiting for the unreg to complete */
1006 complete(&tport->tport_unreg_done);
1010 lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
1011 struct nvmefc_tgt_fcp_req *req)
1013 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1014 struct lpfc_nvmet_rcv_ctx *ctxp =
1015 container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1016 struct lpfc_hba *phba = ctxp->phba;
1017 struct lpfc_queue *wq;
1018 unsigned long flags;
1020 if (phba->pport->load_flag & FC_UNLOADING)
1023 if (phba->pport->load_flag & FC_UNLOADING)
1026 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1027 "6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
1028 ctxp->oxid, ctxp->flag, ctxp->state);
1030 lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
1031 ctxp->oxid, ctxp->flag, ctxp->state);
1033 atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
1035 spin_lock_irqsave(&ctxp->ctxlock, flags);
1036 ctxp->state = LPFC_NVMET_STE_ABORT;
1038 /* Since iaab/iaar are NOT set, we need to check
1039 * if the firmware is in the process of aborting this IO
1041 if (ctxp->flag & LPFC_NVMET_XBUSY) {
1042 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1045 ctxp->flag |= LPFC_NVMET_ABORT_OP;
1047 if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
1048 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1050 wq = phba->sli4_hba.hdwq[ctxp->wqeq->hba_wqidx].nvme_wq;
1051 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1052 lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
1056 /* A state of LPFC_NVMET_STE_RCV means we have just received
1057 * the NVME command and have not yet started processing it
1058 * (no IO WQEs have been issued on this exchange yet).
1060 if (ctxp->state == LPFC_NVMET_STE_RCV)
1061 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1064 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1066 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1070 lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
1071 struct nvmefc_tgt_fcp_req *rsp)
1073 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1074 struct lpfc_nvmet_rcv_ctx *ctxp =
1075 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1076 struct lpfc_hba *phba = ctxp->phba;
1077 unsigned long flags;
1078 bool aborting = false;
1080 if (ctxp->state != LPFC_NVMET_STE_DONE &&
1081 ctxp->state != LPFC_NVMET_STE_ABORT) {
1082 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1083 "6413 NVMET release bad state %d %d oxid x%x\n",
1084 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
1087 spin_lock_irqsave(&ctxp->ctxlock, flags);
1088 if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
1089 (ctxp->flag & LPFC_NVMET_XBUSY)) {
1091 /* let the abort path do the real release */
1092 lpfc_nvmet_defer_release(phba, ctxp);
1094 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1096 lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
1097 ctxp->state, aborting);
1099 atomic_inc(&lpfc_nvmep->xmt_fcp_release);
1104 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1108 lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
1109 struct nvmefc_tgt_fcp_req *rsp)
1111 struct lpfc_nvmet_tgtport *tgtp;
1112 struct lpfc_nvmet_rcv_ctx *ctxp =
1113 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1114 struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
1115 struct lpfc_hba *phba = ctxp->phba;
1117 lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
1118 ctxp->oxid, ctxp->size, smp_processor_id());
1121 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1122 "6425 Defer rcv: no buffer xri x%x: "
1124 ctxp->oxid, ctxp->flag, ctxp->state);
1128 tgtp = phba->targetport->private;
1130 atomic_inc(&tgtp->rcv_fcp_cmd_defer);
1132 /* Free the nvmebuf since a new buffer already replaced it */
1133 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1136 static struct nvmet_fc_target_template lpfc_tgttemplate = {
1137 .targetport_delete = lpfc_nvmet_targetport_delete,
1138 .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
1139 .fcp_op = lpfc_nvmet_xmt_fcp_op,
1140 .fcp_abort = lpfc_nvmet_xmt_fcp_abort,
1141 .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
1142 .defer_rcv = lpfc_nvmet_defer_rcv,
1145 .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1146 .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1147 .dma_boundary = 0xFFFFFFFF,
1149 /* optional features */
1150 .target_features = 0,
1151 /* sizes of additional private data for data structures */
1152 .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
1156 __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
1157 struct lpfc_nvmet_ctx_info *infop)
1159 struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
1160 unsigned long flags;
1162 spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
1163 list_for_each_entry_safe(ctx_buf, next_ctx_buf,
1164 &infop->nvmet_ctx_list, list) {
1165 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1166 list_del_init(&ctx_buf->list);
1167 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1169 __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
1170 ctx_buf->sglq->state = SGL_FREED;
1171 ctx_buf->sglq->ndlp = NULL;
1173 spin_lock(&phba->sli4_hba.sgl_list_lock);
1174 list_add_tail(&ctx_buf->sglq->list,
1175 &phba->sli4_hba.lpfc_nvmet_sgl_list);
1176 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1178 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1179 kfree(ctx_buf->context);
1181 spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
1185 lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
1187 struct lpfc_nvmet_ctx_info *infop;
1190 /* The first context list, MRQ 0 CPU 0 */
1191 infop = phba->sli4_hba.nvmet_ctx_info;
1195 /* Cycle the entire CPU context list for every MRQ */
1196 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
1197 for (j = 0; j < phba->sli4_hba.num_present_cpu; j++) {
1198 __lpfc_nvmet_clean_io_for_cpu(phba, infop);
1202 kfree(phba->sli4_hba.nvmet_ctx_info);
1203 phba->sli4_hba.nvmet_ctx_info = NULL;
1207 lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
1209 struct lpfc_nvmet_ctxbuf *ctx_buf;
1210 struct lpfc_iocbq *nvmewqe;
1211 union lpfc_wqe128 *wqe;
1212 struct lpfc_nvmet_ctx_info *last_infop;
1213 struct lpfc_nvmet_ctx_info *infop;
1216 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
1217 "6403 Allocate NVMET resources for %d XRIs\n",
1218 phba->sli4_hba.nvmet_xri_cnt);
1220 phba->sli4_hba.nvmet_ctx_info = kcalloc(
1221 phba->sli4_hba.num_present_cpu * phba->cfg_nvmet_mrq,
1222 sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
1223 if (!phba->sli4_hba.nvmet_ctx_info) {
1224 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1225 "6419 Failed allocate memory for "
1226 "nvmet context lists\n");
1231 * Assuming X CPUs in the system, and Y MRQs, allocate some
1232 * lpfc_nvmet_ctx_info structures as follows:
1234 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
1235 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
1237 * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
1239 * Each line represents an MRQ "silo" containing an entry for
1242 * MRQ X is initially assumed to be associated with CPU X, thus
1243 * contexts are initially distributed across all MRQs using
1244 * the MRQ index (N) as follows: cpuN/mrqN. When contexts are
1245 * freed, they are freed to the MRQ silo based on the CPU number
1246 * of the IO completion. Thus a context that was allocated for MRQ A
1247 * whose IO completed on CPU B will be freed to cpuB/mrqA.
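/*
 * A sketch of the per-(CPU, MRQ) lookup this layout implies; treat the
 * exact arithmetic as an assumption, the authoritative lpfc_get_ctx_list
 * macro lives in lpfc_nvmet.h:
 *
 *	infop = phba->sli4_hba.nvmet_ctx_info +
 *		(cpu * phba->cfg_nvmet_mrq) + mrq;
 *
 * i.e. the kcalloc'ed array above is indexed CPU-major, matching the
 * cpuN/mrqN distribution described here.
 */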
1249 infop = phba->sli4_hba.nvmet_ctx_info;
1250 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
1251 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1252 INIT_LIST_HEAD(&infop->nvmet_ctx_list);
1253 spin_lock_init(&infop->nvmet_ctx_list_lock);
1254 infop->nvmet_ctx_list_cnt = 0;
1260 * Setup the next CPU context info ptr for each MRQ.
1261 * MRQ 0 will cycle thru CPUs 0 - X separately from
1262 * MRQ 1 cycling thru CPUs 0 - X, and so on.
1264 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1265 last_infop = lpfc_get_ctx_list(phba, 0, j);
1266 for (i = phba->sli4_hba.num_present_cpu - 1; i >= 0; i--) {
1267 infop = lpfc_get_ctx_list(phba, i, j);
1268 infop->nvmet_ctx_next_cpu = last_infop;
1273 /* For all nvmet xris, allocate resources needed to process a
1274 * received command on a per xri basis.
1277 for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
1278 ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
1280 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1281 "6404 Ran out of memory for NVMET\n");
1285 ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
1287 if (!ctx_buf->context) {
1289 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1290 "6405 Ran out of NVMET "
1291 "context memory\n");
1294 ctx_buf->context->ctxbuf = ctx_buf;
1295 ctx_buf->context->state = LPFC_NVMET_STE_FREE;
1297 ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
1298 if (!ctx_buf->iocbq) {
1299 kfree(ctx_buf->context);
1301 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1302 "6406 Ran out of NVMET iocb/WQEs\n");
1305 ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
1306 nvmewqe = ctx_buf->iocbq;
1307 wqe = &nvmewqe->wqe;
1309 /* Initialize WQE */
1310 memset(wqe, 0, sizeof(union lpfc_wqe));
1312 ctx_buf->iocbq->context1 = NULL;
1313 spin_lock(&phba->sli4_hba.sgl_list_lock);
1314 ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
1315 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1316 if (!ctx_buf->sglq) {
1317 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1318 kfree(ctx_buf->context);
1320 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1321 "6407 Ran out of NVMET XRIs\n");
1326 * Add ctx to MRQidx context list. Our initial assumption
1327 * is MRQidx will be associated with CPUidx. This association
1328 * can change on the fly.
1330 infop = lpfc_get_ctx_list(phba, idx, idx);
1331 spin_lock(&infop->nvmet_ctx_list_lock);
1332 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
1333 infop->nvmet_ctx_list_cnt++;
1334 spin_unlock(&infop->nvmet_ctx_list_lock);
1336 /* Spread ctx structures evenly across all MRQs */
1338 if (idx >= phba->cfg_nvmet_mrq)
1342 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
1343 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1344 infop = lpfc_get_ctx_list(phba, i, j);
1345 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1346 "6408 TOTAL NVMET ctx for CPU %d "
1347 "MRQ %d: cnt %d nextcpu %p\n",
1348 i, j, infop->nvmet_ctx_list_cnt,
1349 infop->nvmet_ctx_next_cpu);
1356 lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
1358 struct lpfc_vport *vport = phba->pport;
1359 struct lpfc_nvmet_tgtport *tgtp;
1360 struct nvmet_fc_port_info pinfo;
1363 if (phba->targetport)
1366 error = lpfc_nvmet_setup_io_context(phba);
1370 memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
1371 pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
1372 pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
1373 pinfo.port_id = vport->fc_myDID;
1375 /* We need to tell the transport layer + 1 because it takes page
1376 * alignment into account. When space for the SGL is allocated we
1377 * allocate + 3, one for cmd, one for rsp and one for this alignment
1379 lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
1380 lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
1381 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
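/*
 * Worked example of the sizing above (illustrative numbers): with
 * phba->cfg_nvme_seg_cnt = 64, the transport is advertised
 * max_sgl_segments = 65 to absorb page alignment, while the driver's own
 * SGL allocation reserves 64 + 3 entries (one each for cmd, rsp and the
 * alignment), per the comment above.
 */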
1383 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1384 error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
1391 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1392 "6025 Cannot register NVME targetport x%x: "
1393 "portnm %llx nodenm %llx segs %d qs %d\n",
1395 pinfo.port_name, pinfo.node_name,
1396 lpfc_tgttemplate.max_sgl_segments,
1397 lpfc_tgttemplate.max_hw_queues);
1398 phba->targetport = NULL;
1399 phba->nvmet_support = 0;
1401 lpfc_nvmet_cleanup_io_context(phba);
1404 tgtp = (struct lpfc_nvmet_tgtport *)
1405 phba->targetport->private;
1408 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1409 "6026 Registered NVME "
1410 "targetport: %p, private %p "
1411 "portnm %llx nodenm %llx segs %d qs %d\n",
1412 phba->targetport, tgtp,
1413 pinfo.port_name, pinfo.node_name,
1414 lpfc_tgttemplate.max_sgl_segments,
1415 lpfc_tgttemplate.max_hw_queues);
1417 atomic_set(&tgtp->rcv_ls_req_in, 0);
1418 atomic_set(&tgtp->rcv_ls_req_out, 0);
1419 atomic_set(&tgtp->rcv_ls_req_drop, 0);
1420 atomic_set(&tgtp->xmt_ls_abort, 0);
1421 atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
1422 atomic_set(&tgtp->xmt_ls_rsp, 0);
1423 atomic_set(&tgtp->xmt_ls_drop, 0);
1424 atomic_set(&tgtp->xmt_ls_rsp_error, 0);
1425 atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
1426 atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
1427 atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
1428 atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
1429 atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
1430 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
1431 atomic_set(&tgtp->xmt_fcp_drop, 0);
1432 atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
1433 atomic_set(&tgtp->xmt_fcp_read, 0);
1434 atomic_set(&tgtp->xmt_fcp_write, 0);
1435 atomic_set(&tgtp->xmt_fcp_rsp, 0);
1436 atomic_set(&tgtp->xmt_fcp_release, 0);
1437 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
1438 atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
1439 atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
1440 atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
1441 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1442 atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
1443 atomic_set(&tgtp->xmt_fcp_abort, 0);
1444 atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1445 atomic_set(&tgtp->xmt_abort_unsol, 0);
1446 atomic_set(&tgtp->xmt_abort_sol, 0);
1447 atomic_set(&tgtp->xmt_abort_rsp, 0);
1448 atomic_set(&tgtp->xmt_abort_rsp_error, 0);
1449 atomic_set(&tgtp->defer_ctx, 0);
1450 atomic_set(&tgtp->defer_fod, 0);
1451 atomic_set(&tgtp->defer_wqfull, 0);
1457 lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
1459 struct lpfc_vport *vport = phba->pport;
1461 if (!phba->targetport)
1464 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
1465 "6007 Update NVMET port %p did x%x\n",
1466 phba->targetport, vport->fc_myDID);
1468 phba->targetport->port_id = vport->fc_myDID;
1473 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
1474 * @phba: pointer to lpfc hba data structure.
1475 * @axri: pointer to the nvmet xri abort wcqe structure.
1477 * This routine is invoked by the worker thread to process a SLI4 fast-path
1478 * NVMET aborted xri.
1481 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
1482 struct sli4_wcqe_xri_aborted *axri)
1484 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
1485 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
1486 struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1487 struct lpfc_nvmet_tgtport *tgtp;
1488 struct lpfc_nodelist *ndlp;
1489 unsigned long iflag = 0;
1491 bool released = false;
1493 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1494 "6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
1496 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
1499 if (phba->targetport) {
1500 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1501 atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
1504 spin_lock_irqsave(&phba->hbalock, iflag);
1505 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1506 list_for_each_entry_safe(ctxp, next_ctxp,
1507 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1509 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1512 /* Check if we already received a free context call
1513 * and we have completed processing an abort situation.
1515 if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
1516 !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
1517 list_del(&ctxp->list);
1520 ctxp->flag &= ~LPFC_NVMET_XBUSY;
1521 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1523 rrq_empty = list_empty(&phba->active_rrq_list);
1524 spin_unlock_irqrestore(&phba->hbalock, iflag);
1525 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1526 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1527 (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
1528 ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
1529 lpfc_set_rrq_active(phba, ndlp,
1530 ctxp->ctxbuf->sglq->sli4_lxritag,
1532 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
1535 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1536 "6318 XB aborted oxid %x flg x%x (%x)\n",
1537 ctxp->oxid, ctxp->flag, released);
1539 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1542 lpfc_worker_wake_up(phba);
1545 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1546 spin_unlock_irqrestore(&phba->hbalock, iflag);
1550 lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
1551 struct fc_frame_header *fc_hdr)
1554 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1555 struct lpfc_hba *phba = vport->phba;
1556 struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1557 struct nvmefc_tgt_fcp_req *rsp;
1559 unsigned long iflag = 0;
1561 xri = be16_to_cpu(fc_hdr->fh_ox_id);
1563 spin_lock_irqsave(&phba->hbalock, iflag);
1564 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1565 list_for_each_entry_safe(ctxp, next_ctxp,
1566 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1568 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1571 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1572 spin_unlock_irqrestore(&phba->hbalock, iflag);
1574 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1575 ctxp->flag |= LPFC_NVMET_ABTS_RCV;
1576 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1578 lpfc_nvmeio_data(phba,
1579 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1580 xri, smp_processor_id(), 0);
1582 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1583 "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
1585 rsp = &ctxp->ctx.fcp_req;
1586 nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
1588 /* Respond with BA_ACC accordingly */
1589 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1592 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1593 spin_unlock_irqrestore(&phba->hbalock, iflag);
1595 lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1596 xri, smp_processor_id(), 1);
1598 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1599 "6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
1601 /* Respond with BA_RJT accordingly */
1602 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
1608 lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
1609 struct lpfc_nvmet_rcv_ctx *ctxp)
1611 struct lpfc_sli_ring *pring;
1612 struct lpfc_iocbq *nvmewqeq;
1613 struct lpfc_iocbq *next_nvmewqeq;
1614 unsigned long iflags;
1615 struct lpfc_wcqe_complete wcqe;
1616 struct lpfc_wcqe_complete *wcqep;
1621 /* Fake an ABORT error code back to cmpl routine */
1622 memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
1623 bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
1624 wcqep->parameter = IOERR_ABORT_REQUESTED;
1626 spin_lock_irqsave(&pring->ring_lock, iflags);
1627 list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
1628 &wq->wqfull_list, list) {
1630 /* Checking for a specific IO to flush */
1631 if (nvmewqeq->context2 == ctxp) {
1632 list_del(&nvmewqeq->list);
1633 spin_unlock_irqrestore(&pring->ring_lock,
1635 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
1642 list_del(&nvmewqeq->list);
1643 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1644 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
1645 spin_lock_irqsave(&pring->ring_lock, iflags);
1649 wq->q_flag &= ~HBA_NVMET_WQFULL;
1650 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1654 lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
1655 struct lpfc_queue *wq)
1657 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1658 struct lpfc_sli_ring *pring;
1659 struct lpfc_iocbq *nvmewqeq;
1660 unsigned long iflags;
1664 * Some WQE slots are available, so try to re-issue anything
1665 * on the WQ wqfull_list.
1668 spin_lock_irqsave(&pring->ring_lock, iflags);
1669 while (!list_empty(&wq->wqfull_list)) {
1670 list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
1672 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1673 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
1674 spin_lock_irqsave(&pring->ring_lock, iflags);
1676 /* WQ was full again, so put it back on the list */
1677 list_add(&nvmewqeq->list, &wq->wqfull_list);
1678 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1682 wq->q_flag &= ~HBA_NVMET_WQFULL;
1683 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1689 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
1691 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1692 struct lpfc_nvmet_tgtport *tgtp;
1693 struct lpfc_queue *wq;
1696 if (phba->nvmet_support == 0)
1698 if (phba->targetport) {
1699 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1700 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
1701 wq = phba->sli4_hba.hdwq[qidx].nvme_wq;
1702 lpfc_nvmet_wqfull_flush(phba, wq, NULL);
1704 init_completion(&tgtp->tport_unreg_done);
1705 nvmet_fc_unregister_targetport(phba->targetport);
1706 wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
1707 lpfc_nvmet_cleanup_io_context(phba);
1709 phba->targetport = NULL;
1714 * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
1715 * @phba: pointer to lpfc hba data structure.
1716 * @pring: pointer to a SLI ring.
1717 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1719 * This routine is used for processing the WQE associated with an unsolicited
1720 * event. It allocates a receive context for the LS exchange and passes the
1721 * NVME LS payload to the NVME transport layer by calling
1722 * nvmet_fc_rcv_ls_req(). If the upcall fails, the LS exchange is
1723 * aborted and the receive buffer is freed.
1727 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1728 struct hbq_dmabuf *nvmebuf)
1730 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1731 struct lpfc_nvmet_tgtport *tgtp;
1732 struct fc_frame_header *fc_hdr;
1733 struct lpfc_nvmet_rcv_ctx *ctxp;
1735 uint32_t size, oxid, sid, rc;
1737 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1738 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1740 if (!phba->targetport) {
1741 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1742 "6154 LS Drop IO x%x\n", oxid);
1750 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1751 payload = (uint32_t *)(nvmebuf->dbuf.virt);
1752 size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
1753 sid = sli4_sid_from_fc_hdr(fc_hdr);
1755 ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
1757 atomic_inc(&tgtp->rcv_ls_req_drop);
1758 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1759 "6155 LS Drop IO x%x: Alloc\n",
1762 lpfc_nvmeio_data(phba, "NVMET LS DROP: "
1763 "xri x%x sz %d from %06x\n",
1765 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1773 ctxp->state = LPFC_NVMET_STE_LS_RCV;
1774 ctxp->entry_cnt = 1;
1775 ctxp->rqb_buffer = (void *)nvmebuf;
1777 lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
1780 * The calling sequence should be:
1781 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done
1782 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
1784 atomic_inc(&tgtp->rcv_ls_req_in);
1785 rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
1788 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1789 "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
1790 "%08x %08x %08x\n", size, rc,
1791 *payload, *(payload+1), *(payload+2),
1792 *(payload+3), *(payload+4), *(payload+5));
1795 atomic_inc(&tgtp->rcv_ls_req_out);
1799 lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n",
1802 atomic_inc(&tgtp->rcv_ls_req_drop);
1803 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1804 "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
1807 /* We assume a rcv'ed cmd ALWAYS fits into 1 buffer */
1808 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1810 atomic_inc(&tgtp->xmt_ls_abort);
1811 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
1815 static struct lpfc_nvmet_ctxbuf *
1816 lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
1817 struct lpfc_nvmet_ctx_info *current_infop)
1819 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1820 struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
1821 struct lpfc_nvmet_ctx_info *get_infop;
1825 * The current_infop for the MRQ an NVME command IU was received
1826 * on is empty. Our goal is to replenish this MRQ's context
1827 * list from another CPU's list.
1829 * First we need to pick a context list to start looking on.
1830 * nvmet_ctx_start_cpu is the CPU that had contexts available the
1831 * last time we needed to replenish this MRQ, while nvmet_ctx_next_cpu
1832 * is just the next sequential CPU for this MRQ.
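/*
 * Illustrative walk (assumption): with four CPUs and this MRQ's list
 * empty on cpu1, if cpu3 satisfied the previous replenish then
 * nvmet_ctx_start_cpu points at cpu3's silo and the loop below visits
 * cpu3 -> cpu0 -> cpu2, skipping cpu1 (the empty current list) and
 * splicing over the first non-empty list it finds.
 */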
1834 if (current_infop->nvmet_ctx_start_cpu)
1835 get_infop = current_infop->nvmet_ctx_start_cpu;
1837 get_infop = current_infop->nvmet_ctx_next_cpu;
1839 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
1840 if (get_infop == current_infop) {
1841 get_infop = get_infop->nvmet_ctx_next_cpu;
1844 spin_lock(&get_infop->nvmet_ctx_list_lock);
1846 /* Just take the entire context list, if there are any */
1847 if (get_infop->nvmet_ctx_list_cnt) {
1848 list_splice_init(&get_infop->nvmet_ctx_list,
1849 &current_infop->nvmet_ctx_list);
1850 current_infop->nvmet_ctx_list_cnt =
1851 get_infop->nvmet_ctx_list_cnt - 1;
1852 get_infop->nvmet_ctx_list_cnt = 0;
1853 spin_unlock(&get_infop->nvmet_ctx_list_lock);
1855 current_infop->nvmet_ctx_start_cpu = get_infop;
1856 list_remove_head(&current_infop->nvmet_ctx_list,
1857 ctx_buf, struct lpfc_nvmet_ctxbuf,
1862 /* Otherwise, move on to the next CPU for this MRQ */
1863 spin_unlock(&get_infop->nvmet_ctx_list_lock);
1864 get_infop = get_infop->nvmet_ctx_next_cpu;
1868 /* Nothing found, all contexts for the MRQ are in-flight */
1873 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
1874 * @phba: pointer to lpfc hba data structure.
1875 * @idx: relative index of MRQ vector
1876 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1878 * This routine is used for processing the WQE associated with an unsolicited
1879 * event. It pulls a receive context from the per-CPU free list for this
1880 * MRQ, replenishing the list from another CPU if it is empty, and passes
1881 * the NVME command IU to the NVME transport layer by calling
1882 * nvmet_fc_rcv_fcp_req(). If no context is available, the command is
1883 * queued on the nvmet_io_wait_list to be serviced later.
1886 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
1888 struct rqb_dmabuf *nvmebuf,
1889 uint64_t isr_timestamp)
1891 struct lpfc_nvmet_rcv_ctx *ctxp;
1892 struct lpfc_nvmet_tgtport *tgtp;
1893 struct fc_frame_header *fc_hdr;
1894 struct lpfc_nvmet_ctxbuf *ctx_buf;
1895 struct lpfc_nvmet_ctx_info *current_infop;
1897 uint32_t size, oxid, sid, rc, qno;
1898 unsigned long iflag;
1900 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1904 if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
1908 if (!nvmebuf || !phba->targetport) {
1909 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1910 "6157 NVMET FCP Drop IO\n");
1919 * Get a pointer to the context list for this MRQ based on
1920 * the CPU this MRQ IRQ is associated with. If the CPU association
1921 * changes from our initial assumption, the context list could
1922 * be empty, thus it would need to be replenished with the
1923 * context list from another CPU for this MRQ.
1925 current_cpu = smp_processor_id();
1926 current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
1927 spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
1928 if (current_infop->nvmet_ctx_list_cnt) {
1929 list_remove_head(&current_infop->nvmet_ctx_list,
1930 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
1931 current_infop->nvmet_ctx_list_cnt--;
1933 ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
1935 spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
1937 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1938 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1939 size = nvmebuf->bytes_recv;
1941 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1942 if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
1943 id = smp_processor_id();
1944 if (id < LPFC_CHECK_CPU_CNT)
1945 phba->cpucheck_rcv_io[id]++;
1949 lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
1950 oxid, size, smp_processor_id());
1952 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1955 /* Queue this NVME IO to process later */
1956 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
1957 list_add_tail(&nvmebuf->hbuf.list,
1958 &phba->sli4_hba.lpfc_nvmet_io_wait_list);
1959 phba->sli4_hba.nvmet_io_wait_cnt++;
1960 phba->sli4_hba.nvmet_io_wait_total++;
1961 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
1964 /* Post a brand new DMA buffer to RQ */
1966 lpfc_post_rq_buffer(
1967 phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
1968 phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
1970 atomic_inc(&tgtp->defer_ctx);
	payload = (uint32_t *)(nvmebuf->dbuf.virt);
	sid = sli4_sid_from_fc_hdr(fc_hdr);

	ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
	if (ctxp->state != LPFC_NVMET_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6414 NVMET Context corrupt %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
	}
	ctxp->wqeq = NULL;
	ctxp->txrdy = NULL;
	ctxp->offset = 0;
	ctxp->phba = phba;
	ctxp->size = size;
	ctxp->oxid = oxid;
	ctxp->sid = sid;
	ctxp->idx = idx;
	ctxp->state = LPFC_NVMET_STE_RCV;
	ctxp->entry_cnt = 1;
	ctxp->flag = 0;
	ctxp->ctxbuf = ctx_buf;
	ctxp->rqb_buffer = (void *)nvmebuf;
	spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (isr_timestamp) {
		ctxp->ts_isr_cmd = isr_timestamp;
		ctxp->ts_cmd_nvme = ktime_get_ns();
		ctxp->ts_nvme_data = 0;
		ctxp->ts_data_wqput = 0;
		ctxp->ts_isr_data = 0;
		ctxp->ts_data_nvme = 0;
		ctxp->ts_nvme_status = 0;
		ctxp->ts_status_wqput = 0;
		ctxp->ts_isr_status = 0;
		ctxp->ts_status_nvme = 0;
	} else {
		ctxp->ts_cmd_nvme = 0;
	}
#endif

	atomic_inc(&tgtp->rcv_fcp_cmd_in);
	/*
	 * The calling sequence should be:
	 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
	 * the NVME command / FC header is stored, so we are free to repost
	 * the buffer.
	 */
	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
				  payload, size);

	/* Process FCP command */
	if (rc == 0) {
		ctxp->rqb_buffer = NULL;
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
		return;
	}

	/* Processing of FCP command is deferred */
	if (rc == -EOVERFLOW) {
		/*
		 * Post a brand new DMA buffer to RQ and defer
		 * freeing rcv buffer till .defer_rcv callback
		 */
		qno = nvmebuf->idx;
		lpfc_post_rq_buffer(
			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);

		lpfc_nvmeio_data(phba,
				 "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
				 oxid, size, sid);
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		atomic_inc(&tgtp->defer_fod);
		return;
	}
	ctxp->rqb_buffer = nvmebuf;

	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
			ctxp->oxid, rc,
			atomic_read(&tgtp->rcv_fcp_cmd_in),
			atomic_read(&tgtp->rcv_fcp_cmd_out),
			atomic_read(&tgtp->xmt_fcp_release));
dropit:
	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
			 oxid, size, sid);
	if (oxid) {
		lpfc_nvmet_defer_release(phba, ctxp);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
		return;
	}

	if (ctx_buf)
		lpfc_nvmet_ctxbuf_post(phba, ctx_buf);

	if (nvmebuf)
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
}
/**
 * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @piocb: pointer to the iocbq carrying the received buffer.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
 * SLI RQ on which the unsolicited event was received.
 **/
void
lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *piocb)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *nvmebuf;

	d_buf = piocb->context2;
	nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);

	if (phba->nvmet_support == 0) {
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		return;
	}
	lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
}
/**
 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @idx: relative index of MRQ vector
 * @nvmebuf: pointer to received nvme data structure.
 * @isr_timestamp: interrupt-entry timestamp, used for debugfs latency stats.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
 * SLI RQ on which the unsolicited event was received.
 **/
void
lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
			   uint32_t idx,
			   struct rqb_dmabuf *nvmebuf,
			   uint64_t isr_timestamp)
{
	if (phba->nvmet_support == 0) {
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
		return;
	}
	lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf,
				    isr_timestamp);
}
/**
 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
 * @phba: pointer to a host N_Port data structure.
 * @ctxp: Context info for NVME LS Request
 * @rspbuf: DMA buffer of NVME command.
 * @rspsize: size of the NVME command.
 *
 * This routine allocates a lpfc-WQE data structure from the driver iocbq
 * free-list and prepares it as an XMIT_SEQUENCE WQE carrying the NVME LS
 * response payload described by @rspbuf and @rspsize, filling in the
 * Buffer Descriptor Entry (BDE) for the response payload. The reference
 * count on the ndlp is incremented by 1 and the reference to the ndlp is
 * put into context1 of the WQE data structure for this WQE to hold the
 * ndlp reference for the command's callback function to access later.
 *
 * Returns:
 * Pointer to the newly allocated/prepared nvme wqe data structure
 * NULL - when nvme wqe data structure allocation/preparation failed
 **/
static struct lpfc_iocbq *
lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
		       struct lpfc_nvmet_rcv_ctx *ctxp,
		       dma_addr_t rspbuf, uint16_t rspsize)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe128 *wqe;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6104 NVMET prep LS wqe: link err: "
				"NPORT x%x oxid:x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	/* Allocate buffer for command wqe */
	nvmewqe = lpfc_sli_get_iocbq(phba);
	if (nvmewqe == NULL) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6105 NVMET prep LS wqe: No WQE: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6106 NVMET prep LS wqe: No ndlp: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		goto nvme_wqe_free_wqeq_exit;
	}
	ctxp->wqeq = nvmewqe;

	/* prevent preparing wqe with NULL ndlp reference */
	nvmewqe->context1 = lpfc_nlp_get(ndlp);
	if (nvmewqe->context1 == NULL)
		goto nvme_wqe_free_wqeq_exit;
	nvmewqe->context2 = ctxp;

	wqe = &nvmewqe->wqe;
	memset(wqe, 0, sizeof(union lpfc_wqe));

	/* Words 0 - 2 */
	wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
	wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
	wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	/* Word 12 */
	wqe->xmit_sequence.xmit_len = rspsize;

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;

	/* Xmit NVMET response to remote NPORT <did> */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6039 Xmit NVMET LS response to remote "
			"NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
			ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
			rspsize);
	return nvmewqe;

nvme_wqe_free_wqeq_exit:
	nvmewqe->context2 = NULL;
	nvmewqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, nvmewqe);
	return NULL;
}
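
/**
 * lpfc_nvmet_prep_fcp_wqe - Prepare a WQE for an NVMET FCP operation
 * @phba: pointer to lpfc hba data structure.
 * @ctxp: Receive context of the exchange; ctxp->ctx.fcp_req describes
 *        the operation requested by the nvmet transport.
 *
 * Builds an FCP_TSEND64 (read data, optionally with auto-response),
 * FCP_TRECEIVE64 (write data) or FCP_TRSP64 (response) WQE in the iocbq
 * that travels with the receive context, starting from the matching
 * command template and then filling in the exchange-specific words and
 * the SGL from the transport's scatterlist.
 *
 * Returns:
 * Pointer to the prepared WQE, or NULL on any validation/setup failure.
 **/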
static struct lpfc_iocbq *
lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
			struct lpfc_nvmet_rcv_ctx *ctxp)
{
	struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
	struct lpfc_nvmet_tgtport *tgtp;
	struct sli4_sge *sgl;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	struct scatterlist *sgel;
	union lpfc_wqe128 *wqe;
	struct ulp_bde64 *bde;
	uint32_t *txrdy;
	dma_addr_t physaddr;
	int i, cnt;
	int do_pbde;
	int xc = 1;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6107 NVMET prep FCP wqe: link err:"
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6108 NVMET prep FCP wqe: no ndlp: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6109 NVMET prep FCP wqe: seg cnt err: "
				"NPORT x%x oxid x%x ste %d cnt %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state,
				phba->cfg_nvme_seg_cnt);
		return NULL;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	nvmewqe = ctxp->wqeq;
	if (nvmewqe == NULL) {
		/* Allocate buffer for command wqe */
		nvmewqe = ctxp->ctxbuf->iocbq;
		if (nvmewqe == NULL) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6110 NVMET prep FCP wqe: No "
					"WQE: NPORT x%x oxid x%x ste %d\n",
					ctxp->sid, ctxp->oxid, ctxp->state);
			return NULL;
		}
		ctxp->wqeq = nvmewqe;
		xc = 0; /* create new XRI */
		nvmewqe->sli4_lxritag = NO_XRI;
		nvmewqe->sli4_xritag = NO_XRI;
	}

	/* Sanity check */
	if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
	    (ctxp->entry_cnt == 1)) ||
	    (ctxp->state == LPFC_NVMET_STE_DATA)) {
		wqe = &nvmewqe->wqe;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6111 Wrong state NVMET FCP: %d cnt %d\n",
				ctxp->state, ctxp->entry_cnt);
		return NULL;
	}

	sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
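
	/* Each transport op maps to a different SLI4 WQE type: READDATA
	 * and READDATA_RSP are built as FCP_TSEND64, WRITEDATA as
	 * FCP_TRECEIVE64 (fronted by a driver-built transfer-ready
	 * payload), and RSP as FCP_TRSP64.
	 */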
	switch (rsp->op) {
	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		/* From the tsend template, initialize words 7 - 11 */
		memcpy(&wqe->words[7],
		       &lpfc_tsend_cmd_template.words[7],
		       sizeof(uint32_t) * 5);

		/* Words 0 - 2 : The first sg segment */
		sgel = &rsp->sg[0];
		physaddr = sg_dma_address(sgel);
		wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
		wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_tsend.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_tsend.payload_offset_len = 0;

		/* Word 4 */
		wqe->fcp_tsend.relative_offset = ctxp->offset;

		/* Word 5 */
		wqe->fcp_tsend.reserved = 0;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 - set ar later */

		/* Word 8 */
		wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);

		/* Word 10 - set wqes later, in template xc=1 */
		if (!xc)
			bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);

		/* Word 11 - set sup, irsp, irsplen later */
		do_pbde = 0;

		/* Word 12 */
		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;

		/* Setup 2 SKIP SGEs */
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		if (rsp->op == NVMET_FCOP_READDATA_RSP) {
			atomic_inc(&tgtp->xmt_fcp_read_rsp);

			/* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */

			if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
				if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
					bf_set(wqe_sup,
					       &wqe->fcp_tsend.wqe_com, 1);
			} else {
				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
				       ((rsp->rsplen >> 2) - 1));
				memcpy(&wqe->words[16], rsp->rspaddr,
				       rsp->rsplen);
			}
		} else {
			atomic_inc(&tgtp->xmt_fcp_read);

			/* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
		}
		break;
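
		/* WRITEDATA: the WQE's own BDE carries the transfer-ready
		 * payload; the initiator's data lands in the data SGEs set
		 * up in the common loop below.
		 */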
	case NVMET_FCOP_WRITEDATA:
		/* From the treceive template, initialize words 3 - 11 */
		memcpy(&wqe->words[3],
		       &lpfc_treceive_cmd_template.words[3],
		       sizeof(uint32_t) * 9);

		/* Words 0 - 2 : The first sg segment */
		txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
				       GFP_KERNEL, &physaddr);
		if (!txrdy) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6041 Bad txrdy buffer: oxid x%x\n",
					ctxp->oxid);
			return NULL;
		}
		ctxp->txrdy = txrdy;
		ctxp->txrdy_phys = physaddr;
		wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
		wqe->fcp_treceive.bde.addrLow =
			cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_treceive.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 4 */
		wqe->fcp_treceive.relative_offset = ctxp->offset;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 8 */
		wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);

		/* Word 10 - in template xc=1 */
		if (!xc)
			bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);

		/* Word 11 - set pbde later */
		if (phba->cfg_enable_pbde) {
			do_pbde = 1;
		} else {
			bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
			do_pbde = 0;
		}

		/* Word 12 */
		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;

		/* Setup 1 TXRDY and 1 SKIP SGE */
		txrdy[0] = 0;
		txrdy[1] = cpu_to_be32(rsp->transfer_length);
		txrdy[2] = 0;

		sgl->addr_hi = putPaddrHigh(physaddr);
		sgl->addr_lo = putPaddrLow(physaddr);
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
		sgl++;
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		atomic_inc(&tgtp->xmt_fcp_write);
		break;
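
		/* RSP: a good (minimal) response is sent from the rspbuf
		 * BDE; a non-minimal response is embedded directly in the
		 * WQE via wqes/irsp/irsplen.
		 */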
	case NVMET_FCOP_RSP:
		/* From the trsp template, initialize words 4 - 11 */
		memcpy(&wqe->words[4],
		       &lpfc_trsp_cmd_template.words[4],
		       sizeof(uint32_t) * 8);

		/* Words 0 - 2 */
		physaddr = rsp->rspdma;
		wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
		wqe->fcp_trsp.bde.addrLow =
			cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_trsp.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_trsp.response_len = rsp->rsplen;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 8 */
		wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);

		/* Word 10 */
		if (xc)
			bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);

		/* Word 11 */
		/* In template wqes=0 irsp=0 irsplen=0 - good response */
		if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
			/* Bad response - embed it */
			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
			       ((rsp->rsplen >> 2) - 1));
			memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
		}
		do_pbde = 0;

		/* Word 12 */
		wqe->fcp_trsp.rsvd_12_15[0] = 0;

		/* Use rspbuf, NOT sg list */
		rsp->sg_cnt = 0;
		sgl->word2 = 0;
		atomic_inc(&tgtp->xmt_fcp_rsp);
		break;
	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6064 Unknown Rsp Op %d\n",
				rsp->op);
		return NULL;
	}

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->context1 = ndlp;

	for (i = 0; i < rsp->sg_cnt; i++) {
		sgel = &rsp->sg[i];
		physaddr = sg_dma_address(sgel);
		cnt = sg_dma_len(sgel);
		sgl->addr_hi = putPaddrHigh(physaddr);
		sgl->addr_lo = putPaddrLow(physaddr);
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
		if ((i+1) == rsp->sg_cnt)
			bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(cnt);
		if (i == 0) {
			bde = (struct ulp_bde64 *)&wqe->words[13];
			if (do_pbde) {
				/* Words 13-15 (PBDE) */
				bde->addrLow = sgl->addr_lo;
				bde->addrHigh = sgl->addr_hi;
				bde->tus.f.bdeSize =
					le32_to_cpu(sgl->sge_len);
				bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bde->tus.w = cpu_to_le32(bde->tus.w);
			} else {
				memset(bde, 0, sizeof(struct ulp_bde64));
			}
		}
		sgl++;
		ctxp->offset += cnt;
	}
	ctxp->state = LPFC_NVMET_STE_DATA;
	ctxp->entry_cnt++;
	return nvmewqe;
}
/**
 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME ABTS for FCP
 * cmds. The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;
	unsigned long flags;
	bool released = false;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	ctxp->state = LPFC_NVMET_STE_DONE;

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
		list_del(&ctxp->list);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6165 ABORT cmpl: xri x%x flg x%x (%d) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	/*
	 * if transport has released ctx, then can reuse it. Otherwise,
	 * will be recycled by transport release call.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* This is the iocbq for the abort, not the command */
	lpfc_sli_release_iocbq(phba, cmdwqe);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}
/**
 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME ABTS for FCP
 * cmds. The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			       struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	unsigned long flags;
	uint32_t status, result;
	bool released = false;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	if (!ctxp) {
		/* if context is clear, related io already complete */
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);
		return;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	/* Sanity check */
	if (ctxp->state != LPFC_NVMET_STE_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6112 ABTS Wrong state:%d oxid x%x\n",
				ctxp->state, ctxp->oxid);
	}

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	ctxp->state = LPFC_NVMET_STE_DONE;
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
		list_del(&ctxp->list);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6316 ABTS cmpl xri x%x flg x%x (%x) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	/*
	 * if transport has released ctx, then can reuse it. Otherwise,
	 * will be recycled by transport release call.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}
/**
 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME ABTS for LS
 * cmds. The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			    struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	atomic_inc(&tgtp->xmt_ls_abort_cmpl);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
			ctxp, wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	if (!ctxp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6415 NVMET LS Abort No ctx: WCQE: "
				"%08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);

		lpfc_sli_release_iocbq(phba, cmdwqe);
		return;
	}

	if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6416 NVMET LS abort cmpl state mismatch: "
				"oxid x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	kfree(ctxp);
}
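
/**
 * lpfc_nvmet_unsol_issue_abort - Build an ABTS WQE for an exchange
 * @phba: Pointer to HBA context object.
 * @ctxp: Context info for the exchange to be aborted.
 * @sid: Source N_Port ID the original command came from.
 * @xri: Exchange ID (OX_ID) of the exchange to abort.
 *
 * Prepares ctxp->wqeq as an XMIT_SEQUENCE WQE that carries a BA_ABTS
 * sequence to the remote port. The WQE is only built here; the callers
 * install the completion handler and submit it to the adapter.
 *
 * Returns 1 when the WQE was prepared, 0 when the request was dropped
 * (for example, no usable ndlp); an ABTS request is never failed back.
 **/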
static int
lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
			     struct lpfc_nvmet_rcv_ctx *ctxp,
			     uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe128 *wqe_abts;
	struct lpfc_nodelist *ndlp;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6067 ABTS: sid %x xri x%x/x%x\n",
			sid, xri, ctxp->wqeq->sli4_xritag);

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6134 Drop ABTS - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		return 0;
	}

	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;

	/*
	 * Since we zero the whole WQE, we need to ensure we set the WQE fields
	 * that were initialized in lpfc_sli4_nvmet_alloc.
	 */
	memset(wqe_abts, 0, sizeof(union lpfc_wqe));

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
	bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
	       abts_wqeq->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
	bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	abts_wqeq->vport = phba->pport;
	abts_wqeq->context1 = ndlp;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->context3 = NULL;
	abts_wqeq->rsvd2 = 0;
	/* hba_wqidx should already be setup from command we are aborting */
	abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
	abts_wqeq->iocb.ulpLe = 1;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6069 Issue ABTS to xri x%x reqtag x%x\n",
			xri, abts_wqeq->iotag);
	return 1;
}
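
/**
 * lpfc_nvmet_sol_fcp_issue_abort - Abort an exchange with an active WQE
 * @phba: Pointer to HBA context object.
 * @ctxp: Context info for the command being aborted.
 * @sid: Source N_Port ID the original command came from.
 * @xri: Exchange ID (OX_ID) of the exchange to abort.
 *
 * Allocates a separate iocbq for the abort, builds an ABORT_XRI_CX WQE
 * that references the XRI of the outstanding command, and submits it on
 * the same WQ the command was issued on. The request is quietly dropped
 * if the HBA is flushing IO for a reset or if an abort is already
 * pending on the exchange.
 *
 * Returns 0 on success or when the request is dropped, 1 when issuing
 * the abort WQE fails.
 **/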
static int
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
			       struct lpfc_nvmet_rcv_ctx *ctxp,
			       uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe128 *abts_wqe;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6160 Drop ABORT - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}

	/* Issue ABTS for this WQE based on iotag */
	ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
	if (!ctxp->abort_wqeq) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6161 ABORT failed: No wqeqs: "
				"xri: x%x\n", ctxp->oxid);
		/* No failure to an ABTS request. */
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}
	abts_wqeq = ctxp->abort_wqeq;
	abts_wqe = &abts_wqeq->wqe;
	ctxp->state = LPFC_NVMET_STE_ABORT;

	/* Announce entry to new IO submit field. */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6162 ABORT Request to rport DID x%06x "
			"for xri x%x x%x\n",
			ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);

	/* If the hba is getting reset, this flag is set. It is
	 * cleared when the reset is complete and rings reestablished.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6163 Driver in reset cleanup - flushing "
				"NVME Req now. hba_flag x%x oxid x%x\n",
				phba->hba_flag, ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}

	/* Outstanding abort is in progress */
	if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6164 Outstanding NVME I/O Abort Request "
				"still pending on oxid x%x\n",
				ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}

	/* Ready - mark outstanding as aborted by driver. */
	abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;

	/* WQEs are reused. Clear stale data and set key fields to
	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
	 */
	memset(abts_wqe, 0, sizeof(union lpfc_wqe));

	/* word 3 */
	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);

	/* word 7 */
	bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);

	/* word 8 - tell the FW to abort the IO associated with this
	 * outstanding exchange ID.
	 */
	abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;

	/* word 9 - this is the iotag for the abts_wqe completion. */
	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
	       abts_wqeq->iotag);

	/* word 10 */
	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

	/* word 11 */
	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->vport = phba->pport;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_sol);
		return 0;
	}

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6166 Failed ABORT issue_wqe with status x%x "
			"for oxid x%x.\n",
			rc, ctxp->oxid);
	return 1;
}
static int
lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
				 struct lpfc_nvmet_rcv_ctx *ctxp,
				 uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	if (ctxp->state == LPFC_NVMET_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
		rc = WQE_BUSY;
		goto aerr;
	}
	ctxp->state = LPFC_NVMET_STE_ABORT;
	ctxp->entry_cnt++;
	rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
	if (rc == 0)
		goto aerr;

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq = ctxp->wqeq;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		return 0;
	}

aerr:
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVMET_CTX_RLS)
		list_del(&ctxp->list);
	ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
			ctxp->oxid, rc);
	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
	return 1;
}
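
/**
 * lpfc_nvmet_unsol_ls_issue_abort - Issue an ABTS for an NVME LS exchange
 * @phba: Pointer to HBA context object.
 * @ctxp: Context info for the NVME LS request being aborted.
 * @sid: Source N_Port ID the LS request came from.
 * @xri: Exchange ID (OX_ID) of the LS exchange to abort.
 *
 * Allocates an iocbq if the context does not already carry one, builds
 * the ABTS via lpfc_nvmet_unsol_issue_abort(), and submits it on the ELS
 * ring. On failure the iocbq is released and the LS context is freed.
 *
 * Returns 0 in all cases; an ABTS request is never failed back.
 **/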
static int
lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
				struct lpfc_nvmet_rcv_ctx *ctxp,
				uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe128 *wqe_abts;
	unsigned long flags;
	int rc;

	if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
	    (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
		ctxp->state = LPFC_NVMET_STE_LS_ABORT;
		ctxp->entry_cnt++;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6418 NVMET LS abort state mismatch "
				"IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
		ctxp->state = LPFC_NVMET_STE_LS_ABORT;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		/* Issue ABTS for this WQE based on iotag */
		ctxp->wqeq = lpfc_sli_get_iocbq(phba);
		if (!ctxp->wqeq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
					"6068 Abort failed: No wqeqs: "
					"xri: x%x\n", xri);
			/* No failure to an ABTS request. */
			kfree(ctxp);
			return 0;
		}
	}
	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;

	if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
		rc = WQE_BUSY;
		goto out;
	}

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_unsol);
		return 0;
	}
out:
	atomic_inc(&tgtp->xmt_abort_rsp_error);
	abts_wqeq->context2 = NULL;
	abts_wqeq->context3 = NULL;
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	kfree(ctxp);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6056 Failed to Issue ABTS. Status x%x\n", rc);
	return 0;
}