/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme.h>
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
						 struct lpfc_nvmet_rcv_ctx *,
						 dma_addr_t rspbuf,
						 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
						  struct lpfc_nvmet_rcv_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
					  struct lpfc_nvmet_rcv_ctx *,
					  uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
					    struct lpfc_nvmet_rcv_ctx *,
					    uint32_t, uint16_t);
static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
					   struct lpfc_nvmet_rcv_ctx *,
					   uint32_t, uint16_t);
static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
				    struct lpfc_nvmet_rcv_ctx *);
static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);

static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);

static union lpfc_wqe128 lpfc_tsend_cmd_template;
static union lpfc_wqe128 lpfc_treceive_cmd_template;
static union lpfc_wqe128 lpfc_trsp_cmd_template;

/* Setup WQE templates for NVME IOs */
void
lpfc_nvmet_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* TSEND template */
	wqe = &lpfc_tsend_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is zero */

	/* Word 4 - relative_offset is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 - wqe_ar is variable */
	bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
	bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag, rcvoxid is variable */

	/* Word 10 - wqes, xc is variable */
	bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);

	/* Word 11 - sup, irsp, irsplen is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
	bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);

	/* Word 12 - fcp_data_len is variable */

	/* Word 13, 14, 15 - PBDE is zero */

	/* TRECEIVE template */
	wqe = &lpfc_treceive_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 */
	wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;

	/* Word 4 - relative_offset is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
	bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag, rcvoxid is variable */

	/* Word 10 - xc is variable */
	bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
	bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);

	/* Word 12 - fcp_data_len is variable */

	/* Word 13, 14, 15 - PBDE is variable */

	/* TRSP template */
	wqe = &lpfc_trsp_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - response_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
	bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
	bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 wqes, xc is variable */
	bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
	bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);

	/* Word 11 irsp, irsplen is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
	bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}
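
/* Queue a command context for deferred release: flag it so the abort /
 * XRI-aborted path performs the final free, and park it on the
 * adapter-wide aborted-context list. Caller holds ctxp->ctxlock.
 */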
static void
lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
{
	lockdep_assert_held(&ctxp->ctxlock);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6313 NVMET Defer ctx release xri x%x flg x%x\n",
			ctxp->oxid, ctxp->flag);

	if (ctxp->flag & LPFC_NVMET_CTX_RLS)
		return;

	ctxp->flag |= LPFC_NVMET_CTX_RLS;
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
}

/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME LS commands
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_ls_req *rsp;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t status, result;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;
	ctxp = cmdwqe->context2;

	if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6410 NVMET LS cmpl state mismatch IO x%x: "
				"%d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	if (!phba->targetport)
		goto out;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	if (tgtp) {
		if (status) {
			atomic_inc(&tgtp->xmt_ls_rsp_error);
			if (result == IOERR_ABORT_REQUESTED)
				atomic_inc(&tgtp->xmt_ls_rsp_aborted);
			if (bf_get(lpfc_wcqe_c_xb, wcqe))
				atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
		} else {
			atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
		}
	}

out:
	rsp = &ctxp->ctx.ls_req;

	lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
			 ctxp->oxid, status, result);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
			status, result, ctxp->oxid);

	lpfc_nlp_put(cmdwqe->context1);
	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	rsp->done(rsp);
	kfree(ctxp);
}

/**
 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA the buffer is associated with
 * @ctx_buf: ctx buffer context to clean up and repost
 *
 * Description: Frees the given DMA buffer in the appropriate way given by
 * reposting it to its associated RQ so it can be reused.
 *
 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
 *
 * Returns: None
 **/
void
lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct rqb_dmabuf *nvmebuf;
	struct lpfc_nvmet_ctx_info *infop;
	uint32_t size, oxid, sid;
	int cpu;
	unsigned long iflag;

	if (ctxp->txrdy) {
		dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
			      ctxp->txrdy_phys);
		ctxp->txrdy = NULL;
		ctxp->txrdy_phys = 0;
	}

	if (ctxp->state == LPFC_NVMET_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6411 NVMET free, already free IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	if (ctxp->rqb_buffer) {
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		nvmebuf = ctxp->rqb_buffer;
		/* check if freed in another path whilst acquiring lock */
		if (nvmebuf) {
			ctxp->rqb_buffer = NULL;
			if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
				ctxp->flag &= ~LPFC_NVMET_CTX_REUSE_WQ;
				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
				nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
								    nvmebuf);
			} else {
				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
				/* repost */
				lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
			}
		} else {
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
		}
	}
	ctxp->state = LPFC_NVMET_STE_FREE;

	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
	if (phba->sli4_hba.nvmet_io_wait_cnt) {
		list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
				 nvmebuf, struct rqb_dmabuf,
				 hbuf.list);
		phba->sli4_hba.nvmet_io_wait_cnt--;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
		oxid = be16_to_cpu(fc_hdr->fh_ox_id);
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		size = nvmebuf->bytes_recv;
		sid = sli4_sid_from_fc_hdr(fc_hdr);

		ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
		ctxp->wqeq = NULL;
		ctxp->txrdy = NULL;
		ctxp->offset = 0;
		ctxp->phba = phba;
		ctxp->size = size;
		ctxp->oxid = oxid;
		ctxp->sid = sid;
		ctxp->state = LPFC_NVMET_STE_RCV;
		ctxp->entry_cnt = 1;
		ctxp->flag = 0;
		ctxp->ctxbuf = ctx_buf;
		ctxp->rqb_buffer = (void *)nvmebuf;
		spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		/* NOTE: isr time stamp is stale when context is re-assigned*/
		if (ctxp->ts_isr_cmd) {
			ctxp->ts_cmd_nvme = 0;
			ctxp->ts_nvme_data = 0;
			ctxp->ts_data_wqput = 0;
			ctxp->ts_isr_data = 0;
			ctxp->ts_data_nvme = 0;
			ctxp->ts_nvme_status = 0;
			ctxp->ts_status_wqput = 0;
			ctxp->ts_isr_status = 0;
			ctxp->ts_status_nvme = 0;
		}
#endif
		atomic_inc(&tgtp->rcv_fcp_cmd_in);

		/* flag new work queued, replacement buffer has already
		 * been reposted
		 */
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVMET_CTX_REUSE_WQ;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
			atomic_inc(&tgtp->rcv_fcp_cmd_drop);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6181 Unable to queue deferred work "
					"for oxid x%x. "
					"FCP Drop IO [x%x x%x x%x]\n",
					ctxp->oxid,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));

			spin_lock_irqsave(&ctxp->ctxlock, iflag);
			lpfc_nvmet_defer_release(phba, ctxp);
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
			lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
		}
		return;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

	/*
	 * Use the CPU context list, from the MRQ the IO was received on
	 * (ctxp->idx), to save context structure.
	 */
	cpu = raw_smp_processor_id();
	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
	list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
	infop->nvmet_ctx_list_cnt++;
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
#endif
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvmet_ktime(struct lpfc_hba *phba,
		 struct lpfc_nvmet_rcv_ctx *ctxp)
{
	uint64_t seg1, seg2, seg3, seg4, seg5;
	uint64_t seg6, seg7, seg8, seg9, seg10;
	uint64_t segsum;

	if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
	    !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
	    !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
	    !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
	    !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
		return;

	if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
		return;
	if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
		return;
	if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
		return;
	if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
		return;
	if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
		return;
	if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
		return;
	if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
		return;
	if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
		return;
	if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
		return;
	if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
		return;
	/*
	 * Segment 1 - Time from FCP command received by MSI-X ISR
	 * to FCP command is passed to NVME Layer.
	 * Segment 2 - Time from FCP command payload handed
	 * off to NVME Layer to Driver receives a Command op
	 * from NVME Layer.
	 * Segment 3 - Time from Driver receives a Command op
	 * from NVME Layer to Command is put on WQ.
	 * Segment 4 - Time from Driver WQ put is done
	 * to MSI-X ISR for Command cmpl.
	 * Segment 5 - Time from MSI-X ISR for Command cmpl to
	 * Command cmpl is passed to NVME Layer.
	 * Segment 6 - Time from Command cmpl is passed to NVME
	 * Layer to Driver receives a RSP op from NVME Layer.
	 * Segment 7 - Time from Driver receives a RSP op from
	 * NVME Layer to WQ put is done on TRSP FCP Status.
	 * Segment 8 - Time from Driver WQ put is done on TRSP
	 * FCP Status to MSI-X ISR for TRSP cmpl.
	 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
	 * TRSP cmpl is passed to NVME Layer.
	 * Segment 10 - Time from FCP command received by
	 * MSI-X ISR to command is completed on wire.
	 * (Segments 1 thru 8) for READDATA / WRITEDATA
	 * (Segments 1 thru 4) for READDATA_RSP
	 */
	seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
	segsum = seg1;

	seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
	if (segsum > seg2)
		return;
	seg2 -= segsum;
	segsum += seg2;

	seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
	if (segsum > seg3)
		return;
	seg3 -= segsum;
	segsum += seg3;

	seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
	if (segsum > seg4)
		return;
	seg4 -= segsum;
	segsum += seg4;

	seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
	if (segsum > seg5)
		return;
	seg5 -= segsum;
	segsum += seg5;

	/* For auto rsp commands seg6 thru seg10 will be 0 */
	if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
		seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
		if (segsum > seg6)
			return;
		seg6 -= segsum;
		segsum += seg6;

		seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
		if (segsum > seg7)
			return;
		seg7 -= segsum;
		segsum += seg7;

		seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
		if (segsum > seg8)
			return;
		seg8 -= segsum;
		segsum += seg8;

		seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
		if (segsum > seg9)
			return;
		seg9 -= segsum;
		segsum += seg9;

		if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
			return;
		seg10 = (ctxp->ts_isr_status -
			 ctxp->ts_isr_cmd);
	} else {
		if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
			return;
		seg6 = 0;
		seg7 = 0;
		seg8 = 0;
		seg9 = 0;
		seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
	}

	phba->ktime_seg1_total += seg1;
	if (seg1 < phba->ktime_seg1_min)
		phba->ktime_seg1_min = seg1;
	else if (seg1 > phba->ktime_seg1_max)
		phba->ktime_seg1_max = seg1;

	phba->ktime_seg2_total += seg2;
	if (seg2 < phba->ktime_seg2_min)
		phba->ktime_seg2_min = seg2;
	else if (seg2 > phba->ktime_seg2_max)
		phba->ktime_seg2_max = seg2;

	phba->ktime_seg3_total += seg3;
	if (seg3 < phba->ktime_seg3_min)
		phba->ktime_seg3_min = seg3;
	else if (seg3 > phba->ktime_seg3_max)
		phba->ktime_seg3_max = seg3;

	phba->ktime_seg4_total += seg4;
	if (seg4 < phba->ktime_seg4_min)
		phba->ktime_seg4_min = seg4;
	else if (seg4 > phba->ktime_seg4_max)
		phba->ktime_seg4_max = seg4;

	phba->ktime_seg5_total += seg5;
	if (seg5 < phba->ktime_seg5_min)
		phba->ktime_seg5_min = seg5;
	else if (seg5 > phba->ktime_seg5_max)
		phba->ktime_seg5_max = seg5;

	phba->ktime_data_samples++;
	if (!seg6)
		goto out;

	phba->ktime_seg6_total += seg6;
	if (seg6 < phba->ktime_seg6_min)
		phba->ktime_seg6_min = seg6;
	else if (seg6 > phba->ktime_seg6_max)
		phba->ktime_seg6_max = seg6;

	phba->ktime_seg7_total += seg7;
	if (seg7 < phba->ktime_seg7_min)
		phba->ktime_seg7_min = seg7;
	else if (seg7 > phba->ktime_seg7_max)
		phba->ktime_seg7_max = seg7;

	phba->ktime_seg8_total += seg8;
	if (seg8 < phba->ktime_seg8_min)
		phba->ktime_seg8_min = seg8;
	else if (seg8 > phba->ktime_seg8_max)
		phba->ktime_seg8_max = seg8;

	phba->ktime_seg9_total += seg9;
	if (seg9 < phba->ktime_seg9_min)
		phba->ktime_seg9_min = seg9;
	else if (seg9 > phba->ktime_seg9_max)
		phba->ktime_seg9_max = seg9;

	phba->ktime_seg10_total += seg10;
	if (seg10 < phba->ktime_seg10_min)
		phba->ktime_seg10_min = seg10;
	else if (seg10 > phba->ktime_seg10_max)
		phba->ktime_seg10_max = seg10;
	phba->ktime_status_samples++;
out:
	return;
}
#endif

/**
 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME FCP commands
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_fcp_req *rsp;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t status, result, op, start_clean, logerr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t id;
#endif

	ctxp = cmdwqe->context2;
	ctxp->flag &= ~LPFC_NVMET_IO_INP;

	rsp = &ctxp->ctx.fcp_req;
	op = rsp->op;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	if (phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	else
		tgtp = NULL;

	lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
			 ctxp->oxid, op, status);

	if (status) {
		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
		rsp->transferred_length = 0;
		if (tgtp) {
			atomic_inc(&tgtp->xmt_fcp_rsp_error);
			if (result == IOERR_ABORT_REQUESTED)
				atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
		}

		logerr = LOG_NVME_IOERR;

		/* pick up SLI4 exchange busy condition */
		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
			ctxp->flag |= LPFC_NVMET_XBUSY;
			logerr |= LOG_NVME_ABTS;
			if (tgtp)
				atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
		} else {
			ctxp->flag &= ~LPFC_NVMET_XBUSY;
		}

		lpfc_printf_log(phba, KERN_INFO, logerr,
				"6315 IO Error Cmpl xri x%x: %x/%x XBUSY:x%x\n",
				ctxp->oxid, status, result, ctxp->flag);

	} else {
		rsp->fcp_error = NVME_SC_SUCCESS;
		if (op == NVMET_FCOP_RSP)
			rsp->transferred_length = rsp->rsplen;
		else
			rsp->transferred_length = rsp->transfer_length;
		if (tgtp)
			atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
	}

	if ((op == NVMET_FCOP_READDATA_RSP) ||
	    (op == NVMET_FCOP_RSP)) {
		/* Sanity check */
		ctxp->state = LPFC_NVMET_STE_DONE;
		ctxp->entry_cnt++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme) {
			if (rsp->op == NVMET_FCOP_READDATA_RSP) {
				ctxp->ts_isr_data =
					cmdwqe->isr_timestamp;
				ctxp->ts_data_nvme =
					ktime_get_ns();
				ctxp->ts_nvme_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_wqput =
					ctxp->ts_data_nvme;
				ctxp->ts_isr_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_nvme =
					ctxp->ts_data_nvme;
			} else {
				ctxp->ts_isr_status =
					cmdwqe->isr_timestamp;
				ctxp->ts_status_nvme =
					ktime_get_ns();
			}
		}
#endif
		rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme)
			lpfc_nvmet_ktime(phba, ctxp);
#endif
		/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
	} else {
		ctxp->entry_cnt++;
		start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
		memset(((char *)cmdwqe) + start_clean, 0,
		       (sizeof(struct lpfc_iocbq) - start_clean));
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme) {
			ctxp->ts_isr_data = cmdwqe->isr_timestamp;
			ctxp->ts_data_nvme = ktime_get_ns();
		}
#endif
		rsp->done(rsp);
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
		id = raw_smp_processor_id();
		if (id < LPFC_CHECK_CPU_CNT) {
			if (ctxp->cpu != id)
				lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
						"6704 CPU Check cmdcmpl: "
						"cpu %d expect %d\n",
						id, ctxp->cpu);
			phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_cmpl_io[id]++;
		}
	}
#endif
}
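
/* Transport entry point to send an NVME LS response: builds an
 * XMIT_SEQUENCE WQE for the response payload and posts it; on failure
 * the exchange is aborted and the receive buffer freed.
 */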
static int
lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_ls_req *rsp)
{
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct hbq_dmabuf *nvmebuf =
		(struct hbq_dmabuf *)ctxp->rqb_buffer;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
	struct lpfc_dmabuf dmabuf;
	struct ulp_bde64 bpl;
	int rc;

	if (phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);

	if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
	    (ctxp->entry_cnt != 1)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6412 NVMET LS rsp state mismatch "
				"oxid x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}
	ctxp->state = LPFC_NVMET_STE_LS_RSP;
	ctxp->entry_cnt++;

	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
					  rsp->rsplen);
	if (nvmewqeq == NULL) {
		atomic_inc(&nvmep->xmt_ls_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6150 LS Drop IO x%x: Prep\n",
				ctxp->oxid);
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		atomic_inc(&nvmep->xmt_ls_abort);
		lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
						ctxp->sid, ctxp->oxid);
		return -ENOMEM;
	}

	/* Save numBdes for bpl2sgl */
	nvmewqeq->rsvd2 = 1;
	nvmewqeq->hba_wqidx = 0;
	nvmewqeq->context3 = &dmabuf;
	dmabuf.virt = &bpl;
	bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
	bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
	bpl.tus.f.bdeSize = rsp->rsplen;
	bpl.tus.f.bdeFlags = 0;
	bpl.tus.w = le32_to_cpu(bpl.tus.w);

	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = ctxp;

	lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n",
			 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);

	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
	if (rc == WQE_SUCCESS) {
		/*
		 * Okay to repost buffer here, but wait till cmpl
		 * before freeing ctxp and iocbq.
		 */
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		ctxp->rqb_buffer = NULL;
		atomic_inc(&nvmep->xmt_ls_rsp);
		return 0;
	}

	/* Give back resources */
	atomic_inc(&nvmep->xmt_ls_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6151 LS Drop IO x%x: Issue %d\n",
			ctxp->oxid, rc);

	lpfc_nlp_put(nvmewqeq->context1);

	lpfc_in_buf_free(phba, &nvmebuf->dbuf);
	atomic_inc(&nvmep->xmt_ls_abort);
	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
	return -ENXIO;
}
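
/* Transport entry point for FCP data/response ops: converts the
 * nvmefc_tgt_fcp_req into a TSEND/TRECEIVE/TRSP WQE and posts it to the
 * hardware queue. A WQ-full condition parks the WQE on the wqfull_list
 * until a WQE release CQE frees slots.
 */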
static int
lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_queue *wq;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	int rc;

	if (phba->pport->load_flag & FC_UNLOADING) {
		rc = -ENODEV;
		goto aerr;
	}

	/* Setup the hdw queue if not already set */
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (ctxp->ts_cmd_nvme) {
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_nvme_status = ktime_get_ns();
		else
			ctxp->ts_nvme_data = ktime_get_ns();
	}

	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
		int id = raw_smp_processor_id();
		if (id < LPFC_CHECK_CPU_CNT) {
			if (rsp->hwqid != id)
				lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
						"6705 CPU Check OP: "
						"cpu %d expect %d\n",
						id, rsp->hwqid);
			phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_xmt_io[id]++;
		}
		ctxp->cpu = id; /* Setup cpu for cmpl check */
	}
#endif

	/* Sanity check */
	if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
	    (ctxp->state == LPFC_NVMET_STE_ABORT)) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6102 IO xri x%x aborted\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
	if (nvmewqeq == NULL) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6152 FCP Drop IO x%x: Prep\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = ctxp;
	nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
	ctxp->wqeq->hba_wqidx = rsp->hwqid;

	lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
			 ctxp->oxid, rsp->op, rsp->rsplen);

	ctxp->flag |= LPFC_NVMET_IO_INP;
	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
	if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (!ctxp->ts_cmd_nvme)
			return 0;
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_status_wqput = ktime_get_ns();
		else
			ctxp->ts_data_wqput = ktime_get_ns();
#endif
		return 0;
	}

	if (rc == -EBUSY) {
		/*
		 * WQ was full, so queue nvmewqeq to be sent after
		 * WQE release CQE
		 */
		ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
		wq = ctxp->hdwq->nvme_wq;
		pring = wq->pring;
		spin_lock_irqsave(&pring->ring_lock, iflags);
		list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
		wq->q_flag |= HBA_NVMET_WQFULL;
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		atomic_inc(&lpfc_nvmep->defer_wqfull);
		return 0;
	}

	/* Give back resources */
	atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6153 FCP Drop IO x%x: Issue: %d\n",
			ctxp->oxid, rc);

	ctxp->wqeq->hba_wqidx = 0;
	nvmewqeq->context2 = NULL;
	nvmewqeq->context3 = NULL;
	rc = -EBUSY;
aerr:
	return rc;
}
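
/* Transport callback invoked once the targetport unregister completes;
 * wakes any thread waiting in lpfc_nvmet_destroy_targetport().
 */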
static void
lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct lpfc_nvmet_tgtport *tport = targetport->private;

	/* release any threads waiting for the unreg to complete */
	if (tport->phba->targetport)
		complete(tport->tport_unreg_cmp);
}
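
/* Transport entry point to abort an FCP exchange: issues an unsolicited
 * or solicited ABTS depending on whether any IO WQEs were already issued
 * on the exchange.
 */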
static void
lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			 struct nvmefc_tgt_fcp_req *req)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_queue *wq;
	unsigned long flags;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[0];

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
			ctxp->oxid, ctxp->flag, ctxp->state);

	lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
			 ctxp->oxid, ctxp->flag, ctxp->state);

	atomic_inc(&lpfc_nvmep->xmt_fcp_abort);

	spin_lock_irqsave(&ctxp->ctxlock, flags);

	/* Since iaab/iaar are NOT set, we need to check
	 * if the firmware is in process of aborting IO
	 */
	if (ctxp->flag & LPFC_NVMET_XBUSY) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return;
	}
	ctxp->flag |= LPFC_NVMET_ABORT_OP;

	if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
		wq = ctxp->hdwq->nvme_wq;
		lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
		return;
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	/* A state of LPFC_NVMET_STE_RCV means we have just received
	 * the NVME command and have not started processing it.
	 * (by issuing any IO WQEs on this exchange yet)
	 */
	if (ctxp->state == LPFC_NVMET_STE_RCV)
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
	else
		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
					       ctxp->oxid);
}
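
/* Transport entry point to release an IO context: if an abort is still
 * pending, defer the real release to the abort path; otherwise recycle
 * the context buffer immediately.
 */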
static void
lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
			   struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long flags;
	bool aborting = false;

	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVMET_XBUSY)
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6027 NVMET release with XBUSY flag x%x"
				" oxid x%x\n",
				ctxp->flag, ctxp->oxid);
	else if (ctxp->state != LPFC_NVMET_STE_DONE &&
		 ctxp->state != LPFC_NVMET_STE_ABORT)
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6413 NVMET release bad state %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);

	if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
	    (ctxp->flag & LPFC_NVMET_XBUSY)) {
		aborting = true;
		/* let the abort path do the real release */
		lpfc_nvmet_defer_release(phba, ctxp);
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n",
			 ctxp->oxid, ctxp->state, aborting);

	atomic_inc(&lpfc_nvmep->xmt_fcp_release);

	if (aborting)
		return;

	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}
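
/* Transport callback invoked when a previously deferred command IU has
 * been accepted; the saved receive buffer can now be freed since a
 * replacement was already reposted.
 */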
static void
lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
		     struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long iflag;

	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
			 ctxp->oxid, ctxp->size, raw_smp_processor_id());

	if (!nvmebuf) {
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6425 Defer rcv: no buffer xri x%x: "
				"flg %x ste %x\n",
				ctxp->oxid, ctxp->flag, ctxp->state);
		return;
	}

	tgtp = phba->targetport->private;
	if (tgtp)
		atomic_inc(&tgtp->rcv_fcp_cmd_defer);

	/* Free the nvmebuf since a new buffer already replaced it */
	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
	spin_lock_irqsave(&ctxp->ctxlock, iflag);
	ctxp->rqb_buffer = NULL;
	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
}

static struct nvmet_fc_target_template lpfc_tgttemplate = {
	.targetport_delete = lpfc_nvmet_targetport_delete,
	.xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
	.fcp_op         = lpfc_nvmet_xmt_fcp_op,
	.fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
	.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
	.defer_rcv      = lpfc_nvmet_defer_rcv,

	.max_hw_queues  = 1,
	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* optional features */
	.target_features = 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
};
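
/* Free every context buffer on one CPU/MRQ context list, returning the
 * SGLs to the NVMET SGL list and releasing the associated iocbqs.
 */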
static void
__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
			      struct lpfc_nvmet_ctx_info *infop)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
	unsigned long flags;

	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
				 &infop->nvmet_ctx_list, list) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctx_buf->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);

		__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
		ctx_buf->sglq->state = SGL_FREED;
		ctx_buf->sglq->ndlp = NULL;

		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_add_tail(&ctx_buf->sglq->list,
			      &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);

		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
		kfree(ctx_buf->context);
	}
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
}

static void
lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctx_info *infop;
	int i, j;

	/* The first context list, MRQ 0 CPU 0 */
	infop = phba->sli4_hba.nvmet_ctx_info;
	if (!infop)
		return;

	/* Cycle the entire CPU context list for every MRQ */
	for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
		for_each_present_cpu(j) {
			infop = lpfc_get_ctx_list(phba, j, i);
			__lpfc_nvmet_clean_io_for_cpu(phba, infop);
		}
	}
	kfree(phba->sli4_hba.nvmet_ctx_info);
	phba->sli4_hba.nvmet_ctx_info = NULL;
}
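
/* Allocate per-XRI receive contexts (context buffer, iocbq, SGL) and
 * distribute them evenly across the per-CPU/per-MRQ context lists.
 */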
static int
lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe128 *wqe;
	struct lpfc_nvmet_ctx_info *last_infop;
	struct lpfc_nvmet_ctx_info *infop;
	int i, j, idx, cpu;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6403 Allocate NVMET resources for %d XRIs\n",
			phba->sli4_hba.nvmet_xri_cnt);

	phba->sli4_hba.nvmet_ctx_info = kcalloc(
		phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
		sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
	if (!phba->sli4_hba.nvmet_ctx_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6419 Failed allocate memory for "
				"nvmet context lists\n");
		return -ENOMEM;
	}

	/*
	 * Assuming X CPUs in the system, and Y MRQs, allocate some
	 * lpfc_nvmet_ctx_info structures as follows:
	 *
	 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
	 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
	 * ...
	 * cpuX/mrqY cpuX/mrqY ... cpuX/mrqY
	 *
	 * Each line represents a MRQ "silo" containing an entry for
	 * every CPU.
	 *
	 * MRQ X is initially assumed to be associated with CPU X, thus
	 * contexts are initially distributed across all MRQs using
	 * the MRQ index (N) as follows cpuN/mrqN. When contexts are
	 * freed, they are freed to the MRQ silo based on the CPU number
	 * of the IO completion. Thus a context that was allocated for MRQ A
	 * whose IO completed on CPU B will be freed to cpuB/mrqA.
	 */
	for_each_possible_cpu(i) {
		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
			infop = lpfc_get_ctx_list(phba, i, j);
			INIT_LIST_HEAD(&infop->nvmet_ctx_list);
			spin_lock_init(&infop->nvmet_ctx_list_lock);
			infop->nvmet_ctx_list_cnt = 0;
		}
	}

	/*
	 * Setup the next CPU context info ptr for each MRQ.
	 * MRQ 0 will cycle thru CPUs 0 - X separately from
	 * MRQ 1 cycling thru CPUs 0 - X, and so on.
	 */
	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
		last_infop = lpfc_get_ctx_list(phba,
					       cpumask_first(cpu_present_mask),
					       j);
		for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) {
			infop = lpfc_get_ctx_list(phba, i, j);
			infop->nvmet_ctx_next_cpu = last_infop;
			last_infop = infop;
		}
	}

	/* For all nvmet xris, allocate resources needed to process a
	 * received command on a per xri basis.
	 */
	idx = 0;
	cpu = cpumask_first(cpu_present_mask);
	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
		if (!ctx_buf) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6404 Ran out of memory for NVMET\n");
			return -ENOMEM;
		}

		ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
					   GFP_KERNEL);
		if (!ctx_buf->context) {
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6405 Ran out of NVMET "
					"context memory\n");
			return -ENOMEM;
		}
		ctx_buf->context->ctxbuf = ctx_buf;
		ctx_buf->context->state = LPFC_NVMET_STE_FREE;

		ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
		if (!ctx_buf->iocbq) {
			kfree(ctx_buf->context);
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6406 Ran out of NVMET iocb/WQEs\n");
			return -ENOMEM;
		}
		ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
		nvmewqe = ctx_buf->iocbq;
		wqe = &nvmewqe->wqe;

		/* Initialize WQE */
		memset(wqe, 0, sizeof(union lpfc_wqe));

		ctx_buf->iocbq->context1 = NULL;
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		if (!ctx_buf->sglq) {
			lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
			kfree(ctx_buf->context);
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6407 Ran out of NVMET XRIs\n");
			return -ENOMEM;
		}
		INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);

		/*
		 * Add ctx to MRQidx context list. Our initial assumption
		 * is MRQidx will be associated with CPUidx. This association
		 * can change on the fly.
		 */
		infop = lpfc_get_ctx_list(phba, cpu, idx);
		spin_lock(&infop->nvmet_ctx_list_lock);
		list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
		infop->nvmet_ctx_list_cnt++;
		spin_unlock(&infop->nvmet_ctx_list_lock);

		/* Spread ctx structures evenly across all MRQs */
		idx++;
		if (idx >= phba->cfg_nvmet_mrq) {
			idx = 0;
			cpu = cpumask_first(cpu_present_mask);
			continue;
		}
		cpu = cpumask_next(cpu, cpu_present_mask);
		if (cpu == nr_cpu_ids)
			cpu = cpumask_first(cpu_present_mask);
	}

	for_each_present_cpu(i) {
		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
			infop = lpfc_get_ctx_list(phba, i, j);
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
					"6408 TOTAL NVMET ctx for CPU %d "
					"MRQ %d: cnt %d nextcpu %p\n",
					i, j, infop->nvmet_ctx_list_cnt,
					infop->nvmet_ctx_next_cpu);
		}
	}
	return 0;
}
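
/* Register this port with the NVME target transport and initialize the
 * targetport private statistics counters.
 */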
int
lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmet_fc_port_info pinfo;
	int error;

	if (phba->targetport)
		return 0;

	error = lpfc_nvmet_setup_io_context(phba);
	if (error)
		return error;

	memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
	pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
	pinfo.port_id = vport->fc_myDID;

	/* We need to tell the transport layer + 1 because it takes page
	 * alignment into account. When space for the SGL is allocated we
	 * allocate + 3, one for cmd, one for rsp and one for this alignment
	 */
	lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
	lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
					     &phba->pcidev->dev,
					     &phba->targetport);
#else
	error = -ENOENT;
#endif
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6025 Cannot register NVME targetport x%x: "
				"portnm %llx nodenm %llx segs %d qs %d\n",
				error,
				pinfo.port_name, pinfo.node_name,
				lpfc_tgttemplate.max_sgl_segments,
				lpfc_tgttemplate.max_hw_queues);
		phba->targetport = NULL;
		phba->nvmet_support = 0;

		lpfc_nvmet_cleanup_io_context(phba);

	} else {
		tgtp = (struct lpfc_nvmet_tgtport *)
			phba->targetport->private;
		tgtp->phba = phba;

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
				"6026 Registered NVME "
				"targetport: %p, private %p "
				"portnm %llx nodenm %llx segs %d qs %d\n",
				phba->targetport, tgtp,
				pinfo.port_name, pinfo.node_name,
				lpfc_tgttemplate.max_sgl_segments,
				lpfc_tgttemplate.max_hw_queues);

		atomic_set(&tgtp->rcv_ls_req_in, 0);
		atomic_set(&tgtp->rcv_ls_req_out, 0);
		atomic_set(&tgtp->rcv_ls_req_drop, 0);
		atomic_set(&tgtp->xmt_ls_abort, 0);
		atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
		atomic_set(&tgtp->xmt_ls_rsp, 0);
		atomic_set(&tgtp->xmt_ls_drop, 0);
		atomic_set(&tgtp->xmt_ls_rsp_error, 0);
		atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
		atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
		atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
		atomic_set(&tgtp->xmt_fcp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_read, 0);
		atomic_set(&tgtp->xmt_fcp_write, 0);
		atomic_set(&tgtp->xmt_fcp_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_release, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
		atomic_set(&tgtp->xmt_fcp_abort, 0);
		atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
		atomic_set(&tgtp->xmt_abort_unsol, 0);
		atomic_set(&tgtp->xmt_abort_sol, 0);
		atomic_set(&tgtp->xmt_abort_rsp, 0);
		atomic_set(&tgtp->xmt_abort_rsp_error, 0);
		atomic_set(&tgtp->defer_ctx, 0);
		atomic_set(&tgtp->defer_fod, 0);
		atomic_set(&tgtp->defer_wqfull, 0);
	}
	return error;
}

int
lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	if (!phba->targetport)
		return 0;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6007 Update NVMET port %p did x%x\n",
			 phba->targetport, vport->fc_myDID);

	phba->targetport->port_id = vport->fc_myDID;
	return 0;
}

/**
 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the nvmet xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVMET aborted xri.
 **/
void
lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
			    struct sli4_wcqe_xri_aborted *axri)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_nodelist *ndlp;
	unsigned long iflag = 0;
	int rrq_empty = 0;
	bool released = false;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6317 XB aborted xri x%x rxid x%x\n", xri, rxid);

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
		return;

	if (phba->targetport) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
	}

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	list_for_each_entry_safe(ctxp, next_ctxp,
				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 list) {
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		spin_lock(&ctxp->ctxlock);
		/* Check if we already received a free context call
		 * and we have completed processing an abort situation.
		 */
		if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
		    !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
			list_del(&ctxp->list);
			released = true;
		}
		ctxp->flag &= ~LPFC_NVMET_XBUSY;
		spin_unlock(&ctxp->ctxlock);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);

		rrq_empty = list_empty(&phba->active_rrq_list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
		     ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
			lpfc_set_rrq_active(phba, ndlp,
					    ctxp->ctxbuf->sglq->sli4_lxritag,
					    rxid, 1);
			lpfc_sli4_abts_err_handler(phba, ndlp, axri);
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6318 XB aborted oxid %x flg x%x (%x)\n",
				ctxp->oxid, ctxp->flag, released);
		if (released)
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

		if (rrq_empty)
			lpfc_worker_wake_up(phba);
		return;
	}
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
#endif
}

int
lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
			   struct fc_frame_header *fc_hdr)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
	struct nvmefc_tgt_fcp_req *rsp;
	uint32_t sid;
	uint16_t xri;
	unsigned long iflag = 0;

	xri = be16_to_cpu(fc_hdr->fh_ox_id);
	sid = sli4_sid_from_fc_hdr(fc_hdr);

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	list_for_each_entry_safe(ctxp, next_ctxp,
				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 list) {
		if (ctxp->oxid != xri || ctxp->sid != sid)
			continue;

		xri = ctxp->ctxbuf->sglq->sli4_xritag;

		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVMET_ABTS_RCV;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		lpfc_nvmeio_data(phba,
			"NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
			xri, raw_smp_processor_id(), 0);

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);

		rsp = &ctxp->ctx.fcp_req;
		nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);

		/* Respond with BA_ACC accordingly */
		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
		return 0;
	}
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
			 xri, raw_smp_processor_id(), 1);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6320 NVMET Rcv ABTS:rjt xid x%x\n", xri);

	/* Respond with BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
#endif
	return 0;
}
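
/* Fail back WQEs queued on a full WQ: with a ctxp, flush only that IO;
 * with ctxp == NULL, flush the entire wqfull_list, completing each WQE
 * with a faked ABORT status.
 */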
static void
lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
			struct lpfc_nvmet_rcv_ctx *ctxp)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_iocbq *next_nvmewqeq;
	unsigned long iflags;
	struct lpfc_wcqe_complete wcqe;
	struct lpfc_wcqe_complete *wcqep;

	pring = wq->pring;
	wcqep = &wcqe;

	/* Fake an ABORT error code back to cmpl routine */
	memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
	bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
	wcqep->parameter = IOERR_ABORT_REQUESTED;

	spin_lock_irqsave(&pring->ring_lock, iflags);
	list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
				 &wq->wqfull_list, list) {
		if (ctxp) {
			/* Checking for a specific IO to flush */
			if (nvmewqeq->context2 == ctxp) {
				list_del(&nvmewqeq->list);
				spin_unlock_irqrestore(&pring->ring_lock,
						       iflags);
				lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
							  wcqep);
				return;
			}
			continue;
		}
		/* Flush all IOs */
		list_del(&nvmewqeq->list);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
		spin_lock_irqsave(&pring->ring_lock, iflags);
	}
	if (!ctxp)
		wq->q_flag &= ~HBA_NVMET_WQFULL;
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
}
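
/* Reissue WQEs that were deferred while the WQ was full, stopping again
 * if the WQ fills before the list drains.
 */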
void
lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
			  struct lpfc_queue *wq)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	unsigned long iflags;
	int rc;

	/*
	 * Some WQE slots are available, so try to re-issue anything
	 * on the WQ wqfull_list.
	 */
	pring = wq->pring;
	spin_lock_irqsave(&pring->ring_lock, iflags);
	while (!list_empty(&wq->wqfull_list)) {
		list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
				 list);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmewqeq->context2;
		rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
		spin_lock_irqsave(&pring->ring_lock, iflags);
		if (rc == -EBUSY) {
			/* WQ was full again, so put it back on the list */
			list_add(&nvmewqeq->list, &wq->wqfull_list);
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return;
		}
	}
	wq->q_flag &= ~HBA_NVMET_WQFULL;
	spin_unlock_irqrestore(&pring->ring_lock, iflags);

#endif
}

void
lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_queue *wq;
	uint32_t qidx;
	DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);

	if (phba->nvmet_support == 0)
		return;
	if (phba->targetport) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
			wq = phba->sli4_hba.hdwq[qidx].nvme_wq;
			lpfc_nvmet_wqfull_flush(phba, wq, NULL);
		}
		tgtp->tport_unreg_cmp = &tport_unreg_cmp;
		nvmet_fc_unregister_targetport(phba->targetport);
		if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
					msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6179 Unreg targetport %p timeout "
					"reached.\n", phba->targetport);
		lpfc_nvmet_cleanup_io_context(phba);
	}
	phba->targetport = NULL;
#endif
}

/**
 * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
 *
 * This routine is used for processing the WQE associated with an unsolicited
 * event. It first determines whether there is an existing ndlp that matches
 * the DID from the unsolicited WQE. If not, it will create a new one with
 * the DID from the unsolicited WQE. The NVME LS command from the unsolicited
 * WQE is then used to invoke the proper routine and to set up proper state
 * of the discovery state machine.
 **/
static void
lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct hbq_dmabuf *nvmebuf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t *payload;
	uint32_t size, oxid, sid, rc;

	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);

	if (!phba->targetport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6154 LS Drop IO x%x\n", oxid);
		oxid = 0;
		size = 0;
		sid = 0;
		ctxp = NULL;
		goto dropit;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	payload = (uint32_t *)(nvmebuf->dbuf.virt);
	size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
	sid = sli4_sid_from_fc_hdr(fc_hdr);

	ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
	if (ctxp == NULL) {
		atomic_inc(&tgtp->rcv_ls_req_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6155 LS Drop IO x%x: Alloc\n",
				oxid);
dropit:
		lpfc_nvmeio_data(phba, "NVMET LS DROP: "
				 "xri x%x sz %d from %06x\n",
				 oxid, size, sid);
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		return;
	}
	ctxp->phba = phba;
	ctxp->size = size;
	ctxp->oxid = oxid;
	ctxp->sid = sid;
	ctxp->state = LPFC_NVMET_STE_LS_RCV;
	ctxp->entry_cnt = 1;
	ctxp->rqb_buffer = (void *)nvmebuf;
	ctxp->hdwq = &phba->sli4_hba.hdwq[0];

	lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
			 oxid, size, sid);
	/*
	 * The calling sequence should be:
	 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp ->_req->done
	 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
	 */
	atomic_inc(&tgtp->rcv_ls_req_in);
	rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
				 payload, size);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
			"%08x %08x %08x\n", size, rc,
			*payload, *(payload+1), *(payload+2),
			*(payload+3), *(payload+4), *(payload+5));

	if (rc == 0) {
		atomic_inc(&tgtp->rcv_ls_req_out);
		return;
	}

	lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n",
			 oxid, size, sid);

	atomic_inc(&tgtp->rcv_ls_req_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
			ctxp->oxid, rc);

	/* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
	lpfc_in_buf_free(phba, &nvmebuf->dbuf);

	atomic_inc(&tgtp->xmt_ls_abort);
	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
#endif
}
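
/* Hand a received FCP command IU to the NVME target transport; handles
 * the success, deferred (-EOVERFLOW) and drop paths.
 */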
static void
lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
	struct lpfc_hba *phba = ctxp->phba;
	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t *payload, qno;
	uint32_t rc;
	unsigned long iflags;

	if (!nvmebuf) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6159 process_rcv_fcp_req, nvmebuf is NULL, "
				"oxid: x%x flg: x%x state: x%x\n",
				ctxp->oxid, ctxp->flag, ctxp->state);
		spin_lock_irqsave(&ctxp->ctxlock, iflags);
		lpfc_nvmet_defer_release(phba, ctxp);
		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
		return;
	}

	payload = (uint32_t *)(nvmebuf->dbuf.virt);
	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (ctxp->ts_isr_cmd)
		ctxp->ts_cmd_nvme = ktime_get_ns();
#endif
	/*
	 * The calling sequence should be:
	 * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done
	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
	 * the NVME command / FC header is stored.
	 * A buffer has already been reposted for this IO, so just free
	 * the nvmebuf.
	 */
	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
				  payload, ctxp->size);
	/* Process FCP command */
	if (rc == 0) {
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		spin_lock_irqsave(&ctxp->ctxlock, iflags);
		if ((ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) ||
		    (nvmebuf != ctxp->rqb_buffer)) {
			spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
			return;
		}
		ctxp->rqb_buffer = NULL;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
		return;
	}

	/* Processing of FCP command is deferred */
	if (rc == -EOVERFLOW) {
		lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
				 "from %06x\n",
				 ctxp->oxid, ctxp->size, ctxp->sid);
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		atomic_inc(&tgtp->defer_fod);
		spin_lock_irqsave(&ctxp->ctxlock, iflags);
		if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
			spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
			return;
		}
		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
		/*
		 * Post a replacement DMA buffer to RQ and defer
		 * freeing rcv buffer till .defer_rcv callback
		 */
		qno = nvmebuf->idx;
		lpfc_post_rq_buffer(
			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
		return;
	}

	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
			ctxp->oxid, rc,
			atomic_read(&tgtp->rcv_fcp_cmd_in),
			atomic_read(&tgtp->rcv_fcp_cmd_out),
			atomic_read(&tgtp->xmt_fcp_release));
	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
			 ctxp->oxid, ctxp->size, ctxp->sid);
	spin_lock_irqsave(&ctxp->ctxlock, iflags);
	lpfc_nvmet_defer_release(phba, ctxp);
	spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
	lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
#endif
}

static void
lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_ctxbuf *ctx_buf =
		container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);

	lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
#endif
}
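
/* When the current CPU's context list for this MRQ is empty, steal the
 * whole context list from another CPU's silo for the same MRQ.
 */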
static struct lpfc_nvmet_ctxbuf *
lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
			     struct lpfc_nvmet_ctx_info *current_infop)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
	struct lpfc_nvmet_ctx_info *get_infop;
	int i;

	/*
	 * The current_infop for the MRQ a NVME command IU was received
	 * on is empty. Our goal is to replenish this MRQ's context
	 * list from another CPU.
	 *
	 * First we need to pick a context list to start looking on.
	 * nvmet_ctx_start_cpu had available context the last time
	 * we needed to replenish this CPU where nvmet_ctx_next_cpu
	 * is just the next sequential CPU for this MRQ.
	 */
	if (current_infop->nvmet_ctx_start_cpu)
		get_infop = current_infop->nvmet_ctx_start_cpu;
	else
		get_infop = current_infop->nvmet_ctx_next_cpu;

	for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
		if (get_infop == current_infop) {
			get_infop = get_infop->nvmet_ctx_next_cpu;
			continue;
		}
		spin_lock(&get_infop->nvmet_ctx_list_lock);

		/* Just take the entire context list, if there are any */
		if (get_infop->nvmet_ctx_list_cnt) {
			list_splice_init(&get_infop->nvmet_ctx_list,
					 &current_infop->nvmet_ctx_list);
			current_infop->nvmet_ctx_list_cnt =
				get_infop->nvmet_ctx_list_cnt - 1;
			get_infop->nvmet_ctx_list_cnt = 0;
			spin_unlock(&get_infop->nvmet_ctx_list_lock);

			current_infop->nvmet_ctx_start_cpu = get_infop;
			list_remove_head(&current_infop->nvmet_ctx_list,
					 ctx_buf, struct lpfc_nvmet_ctxbuf,
					 list);
			return ctx_buf;
		}

		/* Otherwise, move on to the next CPU for this MRQ */
		spin_unlock(&get_infop->nvmet_ctx_list_lock);
		get_infop = get_infop->nvmet_ctx_next_cpu;
	}

#endif
	/* Nothing found, all contexts for the MRQ are in-flight */
	return NULL;
}
2018 /**
2019 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
2020 * @phba: pointer to lpfc hba data structure.
2021 * @idx: relative index of MRQ vector
2022 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
2023 * @isr_timestamp: in jiffies.
2024 * @cqflag: cq processing information regarding workload.
2025 *
2026 * This routine processes the FCP command IU associated with an
2027 * unsolicited event. It allocates a receive context for the new
2028 * exchange, initializes it from the FC header and received payload,
2029 * and then either hands the command to the NVME transport directly
2030 * or, depending on CQ load, defers that work to the context's
2031 * defer_work item.
2032 **/
2033 static void
2034 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
2035 uint32_t idx,
2036 struct rqb_dmabuf *nvmebuf,
2037 uint64_t isr_timestamp,
2038 uint8_t cqflag)
2039 {
2040 struct lpfc_nvmet_rcv_ctx *ctxp;
2041 struct lpfc_nvmet_tgtport *tgtp;
2042 struct fc_frame_header *fc_hdr;
2043 struct lpfc_nvmet_ctxbuf *ctx_buf;
2044 struct lpfc_nvmet_ctx_info *current_infop;
2045 uint32_t size, oxid, sid, qno;
2046 unsigned long iflag;
2047 int current_cpu;
2049 if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
2050 return;
2053 if (!nvmebuf || !phba->targetport) {
2054 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2055 "6157 NVMET FCP Drop IO\n");
2056 if (nvmebuf)
2057 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2058 return;
2059 }
2061 /*
2062 * Get a pointer to the context list for this MRQ based on
2063 * the CPU this MRQ IRQ is associated with. If the CPU association
2064 * changes from our initial assumption, the context list could
2065 * be empty, thus it would need to be replenished with the
2066 * context list from another CPU for this MRQ.
2067 */
2068 current_cpu = raw_smp_processor_id();
2069 current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
2070 spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
2071 if (current_infop->nvmet_ctx_list_cnt) {
2072 list_remove_head(&current_infop->nvmet_ctx_list,
2073 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
2074 current_infop->nvmet_ctx_list_cnt--;
2075 } else {
2076 ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
2077 }
2078 spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
2080 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
2081 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2082 size = nvmebuf->bytes_recv;
2084 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2085 if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
2086 if (current_cpu < LPFC_CHECK_CPU_CNT) {
2087 if (idx != current_cpu)
2088 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2089 "6703 CPU Check rcv: "
2090 "cpu %d expect %d\n",
2091 current_cpu, idx);
2092 phba->sli4_hba.hdwq[idx].cpucheck_rcv_io[current_cpu]++;
2093 }
2094 }
2095 #endif
2097 lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
2098 oxid, size, raw_smp_processor_id());
2100 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2102 if (!ctx_buf) {
2103 /* Queue this NVME IO to process later */
2104 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
2105 list_add_tail(&nvmebuf->hbuf.list,
2106 &phba->sli4_hba.lpfc_nvmet_io_wait_list);
2107 phba->sli4_hba.nvmet_io_wait_cnt++;
2108 phba->sli4_hba.nvmet_io_wait_total++;
2109 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
2110 iflag);
2112 /* Post a brand new DMA buffer to RQ */
2113 qno = nvmebuf->idx;
2114 lpfc_post_rq_buffer(
2115 phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2116 phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2118 atomic_inc(&tgtp->defer_ctx);
2119 return;
2120 }
2122 sid = sli4_sid_from_fc_hdr(fc_hdr);
2124 ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
2125 if (ctxp->state != LPFC_NVMET_STE_FREE) {
2126 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2127 "6414 NVMET Context corrupt %d %d oxid x%x\n",
2128 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
2129 }
2130 ctxp->wqeq = NULL;
2131 ctxp->txrdy = NULL;
2132 ctxp->offset = 0;
2133 ctxp->phba = phba;
2134 ctxp->size = size;
2135 ctxp->oxid = oxid;
2136 ctxp->sid = sid;
2137 ctxp->idx = idx;
2138 ctxp->state = LPFC_NVMET_STE_RCV;
2139 ctxp->entry_cnt = 1;
2140 ctxp->flag = 0;
2141 ctxp->ctxbuf = ctx_buf;
2142 ctxp->rqb_buffer = (void *)nvmebuf;
2143 ctxp->hdwq = NULL;
2144 spin_lock_init(&ctxp->ctxlock);
2146 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2147 if (isr_timestamp) {
2148 ctxp->ts_isr_cmd = isr_timestamp;
2149 ctxp->ts_cmd_nvme = 0;
2150 ctxp->ts_nvme_data = 0;
2151 ctxp->ts_data_wqput = 0;
2152 ctxp->ts_isr_data = 0;
2153 ctxp->ts_data_nvme = 0;
2154 ctxp->ts_nvme_status = 0;
2155 ctxp->ts_status_wqput = 0;
2156 ctxp->ts_isr_status = 0;
2157 ctxp->ts_status_nvme = 0;
2158 }
2159 #endif
2160 atomic_inc(&tgtp->rcv_fcp_cmd_in);
2161 /* check for cq processing load */
2162 if (!cqflag) {
2163 lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2164 return;
2165 }
2167 if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
2168 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2169 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
2170 "6325 Unable to queue work for oxid x%x. "
2171 "FCP Drop IO [x%x x%x x%x]\n",
2172 ctxp->oxid,
2173 atomic_read(&tgtp->rcv_fcp_cmd_in),
2174 atomic_read(&tgtp->rcv_fcp_cmd_out),
2175 atomic_read(&tgtp->xmt_fcp_release));
2177 spin_lock_irqsave(&ctxp->ctxlock, iflag);
2178 lpfc_nvmet_defer_release(phba, ctxp);
2179 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
2180 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
2181 }
2182 }
2184 /**
2185 * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
2186 * @phba: pointer to lpfc hba data structure.
2187 * @pring: pointer to a SLI ring.
2188 * @piocb: pointer to the driver iocb that carries the received nvme buffer.
2190 * This routine is used to process an unsolicited event received from a SLI
2191 * (Service Level Interface) ring. The actual processing of the data buffer
2192 * associated with the unsolicited event is done by invoking the routine
2193 * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from
2194 * the SLI RQ on which the unsolicited event was received.
2195 **/
2196 static void
2197 lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2198 struct lpfc_iocbq *piocb)
2199 {
2200 struct lpfc_dmabuf *d_buf;
2201 struct hbq_dmabuf *nvmebuf;
2203 d_buf = piocb->context2;
2204 nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2206 if (phba->nvmet_support == 0) {
2207 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2208 return;
2209 }
2210 lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
2211 }
2213 /**
2214 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
2215 * @phba: pointer to lpfc hba data structure.
2216 * @idx: relative index of MRQ vector
2217 * @nvmebuf: pointer to received nvme data structure.
2218 * @isr_timestamp: in jiffies.
2219 * @cqflag: cq processing information regarding workload.
2221 * This routine is used to process an unsolicited event received from a SLI
2222 * (Service Level Interface) ring. The actual processing of the data buffer
2223 * associated with the unsolicited event is done by invoking the routine
2224 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from
2225 * the SLI RQ on which the unsolicited event was received.
2226 **/
2227 static void
2228 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
2229 uint32_t idx,
2230 struct rqb_dmabuf *nvmebuf,
2231 uint64_t isr_timestamp,
2232 uint8_t cqflag)
2233 {
2234 if (phba->nvmet_support == 0) {
2235 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2236 return;
2237 }
2238 lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
2239 }
2241 /**
2242 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
2243 * @phba: pointer to a host N_Port data structure.
2244 * @ctxp: Context info for NVME LS Request
2245 * @rspbuf: DMA buffer of NVME command.
2246 * @rspsize: size of the NVME command.
2248 * This routine allocates a WQE data structure from the driver's
2249 * WQE free list and prepares it as the generic XMIT_SEQUENCE WQE used
2250 * to carry an NVME LS response; the command-specific fields are set
2251 * up later by the caller. It fills in the Buffer Descriptor Entry
2252 * (BDE) for the response payload. The reference count on the ndlp is
2253 * incremented by 1 and the reference to the ndlp is put into
2254 * context1 of the WQE data structure for this WQE to hold the ndlp
2255 * reference for the command's callback function to access later.
2256 *
2262 * Returns:
2263 * Pointer to the newly allocated/prepared nvme wqe data structure
2264 * NULL - when nvme wqe data structure allocation/preparation failed
2265 **/
2266 static struct lpfc_iocbq *
2267 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
2268 struct lpfc_nvmet_rcv_ctx *ctxp,
2269 dma_addr_t rspbuf, uint16_t rspsize)
2270 {
2271 struct lpfc_nodelist *ndlp;
2272 struct lpfc_iocbq *nvmewqe;
2273 union lpfc_wqe128 *wqe;
2275 if (!lpfc_is_link_up(phba)) {
2276 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2277 "6104 NVMET prep LS wqe: link err: "
2278 "NPORT x%x oxid:x%x ste %d\n",
2279 ctxp->sid, ctxp->oxid, ctxp->state);
2280 return NULL;
2281 }
2283 /* Allocate buffer for command wqe */
2284 nvmewqe = lpfc_sli_get_iocbq(phba);
2285 if (nvmewqe == NULL) {
2286 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2287 "6105 NVMET prep LS wqe: No WQE: "
2288 "NPORT x%x oxid x%x ste %d\n",
2289 ctxp->sid, ctxp->oxid, ctxp->state);
2290 return NULL;
2291 }
2293 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2294 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2295 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2296 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2297 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2298 "6106 NVMET prep LS wqe: No ndlp: "
2299 "NPORT x%x oxid x%x ste %d\n",
2300 ctxp->sid, ctxp->oxid, ctxp->state);
2301 goto nvme_wqe_free_wqeq_exit;
2302 }
2303 ctxp->wqeq = nvmewqe;
2305 /* prevent preparing wqe with NULL ndlp reference */
2306 nvmewqe->context1 = lpfc_nlp_get(ndlp);
2307 if (nvmewqe->context1 == NULL)
2308 goto nvme_wqe_free_wqeq_exit;
2309 nvmewqe->context2 = ctxp;
2311 wqe = &nvmewqe->wqe;
2312 memset(wqe, 0, sizeof(union lpfc_wqe));
2314 /* Words 0 - 2 */
2315 wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2316 wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
2317 wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
2318 wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
2324 /* Word 5 */
2325 bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
2326 bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
2327 bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
2328 bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
2329 bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
2331 /* Word 6 */
2332 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
2333 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2334 bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
2336 /* Word 7 */
2337 bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
2338 CMD_XMIT_SEQUENCE64_WQE);
2339 bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
2340 bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
2341 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
2343 /* Word 8 */
2344 wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
2346 /* Word 9 */
2347 bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
2348 /* Needs to be set by caller */
2349 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
2351 /* Word 10 */
2352 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
2353 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2354 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
2355 LPFC_WQE_LENLOC_WORD12);
2356 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
2358 /* Word 11 */
2359 bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
2360 LPFC_WQE_CQ_ID_DEFAULT);
2361 bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
2362 OTHER_COMMAND);
2364 /* Word 12 */
2365 wqe->xmit_sequence.xmit_len = rspsize;
2367 nvmewqe->retry = 1;
2368 nvmewqe->vport = phba->pport;
2369 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2370 nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
2372 /* Xmit NVMET response to remote NPORT <did> */
2373 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2374 "6039 Xmit NVMET LS response to remote "
2375 "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
2376 ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
2377 rspsize);
2379 return nvmewqe;
2380 nvme_wqe_free_wqeq_exit:
2381 nvmewqe->context2 = NULL;
2382 nvmewqe->context3 = NULL;
2383 lpfc_sli_release_iocbq(phba, nvmewqe);
2384 return NULL;
2385 }
2388 static struct lpfc_iocbq *
2389 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
2390 struct lpfc_nvmet_rcv_ctx *ctxp)
2391 {
2392 struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
2393 struct lpfc_nvmet_tgtport *tgtp;
2394 struct sli4_sge *sgl;
2395 struct lpfc_nodelist *ndlp;
2396 struct lpfc_iocbq *nvmewqe;
2397 struct scatterlist *sgel;
2398 union lpfc_wqe128 *wqe;
2399 struct ulp_bde64 *bde;
2400 uint32_t *txrdy;
2401 dma_addr_t physaddr;
2402 int i, cnt;
2403 int do_pbde;
2404 int xc = 1;
2406 if (!lpfc_is_link_up(phba)) {
2407 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2408 "6107 NVMET prep FCP wqe: link err:"
2409 "NPORT x%x oxid x%x ste %d\n",
2410 ctxp->sid, ctxp->oxid, ctxp->state);
2414 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2415 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2416 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2417 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2418 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2419 "6108 NVMET prep FCP wqe: no ndlp: "
2420 "NPORT x%x oxid x%x ste %d\n",
2421 ctxp->sid, ctxp->oxid, ctxp->state);
2422 return NULL;
2423 }
2425 if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
2426 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2427 "6109 NVMET prep FCP wqe: seg cnt err: "
2428 "NPORT x%x oxid x%x ste %d cnt %d\n",
2429 ctxp->sid, ctxp->oxid, ctxp->state,
2430 phba->cfg_nvme_seg_cnt);
2434 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2435 nvmewqe = ctxp->wqeq;
2436 if (nvmewqe == NULL) {
2437 /* Allocate buffer for command wqe */
2438 nvmewqe = ctxp->ctxbuf->iocbq;
2439 if (nvmewqe == NULL) {
2440 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2441 "6110 NVMET prep FCP wqe: No "
2442 "WQE: NPORT x%x oxid x%x ste %d\n",
2443 ctxp->sid, ctxp->oxid, ctxp->state);
2444 return NULL;
2445 }
2446 ctxp->wqeq = nvmewqe;
2447 xc = 0; /* create new XRI */
2448 nvmewqe->sli4_lxritag = NO_XRI;
2449 nvmewqe->sli4_xritag = NO_XRI;
2450 }
2452 /* Sanity check */
2453 if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
2454 (ctxp->entry_cnt == 1)) ||
2455 (ctxp->state == LPFC_NVMET_STE_DATA)) {
2456 wqe = &nvmewqe->wqe;
2457 } else {
2458 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2459 "6111 Wrong state NVMET FCP: %d cnt %d\n",
2460 ctxp->state, ctxp->entry_cnt);
2461 return NULL;
2462 }
2464 sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
2465 switch (rsp->op) {
2466 case NVMET_FCOP_READDATA:
2467 case NVMET_FCOP_READDATA_RSP:
2468 /* From the tsend template, initialize words 7 - 11 */
2469 memcpy(&wqe->words[7],
2470 &lpfc_tsend_cmd_template.words[7],
2471 sizeof(uint32_t) * 5);
2473 /* Words 0 - 2 : The first sg segment */
2474 sgel = &rsp->sg[0];
2475 physaddr = sg_dma_address(sgel);
2476 wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2477 wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
2478 wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
2479 wqe->fcp_tsend.bde.addrHigh =
2480 cpu_to_le32(putPaddrHigh(physaddr));
2482 /* Word 3 */
2483 wqe->fcp_tsend.payload_offset_len = 0;
2485 /* Word 4 */
2486 wqe->fcp_tsend.relative_offset = ctxp->offset;
2488 /* Word 5 */
2489 wqe->fcp_tsend.reserved = 0;
2491 /* Word 6 */
2492 bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
2493 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2494 bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
2495 nvmewqe->sli4_xritag);
2497 /* Word 7 - set ar later */
2499 /* Word 8 */
2500 wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
2502 /* Word 9 */
2503 bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
2504 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
2506 /* Word 10 - set wqes later, in template xc=1 */
2507 if (!xc)
2508 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
2510 /* Word 11 - set sup, irsp, irsplen later */
2513 /* Word 12 */
2514 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2516 /* Setup 2 SKIP SGEs */
2517 sgl->addr_hi = 0;
2518 sgl->addr_lo = 0;
2519 sgl->word2 = 0;
2520 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2521 sgl->word2 = cpu_to_le32(sgl->word2);
2522 sgl->sge_len = 0;
2523 sgl++;
2524 sgl->addr_hi = 0;
2525 sgl->addr_lo = 0;
2526 sgl->word2 = 0;
2527 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2528 sgl->word2 = cpu_to_le32(sgl->word2);
2529 sgl->sge_len = 0;
2530 sgl++;
2531 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
2532 atomic_inc(&tgtp->xmt_fcp_read_rsp);
2534 /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2536 if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
2537 if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
2538 bf_set(wqe_sup,
2539 &wqe->fcp_tsend.wqe_com, 1);
2540 } else {
2541 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
2542 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
2543 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
2544 ((rsp->rsplen >> 2) - 1));
2545 memcpy(&wqe->words[16], rsp->rspaddr,
2546 rsp->rsplen);
2547 }
2548 } else {
2549 atomic_inc(&tgtp->xmt_fcp_read);
2551 /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2552 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
2553 }
2554 break;
2556 case NVMET_FCOP_WRITEDATA:
2557 /* From the treceive template, initialize words 3 - 11 */
2558 memcpy(&wqe->words[3],
2559 &lpfc_treceive_cmd_template.words[3],
2560 sizeof(uint32_t) * 9);
2562 /* Words 0 - 2 : The first sg segment */
2563 txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
2564 GFP_KERNEL, &physaddr);
2565 if (!txrdy) {
2566 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2567 "6041 Bad txrdy buffer: oxid x%x\n",
2568 ctxp->oxid);
2569 return NULL;
2570 }
2571 ctxp->txrdy = txrdy;
2572 ctxp->txrdy_phys = physaddr;
2573 wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2574 wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
2575 wqe->fcp_treceive.bde.addrLow =
2576 cpu_to_le32(putPaddrLow(physaddr));
2577 wqe->fcp_treceive.bde.addrHigh =
2578 cpu_to_le32(putPaddrHigh(physaddr));
2580 /* Word 4 */
2581 wqe->fcp_treceive.relative_offset = ctxp->offset;
2583 /* Word 6 */
2584 bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
2585 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2586 bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
2587 nvmewqe->sli4_xritag);
2589 /* Word 7 */
2591 /* Word 8 */
2592 wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
2594 /* Word 9 */
2595 bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
2596 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
2598 /* Word 10 - in template xc=1 */
2599 if (!xc)
2600 bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
2602 /* Word 11 - set pbde later */
2603 if (phba->cfg_enable_pbde) {
2604 do_pbde = 1;
2605 } else {
2606 bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
2607 do_pbde = 0;
2608 }
2610 /* Word 12 */
2611 wqe->fcp_treceive.fcp_data_len = rsp->transfer_length;
2613 /* Setup 1 TXRDY and 1 SKIP SGE */
2614 txrdy[0] = 0;
2615 txrdy[1] = cpu_to_be32(rsp->transfer_length);
2616 txrdy[2] = 0;
2618 sgl->addr_hi = putPaddrHigh(physaddr);
2619 sgl->addr_lo = putPaddrLow(physaddr);
2620 sgl->word2 = 0;
2621 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2622 sgl->word2 = cpu_to_le32(sgl->word2);
2623 sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
2624 sgl++;
2625 sgl->addr_hi = 0;
2626 sgl->addr_lo = 0;
2627 sgl->word2 = 0;
2628 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2629 sgl->word2 = cpu_to_le32(sgl->word2);
2630 sgl->sge_len = 0;
2631 sgl++;
2632 atomic_inc(&tgtp->xmt_fcp_write);
2633 break;
2635 case NVMET_FCOP_RSP:
2636 /* From the trsp template, initialize words 4 - 11 */
2637 memcpy(&wqe->words[4],
2638 &lpfc_trsp_cmd_template.words[4],
2639 sizeof(uint32_t) * 8);
2641 /* Words 0 - 2 */
2642 physaddr = rsp->rspdma;
2643 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2644 wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
2645 wqe->fcp_trsp.bde.addrLow =
2646 cpu_to_le32(putPaddrLow(physaddr));
2647 wqe->fcp_trsp.bde.addrHigh =
2648 cpu_to_le32(putPaddrHigh(physaddr));
2650 /* Word 3 */
2651 wqe->fcp_trsp.response_len = rsp->rsplen;
2653 /* Word 6 */
2654 bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
2655 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2656 bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
2657 nvmewqe->sli4_xritag);
2659 /* Word 7 */
2661 /* Word 8 */
2662 wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
2664 /* Word 9 */
2665 bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
2666 bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
2668 /* Word 10 */
2669 if (xc)
2670 bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);
2672 /* Word 11 */
2673 /* In template wqes=0 irsp=0 irsplen=0 - good response */
2674 if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
2675 /* Bad response - embed it */
2676 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
2677 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
2678 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
2679 ((rsp->rsplen >> 2) - 1));
2680 memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
2681 }
2684 /* Word 12 */
2685 wqe->fcp_trsp.rsvd_12_15[0] = 0;
2687 /* Use rspbuf, NOT sg list */
2688 rsp->sg_cnt = 0;
2690 atomic_inc(&tgtp->xmt_fcp_rsp);
2691 break;
2693 default:
2694 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2695 "6064 Unknown Rsp Op %d\n",
2696 rsp->op);
2697 return NULL;
2698 }
2701 nvmewqe->vport = phba->pport;
2702 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2703 nvmewqe->context1 = ndlp;
2705 for (i = 0; i < rsp->sg_cnt; i++) {
2706 sgel = &rsp->sg[i];
2707 physaddr = sg_dma_address(sgel);
2708 cnt = sg_dma_len(sgel);
2709 sgl->addr_hi = putPaddrHigh(physaddr);
2710 sgl->addr_lo = putPaddrLow(physaddr);
2711 sgl->word2 = 0;
2712 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2713 bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
2714 if ((i+1) == rsp->sg_cnt)
2715 bf_set(lpfc_sli4_sge_last, sgl, 1);
2716 sgl->word2 = cpu_to_le32(sgl->word2);
2717 sgl->sge_len = cpu_to_le32(cnt);
2718 if (i == 0) {
2719 bde = (struct ulp_bde64 *)&wqe->words[13];
2720 if (do_pbde) {
2721 /* Words 13-15 (PBDE) */
2722 bde->addrLow = sgl->addr_lo;
2723 bde->addrHigh = sgl->addr_hi;
2724 bde->tus.f.bdeSize =
2725 le32_to_cpu(sgl->sge_len);
2726 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2727 bde->tus.w = cpu_to_le32(bde->tus.w);
2728 } else {
2729 memset(bde, 0, sizeof(struct ulp_bde64));
2730 }
2731 }
2732 sgl++;
2733 ctxp->offset += cnt;
2734 }
2735 ctxp->state = LPFC_NVMET_STE_DATA;
2736 ctxp->entry_cnt++;
2737 return nvmewqe;
2738 }
2740 /**
2741 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
2742 * @phba: Pointer to HBA context object.
2743 * @cmdwqe: Pointer to driver command WQE object.
2744 * @wcqe: Pointer to driver response CQE object.
2746 * The function is called from SLI ring event handler with no
2747 * lock held. This function is the completion handler for NVME ABTS for FCP
2748 * cmds. The function frees memory resources used for the NVME commands.
2749 **/
2750 static void
2751 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2752 struct lpfc_wcqe_complete *wcqe)
2753 {
2754 struct lpfc_nvmet_rcv_ctx *ctxp;
2755 struct lpfc_nvmet_tgtport *tgtp;
2756 uint32_t result;
2757 unsigned long flags;
2758 bool released = false;
2760 ctxp = cmdwqe->context2;
2761 result = wcqe->parameter;
2763 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2764 if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2765 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2767 spin_lock_irqsave(&ctxp->ctxlock, flags);
2768 ctxp->state = LPFC_NVMET_STE_DONE;
2770 /* Check if we already received a free context call
2771 * and we have completed processing an abort situation.
2772 */
2773 if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2774 !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2775 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
2776 list_del(&ctxp->list);
2777 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
2778 released = true;
2779 }
2780 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2781 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2782 atomic_inc(&tgtp->xmt_abort_rsp);
2784 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2785 "6165 ABORT cmpl: xri x%x flg x%x (%d) "
2786 "WCQE: %08x %08x %08x %08x\n",
2787 ctxp->oxid, ctxp->flag, released,
2788 wcqe->word0, wcqe->total_data_placed,
2789 result, wcqe->word3);
2791 cmdwqe->context2 = NULL;
2792 cmdwqe->context3 = NULL;
2793 /*
2794 * if transport has released ctx, then can reuse it. Otherwise,
2795 * will be recycled by transport release call.
2796 */
2797 if (released)
2798 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
2800 /* This is the iocbq for the abort, not the command */
2801 lpfc_sli_release_iocbq(phba, cmdwqe);
2803 /* Since iaab/iaar are NOT set, there is no work left.
2804 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
2805 * should have been called already.
2806 */
2807 }
2809 /**
2810 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
2811 * @phba: Pointer to HBA context object.
2812 * @cmdwqe: Pointer to driver command WQE object.
2813 * @wcqe: Pointer to driver response CQE object.
2815 * The function is called from SLI ring event handler with no
2816 * lock held. This function is the completion handler for NVME ABTS for FCP
2817 * cmds. The function frees memory resources used for the NVME commands.
2818 **/
2819 static void
2820 lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2821 struct lpfc_wcqe_complete *wcqe)
2822 {
2823 struct lpfc_nvmet_rcv_ctx *ctxp;
2824 struct lpfc_nvmet_tgtport *tgtp;
2825 unsigned long flags;
2826 uint32_t result;
2827 bool released = false;
2829 ctxp = cmdwqe->context2;
2830 result = wcqe->parameter;
2832 if (!ctxp) {
2833 /* if context is clear, related io already complete */
2834 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2835 "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
2836 wcqe->word0, wcqe->total_data_placed,
2837 result, wcqe->word3);
2838 return;
2839 }
2841 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2842 spin_lock_irqsave(&ctxp->ctxlock, flags);
2843 if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2844 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2846 /* Sanity check */
2847 if (ctxp->state != LPFC_NVMET_STE_ABORT) {
2848 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2849 "6112 ABTS Wrong state:%d oxid x%x\n",
2850 ctxp->state, ctxp->oxid);
2851 }
2853 /* Check if we already received a free context call
2854 * and we have completed processing an abort situation.
2855 */
2856 ctxp->state = LPFC_NVMET_STE_DONE;
2857 if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2858 !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2859 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
2860 list_del(&ctxp->list);
2861 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
2862 released = true;
2863 }
2864 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2865 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2866 atomic_inc(&tgtp->xmt_abort_rsp);
2868 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2869 "6316 ABTS cmpl xri x%x flg x%x (%x) "
2870 "WCQE: %08x %08x %08x %08x\n",
2871 ctxp->oxid, ctxp->flag, released,
2872 wcqe->word0, wcqe->total_data_placed,
2873 result, wcqe->word3);
2875 cmdwqe->context2 = NULL;
2876 cmdwqe->context3 = NULL;
2877 /*
2878 * if transport has released ctx, then can reuse it. Otherwise,
2879 * will be recycled by transport release call.
2880 */
2881 if (released)
2882 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
2884 /* Since iaab/iaar are NOT set, there is no work left.
2885 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
2886 * should have been called already.
2887 */
2888 }
2890 /**
2891 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
2892 * @phba: Pointer to HBA context object.
2893 * @cmdwqe: Pointer to driver command WQE object.
2894 * @wcqe: Pointer to driver response CQE object.
2896 * The function is called from SLI ring event handler with no
2897 * lock held. This function is the completion handler for NVME ABTS for LS
2898 * cmds. The function frees memory resources used for the NVME commands.
2899 **/
2900 static void
2901 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2902 struct lpfc_wcqe_complete *wcqe)
2903 {
2904 struct lpfc_nvmet_rcv_ctx *ctxp;
2905 struct lpfc_nvmet_tgtport *tgtp;
2906 uint32_t result;
2908 ctxp = cmdwqe->context2;
2909 result = wcqe->parameter;
2911 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2912 atomic_inc(&tgtp->xmt_ls_abort_cmpl);
2914 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2915 "6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
2916 ctxp, wcqe->word0, wcqe->total_data_placed,
2917 result, wcqe->word3);
2919 if (!ctxp) {
2920 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2921 "6415 NVMET LS Abort No ctx: WCQE: "
2922 "%08x %08x %08x %08x\n",
2923 wcqe->word0, wcqe->total_data_placed,
2924 result, wcqe->word3);
2926 lpfc_sli_release_iocbq(phba, cmdwqe);
2927 return;
2928 }
2930 if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
2931 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2932 "6416 NVMET LS abort cmpl state mismatch: "
2933 "oxid x%x: %d %d\n",
2934 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
2935 }
2937 cmdwqe->context2 = NULL;
2938 cmdwqe->context3 = NULL;
2939 lpfc_sli_release_iocbq(phba, cmdwqe);
2940 kfree(ctxp);
2941 }
2943 static int
2944 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
2945 struct lpfc_nvmet_rcv_ctx *ctxp,
2946 uint32_t sid, uint16_t xri)
2947 {
2948 struct lpfc_nvmet_tgtport *tgtp;
2949 struct lpfc_iocbq *abts_wqeq;
2950 union lpfc_wqe128 *wqe_abts;
2951 struct lpfc_nodelist *ndlp;
2953 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2954 "6067 ABTS: sid %x xri x%x/x%x\n",
2955 sid, xri, ctxp->wqeq->sli4_xritag);
2957 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2959 ndlp = lpfc_findnode_did(phba->pport, sid);
2960 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2961 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2962 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2963 atomic_inc(&tgtp->xmt_abort_rsp_error);
2964 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2965 "6134 Drop ABTS - wrong NDLP state x%x.\n",
2966 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
2968 /* No failure to an ABTS request. */
2969 return 0;
2970 }
2972 abts_wqeq = ctxp->wqeq;
2973 wqe_abts = &abts_wqeq->wqe;
2975 /*
2976 * Since we zero the whole WQE, we need to ensure we set the WQE fields
2977 * that were initialized in lpfc_sli4_nvmet_alloc.
2978 */
2979 memset(wqe_abts, 0, sizeof(union lpfc_wqe));
2981 /* Word 5 */
2982 bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
2983 bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
2984 bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
2985 bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
2986 bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
2989 bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
2990 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2991 bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
2992 abts_wqeq->sli4_xritag);
2995 bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
2996 CMD_XMIT_SEQUENCE64_WQE);
2997 bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
2998 bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
2999 bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
3002 wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
3005 bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
3006 /* Needs to be set by caller */
3007 bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
3010 bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
3011 bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
3012 bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
3013 LPFC_WQE_LENLOC_WORD12);
3014 bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
3015 bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
3018 bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
3019 LPFC_WQE_CQ_ID_DEFAULT);
3020 bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
3021 OTHER_COMMAND);
3023 abts_wqeq->vport = phba->pport;
3024 abts_wqeq->context1 = ndlp;
3025 abts_wqeq->context2 = ctxp;
3026 abts_wqeq->context3 = NULL;
3027 abts_wqeq->rsvd2 = 0;
3028 /* hba_wqidx should already be setup from command we are aborting */
3029 abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
3030 abts_wqeq->iocb.ulpLe = 1;
3032 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3033 "6069 Issue ABTS to xri x%x reqtag x%x\n",
3034 xri, abts_wqeq->iotag);
3035 return 1;
3036 }
3038 static int
3039 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
3040 struct lpfc_nvmet_rcv_ctx *ctxp,
3041 uint32_t sid, uint16_t xri)
3042 {
3043 struct lpfc_nvmet_tgtport *tgtp;
3044 struct lpfc_iocbq *abts_wqeq;
3045 union lpfc_wqe128 *abts_wqe;
3046 struct lpfc_nodelist *ndlp;
3047 unsigned long flags;
3048 int rc;
3050 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3051 if (!ctxp->wqeq) {
3052 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3053 ctxp->wqeq->hba_wqidx = 0;
3054 }
3056 ndlp = lpfc_findnode_did(phba->pport, sid);
3057 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
3058 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3059 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3060 atomic_inc(&tgtp->xmt_abort_rsp_error);
3061 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3062 "6160 Drop ABORT - wrong NDLP state x%x.\n",
3063 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
3065 /* No failure to an ABTS request. */
3066 spin_lock_irqsave(&ctxp->ctxlock, flags);
3067 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3068 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3069 return 0;
3070 }
3072 /* Issue ABTS for this WQE based on iotag */
3073 ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
3074 spin_lock_irqsave(&ctxp->ctxlock, flags);
3075 if (!ctxp->abort_wqeq) {
3076 atomic_inc(&tgtp->xmt_abort_rsp_error);
3077 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3078 "6161 ABORT failed: No wqeqs: "
3079 "xri: x%x\n", ctxp->oxid);
3080 /* No failure to an ABTS request. */
3081 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3082 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3083 return 0;
3084 }
3085 abts_wqeq = ctxp->abort_wqeq;
3086 abts_wqe = &abts_wqeq->wqe;
3087 ctxp->state = LPFC_NVMET_STE_ABORT;
3088 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3090 /* Announce entry to new IO submit field. */
3091 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3092 "6162 ABORT Request to rport DID x%06x "
3093 "for xri x%x x%x\n",
3094 ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
3096 /* If the hba is getting reset, this flag is set. It is
3097 * cleared when the reset is complete and rings reestablished.
3098 */
3099 spin_lock_irqsave(&phba->hbalock, flags);
3100 /* driver queued commands are in process of being flushed */
3101 if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
3102 spin_unlock_irqrestore(&phba->hbalock, flags);
3103 atomic_inc(&tgtp->xmt_abort_rsp_error);
3104 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
3105 "6163 Driver in reset cleanup - flushing "
3106 "NVME Req now. hba_flag x%x oxid x%x\n",
3107 phba->hba_flag, ctxp->oxid);
3108 lpfc_sli_release_iocbq(phba, abts_wqeq);
3109 spin_lock_irqsave(&ctxp->ctxlock, flags);
3110 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3111 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3112 return 0;
3113 }
3115 /* Outstanding abort is in progress */
3116 if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
3117 spin_unlock_irqrestore(&phba->hbalock, flags);
3118 atomic_inc(&tgtp->xmt_abort_rsp_error);
3119 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
3120 "6164 Outstanding NVME I/O Abort Request "
3121 "still pending on oxid x%x\n",
3123 lpfc_sli_release_iocbq(phba, abts_wqeq);
3124 spin_lock_irqsave(&ctxp->ctxlock, flags);
3125 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3126 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3127 return 0;
3128 }
3130 /* Ready - mark outstanding as aborted by driver. */
3131 abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
3133 /* WQEs are reused. Clear stale data and set key fields to
3134 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
3135 */
3136 memset(abts_wqe, 0, sizeof(union lpfc_wqe));
3139 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
3142 bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
3143 bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
3145 /* word 8 - tell the FW to abort the IO associated with this
3146 * outstanding exchange ID.
3148 abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;
3150 /* word 9 - this is the iotag for the abts_wqe completion. */
3151 bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
3152 abts_wqeq->iotag);
3154 /* word 10 */
3155 bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
3156 bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
3158 /* word 11 */
3159 bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
3160 bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
3161 bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
3163 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
3164 abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
3165 abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
3166 abts_wqeq->iocb_cmpl = 0;
3167 abts_wqeq->iocb_flag |= LPFC_IO_NVME;
3168 abts_wqeq->context2 = ctxp;
3169 abts_wqeq->vport = phba->pport;
3170 if (!ctxp->hdwq)
3171 ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3173 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3174 spin_unlock_irqrestore(&phba->hbalock, flags);
3175 if (rc == WQE_SUCCESS) {
3176 atomic_inc(&tgtp->xmt_abort_sol);
3177 return 0;
3178 }
3180 atomic_inc(&tgtp->xmt_abort_rsp_error);
3181 spin_lock_irqsave(&ctxp->ctxlock, flags);
3182 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3183 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3184 lpfc_sli_release_iocbq(phba, abts_wqeq);
3185 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3186 "6166 Failed ABORT issue_wqe with status x%x "
3187 "for oxid x%x.\n",
3188 rc, ctxp->oxid);
3189 return 1;
3190 }
3192 static int
3193 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
3194 struct lpfc_nvmet_rcv_ctx *ctxp,
3195 uint32_t sid, uint16_t xri)
3196 {
3197 struct lpfc_nvmet_tgtport *tgtp;
3198 struct lpfc_iocbq *abts_wqeq;
3199 unsigned long flags;
3200 bool released = false;
3201 int rc;
3203 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3204 if (!ctxp->wqeq) {
3205 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3206 ctxp->wqeq->hba_wqidx = 0;
3207 }
3209 if (ctxp->state == LPFC_NVMET_STE_FREE) {
3210 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3211 "6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
3212 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
3213 rc = WQE_BUSY;
3214 goto aerr;
3215 }
3216 ctxp->state = LPFC_NVMET_STE_ABORT;
3217 ctxp->entry_cnt++;
3218 rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
3219 if (rc == 0)
3220 goto aerr;
3222 spin_lock_irqsave(&phba->hbalock, flags);
3223 abts_wqeq = ctxp->wqeq;
3224 abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
3225 abts_wqeq->iocb_cmpl = NULL;
3226 abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
3227 if (!ctxp->hdwq)
3228 ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3230 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3231 spin_unlock_irqrestore(&phba->hbalock, flags);
3232 if (rc == WQE_SUCCESS) {
3233 return 0;
3234 }
3236 aerr:
3237 spin_lock_irqsave(&ctxp->ctxlock, flags);
3238 if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
3239 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3240 list_del(&ctxp->list);
3241 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3242 released = true;
3243 }
3244 ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
3245 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3247 atomic_inc(&tgtp->xmt_abort_rsp_error);
3248 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3249 "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
3250 ctxp->oxid, rc);
3251 if (released)
3252 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3253 return 1;
3254 }
3256 static int
3257 lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
3258 struct lpfc_nvmet_rcv_ctx *ctxp,
3259 uint32_t sid, uint16_t xri)
3260 {
3261 struct lpfc_nvmet_tgtport *tgtp;
3262 struct lpfc_iocbq *abts_wqeq;
3263 unsigned long flags;
3264 int rc;
3266 if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
3267 (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
3268 ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3269 ctxp->entry_cnt++;
3270 } else {
3271 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3272 "6418 NVMET LS abort state mismatch "
3273 "oxid x%x: %d %d\n",
3274 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3275 ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3276 }
3278 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3280 /* Issue ABTS for this WQE based on iotag */
3281 ctxp->wqeq = lpfc_sli_get_iocbq(phba);
3282 if (!ctxp->wqeq) {
3283 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3284 "6068 Abort failed: No wqeqs: "
3285 "xri: x%x\n", xri);
3286 /* No failure to an ABTS request. */
3287 kfree(ctxp);
3288 return 0;
3289 }
3291 abts_wqeq = ctxp->wqeq;
3293 if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
3294 rc = WQE_BUSY;
3295 goto out;
3296 }
3298 spin_lock_irqsave(&phba->hbalock, flags);
3299 abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
3300 abts_wqeq->iocb_cmpl = 0;
3301 abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
3302 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3303 spin_unlock_irqrestore(&phba->hbalock, flags);
3304 if (rc == WQE_SUCCESS) {
3305 atomic_inc(&tgtp->xmt_abort_unsol);
3306 return 0;
3307 }
3308 out:
3309 atomic_inc(&tgtp->xmt_abort_rsp_error);
3310 abts_wqeq->context2 = NULL;
3311 abts_wqeq->context3 = NULL;
3312 lpfc_sli_release_iocbq(phba, abts_wqeq);
3313 kfree(ctxp);
3314 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3315 "6056 Failed to Issue ABTS. Status x%x\n", rc);
3316 return 0;
3317 }