/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme.h>
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
                                                 struct lpfc_nvmet_rcv_ctx *,
                                                 dma_addr_t rspbuf,
                                                 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
                                                  struct lpfc_nvmet_rcv_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
                                          struct lpfc_nvmet_rcv_ctx *,
                                          uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
                                            struct lpfc_nvmet_rcv_ctx *,
                                            uint32_t, uint16_t);
static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
                                           struct lpfc_nvmet_rcv_ctx *,
                                           uint32_t, uint16_t);
static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
                                    struct lpfc_nvmet_rcv_ctx *);
static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);

static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);

static union lpfc_wqe128 lpfc_tsend_cmd_template;
static union lpfc_wqe128 lpfc_treceive_cmd_template;
static union lpfc_wqe128 lpfc_trsp_cmd_template;

/* Setup WQE templates for NVME IOs */
void
lpfc_nvmet_cmd_template(void)
{
        union lpfc_wqe128 *wqe;

        /* TSEND template */
        wqe = &lpfc_tsend_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - payload_offset_len is zero */

        /* Word 4 - relative_offset is variable */

        /* Word 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 - wqe_ar is variable */
        bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
        bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
        bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
        bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);

        /* Word 8 - abort_tag is variable */

        /* Word 9  - reqtag, rcvoxid is variable */

        /* Word 10 - wqes, xc is variable */
        bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
        bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
        bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
        bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
        bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
        bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);

        /* Word 11 - sup, irsp, irsplen is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
        bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
        bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
        bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
        bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);

        /* Word 12 - fcp_data_len is variable */

        /* Word 13, 14, 15 - PBDE is zero */

        /* TRECEIVE template */
        wqe = &lpfc_treceive_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 */
        wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;

        /* Word 4 - relative_offset is variable */

        /* Word 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
        bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
        bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
        bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);

        /* Word 8 - abort_tag is variable */

        /* Word 9  - reqtag, rcvoxid is variable */

        /* Word 10 - xc is variable */
        bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
        bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
        bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
        bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
        bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
        bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 1);

        /* Word 11 - pbde is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
        bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
        bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
        bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
        bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);

        /* Word 12 - fcp_data_len is variable */

        /* Word 13, 14, 15 - PBDE is variable */

        /* TRSP template */
        wqe = &lpfc_trsp_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - response_len is variable */

        /* Word 4, 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
        bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
        bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
        bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */

        /* Word 8 - abort_tag is variable */

        /* Word 9  - reqtag is variable */

        /* Word 10 wqes, xc is variable */
        bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
        bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
        bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
        bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
        bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
        bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);

        /* Word 11 irsp, irsplen is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
        bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
        bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
        bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
        bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);

        /* Word 12, 13, 14, 15 - is zero */
}
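
/*
 * Example usage (a sketch, not the exact prep sequence): a prep routine
 * is expected to copy one of the templates above into the per-IO WQE and
 * then fill in only the words marked "variable" in the comments, along
 * the lines of:
 *
 *	memcpy(wqe, &lpfc_tsend_cmd_template, sizeof(union lpfc_wqe128));
 *	bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com, rpi);    (Word 6)
 *	bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com, xri);     (Word 6)
 *	bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, iotag);    (Word 9)
 *	wqe->fcp_tsend.fcp_data_len = transfer_len;            (Word 12)
 *
 * The names rpi, xri, iotag and transfer_len are illustrative only; the
 * real prep routines (lpfc_nvmet_prep_fcp_wqe and friends, declared
 * above) own the exact sequence.
 */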

static void
lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
{
        lockdep_assert_held(&ctxp->ctxlock);

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                        "6313 NVMET Defer ctx release xri x%x flg x%x\n",
                        ctxp->oxid, ctxp->flag);

        if (ctxp->flag & LPFC_NVMET_CTX_RLS)
                return;

        ctxp->flag |= LPFC_NVMET_CTX_RLS;
        spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
        list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
        spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
}
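
/*
 * Sketch of the expected call pattern (it mirrors the call sites later in
 * this file): lpfc_nvmet_defer_release() must be invoked with
 * ctxp->ctxlock held, and any abort is issued after the lock is dropped:
 *
 *	spin_lock_irqsave(&ctxp->ctxlock, iflag);
 *	lpfc_nvmet_defer_release(phba, ctxp);
 *	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 *	lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
 */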

/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. This function is the completion handler for NVME LS commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                          struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct nvmefc_tgt_ls_req *rsp;
        struct lpfc_nvmet_rcv_ctx *ctxp;
        uint32_t status, result;

        status = bf_get(lpfc_wcqe_c_status, wcqe);
        result = wcqe->parameter;
        ctxp = cmdwqe->context2;

        if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6410 NVMET LS cmpl state mismatch IO x%x: "
                                "%d %d\n",
                                ctxp->oxid, ctxp->state, ctxp->entry_cnt);
        }

        if (!phba->targetport)
                goto out;

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

        if (tgtp) {
                if (status) {
                        atomic_inc(&tgtp->xmt_ls_rsp_error);
                        if (result == IOERR_ABORT_REQUESTED)
                                atomic_inc(&tgtp->xmt_ls_rsp_aborted);
                        if (bf_get(lpfc_wcqe_c_xb, wcqe))
                                atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
                } else {
                        atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
                }
        }

out:
        rsp = &ctxp->ctx.ls_req;

        lpfc_nvmeio_data(phba, "NVMET LS  CMPL: xri x%x stat x%x result x%x\n",
                         ctxp->oxid, status, result);

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                        "6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
                        status, result, ctxp->oxid);

        lpfc_nlp_put(cmdwqe->context1);
        cmdwqe->context2 = NULL;
        cmdwqe->context3 = NULL;
        lpfc_sli_release_iocbq(phba, cmdwqe);
        rsp->done(rsp);
        kfree(ctxp);
}

/**
 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA the buffer is associated with
 * @ctx_buf: ctx buffer context to clean up and repost
 *
 * Description: Frees the given DMA buffer by reposting it to its
 * associated RQ so it can be reused, and recycles the receive context,
 * either to a waiting deferred command or to a per-CPU free list.
 *
 * Notes: Takes ctxp->ctxlock and internal list locks. Can be called with
 * or without other locks held.
 *
 * Returns: None
 **/
void
lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
        struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
        struct lpfc_nvmet_tgtport *tgtp;
        struct fc_frame_header *fc_hdr;
        struct rqb_dmabuf *nvmebuf;
        struct lpfc_nvmet_ctx_info *infop;
        uint32_t size, oxid, sid;
        int cpu;
        unsigned long iflag;

        if (ctxp->txrdy) {
                dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
                              ctxp->txrdy_phys);
                ctxp->txrdy = NULL;
                ctxp->txrdy_phys = 0;
        }

        if (ctxp->state == LPFC_NVMET_STE_FREE) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6411 NVMET free, already free IO x%x: %d %d\n",
                                ctxp->oxid, ctxp->state, ctxp->entry_cnt);
        }

        if (ctxp->rqb_buffer) {
                spin_lock_irqsave(&ctxp->ctxlock, iflag);
                nvmebuf = ctxp->rqb_buffer;
                /* check if freed in another path whilst acquiring lock */
                if (nvmebuf) {
                        ctxp->rqb_buffer = NULL;
                        if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
                                ctxp->flag &= ~LPFC_NVMET_CTX_REUSE_WQ;
                                spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
                                nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
                                                                    nvmebuf);
                        } else {
                                spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
                                /* repost */
                                lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
                        }
                } else {
                        spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
                }
        }
        ctxp->state = LPFC_NVMET_STE_FREE;

        spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
        if (phba->sli4_hba.nvmet_io_wait_cnt) {
                list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
                                 nvmebuf, struct rqb_dmabuf,
                                 hbuf.list);
                phba->sli4_hba.nvmet_io_wait_cnt--;
                spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
                                       iflag);

                fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
                oxid = be16_to_cpu(fc_hdr->fh_ox_id);
                tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
                size = nvmebuf->bytes_recv;
                sid = sli4_sid_from_fc_hdr(fc_hdr);

                ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
                ctxp->wqeq = NULL;
                ctxp->txrdy = NULL;
                ctxp->offset = 0;
                ctxp->phba = phba;
                ctxp->size = size;
                ctxp->oxid = oxid;
                ctxp->sid = sid;
                ctxp->state = LPFC_NVMET_STE_RCV;
                ctxp->entry_cnt = 1;
                ctxp->flag = 0;
                ctxp->ctxbuf = ctx_buf;
                ctxp->rqb_buffer = (void *)nvmebuf;
                spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                /* NOTE: isr time stamp is stale when context is re-assigned*/
                if (ctxp->ts_isr_cmd) {
                        ctxp->ts_cmd_nvme = 0;
                        ctxp->ts_nvme_data = 0;
                        ctxp->ts_data_wqput = 0;
                        ctxp->ts_isr_data = 0;
                        ctxp->ts_data_nvme = 0;
                        ctxp->ts_nvme_status = 0;
                        ctxp->ts_status_wqput = 0;
                        ctxp->ts_isr_status = 0;
                        ctxp->ts_status_nvme = 0;
                }
#endif
                atomic_inc(&tgtp->rcv_fcp_cmd_in);

                /* flag new work queued, replacement buffer has already
                 * been reposted
                 */
                spin_lock_irqsave(&ctxp->ctxlock, iflag);
                ctxp->flag |= LPFC_NVMET_CTX_REUSE_WQ;
                spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

                if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
                        atomic_inc(&tgtp->rcv_fcp_cmd_drop);
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
                                        "6181 Unable to queue deferred work "
                                        "for oxid x%x. "
                                        "FCP Drop IO [x%x x%x x%x]\n",
                                        ctxp->oxid,
                                        atomic_read(&tgtp->rcv_fcp_cmd_in),
                                        atomic_read(&tgtp->rcv_fcp_cmd_out),
                                        atomic_read(&tgtp->xmt_fcp_release));

                        spin_lock_irqsave(&ctxp->ctxlock, iflag);
                        lpfc_nvmet_defer_release(phba, ctxp);
                        spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
                        lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
                }
                return;
        }
        spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

        /*
         * Use the CPU context list, from the MRQ the IO was received on
         * (ctxp->idx), to save context structure.
         */
        cpu = raw_smp_processor_id();
        infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
        spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
        list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
        infop->nvmet_ctx_list_cnt++;
        spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
#endif
}
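
/*
 * Note on the deferred path above: when a waiting command is requeued,
 * ctx_buf->defer_work (wired to lpfc_nvmet_fcp_rqst_defer_work() during
 * context setup) later runs from phba->wq and hands the saved command to
 * lpfc_nvmet_process_rcv_fcp_req(). This summarizes the flow above; it
 * does not add a new code path.
 */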

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvmet_ktime(struct lpfc_hba *phba,
                 struct lpfc_nvmet_rcv_ctx *ctxp)
{
        uint64_t seg1, seg2, seg3, seg4, seg5;
        uint64_t seg6, seg7, seg8, seg9, seg10;
        uint64_t segsum;

        if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
            !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
            !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
            !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
            !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
                return;

        if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
                return;
        if (ctxp->ts_isr_cmd  > ctxp->ts_cmd_nvme)
                return;
        if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
                return;
        if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
                return;
        if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
                return;
        if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
                return;
        if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
                return;
        if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
                return;
        if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
                return;
        if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
                return;
        /*
         * Segment 1 - Time from FCP command received by MSI-X ISR
         * to FCP command is passed to NVME Layer.
         * Segment 2 - Time from FCP command payload handed
         * off to NVME Layer to Driver receives a Command op
         * from NVME Layer.
         * Segment 3 - Time from Driver receives a Command op
         * from NVME Layer to Command is put on WQ.
         * Segment 4 - Time from Driver WQ put is done
         * to MSI-X ISR for Command cmpl.
         * Segment 5 - Time from MSI-X ISR for Command cmpl to
         * Command cmpl is passed to NVME Layer.
         * Segment 6 - Time from Command cmpl is passed to NVME
         * Layer to Driver receives a RSP op from NVME Layer.
         * Segment 7 - Time from Driver receives a RSP op from
         * NVME Layer to WQ put is done on TRSP FCP Status.
         * Segment 8 - Time from Driver WQ put is done on TRSP
         * FCP Status to MSI-X ISR for TRSP cmpl.
         * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
         * TRSP cmpl is passed to NVME Layer.
         * Segment 10 - Time from FCP command received by
         * MSI-X ISR to command is completed on wire.
         * (Segments 1 thru 8) for READDATA / WRITEDATA
         * (Segments 1 thru 4) for READDATA_RSP
         */
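        /*
         * Worked example with illustrative numbers: if ts_isr_cmd = 0,
         * ts_cmd_nvme = 10, ts_nvme_data = 25, ts_data_wqput = 30,
         * ts_isr_data = 70 and ts_data_nvme = 75, the code below yields
         * seg1 = 10, seg2 = 15, seg3 = 5, seg4 = 40 and seg5 = 5. Each
         * segment is the delta between consecutive timestamps, computed
         * as (cumulative time - running sum) so that an out-of-order
         * sample bails out early instead of underflowing.
         */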
        seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
        segsum = seg1;

        seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
        if (segsum > seg2)
                return;
        seg2 -= segsum;
        segsum += seg2;

        seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
        if (segsum > seg3)
                return;
        seg3 -= segsum;
        segsum += seg3;

        seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
        if (segsum > seg4)
                return;
        seg4 -= segsum;
        segsum += seg4;

        seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
        if (segsum > seg5)
                return;
        seg5 -= segsum;
        segsum += seg5;

        /* For auto rsp commands seg6 thru seg10 will be 0 */
        if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
                seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
                if (segsum > seg6)
                        return;
                seg6 -= segsum;
                segsum += seg6;

                seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
                if (segsum > seg7)
                        return;
                seg7 -= segsum;
                segsum += seg7;

                seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
                if (segsum > seg8)
                        return;
                seg8 -= segsum;
                segsum += seg8;

                seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
                if (segsum > seg9)
                        return;
                seg9 -= segsum;
                segsum += seg9;

                if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
                        return;
                seg10 = (ctxp->ts_isr_status -
                        ctxp->ts_isr_cmd);
        } else {
                if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
                        return;
                seg6 =  0;
                seg7 =  0;
                seg8 =  0;
                seg9 =  0;
                seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
        }

        phba->ktime_seg1_total += seg1;
        if (seg1 < phba->ktime_seg1_min)
                phba->ktime_seg1_min = seg1;
        else if (seg1 > phba->ktime_seg1_max)
                phba->ktime_seg1_max = seg1;

        phba->ktime_seg2_total += seg2;
        if (seg2 < phba->ktime_seg2_min)
                phba->ktime_seg2_min = seg2;
        else if (seg2 > phba->ktime_seg2_max)
                phba->ktime_seg2_max = seg2;

        phba->ktime_seg3_total += seg3;
        if (seg3 < phba->ktime_seg3_min)
                phba->ktime_seg3_min = seg3;
        else if (seg3 > phba->ktime_seg3_max)
                phba->ktime_seg3_max = seg3;

        phba->ktime_seg4_total += seg4;
        if (seg4 < phba->ktime_seg4_min)
                phba->ktime_seg4_min = seg4;
        else if (seg4 > phba->ktime_seg4_max)
                phba->ktime_seg4_max = seg4;

        phba->ktime_seg5_total += seg5;
        if (seg5 < phba->ktime_seg5_min)
                phba->ktime_seg5_min = seg5;
        else if (seg5 > phba->ktime_seg5_max)
                phba->ktime_seg5_max = seg5;

        phba->ktime_data_samples++;
        if (!seg6)
                goto out;

        phba->ktime_seg6_total += seg6;
        if (seg6 < phba->ktime_seg6_min)
                phba->ktime_seg6_min = seg6;
        else if (seg6 > phba->ktime_seg6_max)
                phba->ktime_seg6_max = seg6;

        phba->ktime_seg7_total += seg7;
        if (seg7 < phba->ktime_seg7_min)
                phba->ktime_seg7_min = seg7;
        else if (seg7 > phba->ktime_seg7_max)
                phba->ktime_seg7_max = seg7;

        phba->ktime_seg8_total += seg8;
        if (seg8 < phba->ktime_seg8_min)
                phba->ktime_seg8_min = seg8;
        else if (seg8 > phba->ktime_seg8_max)
                phba->ktime_seg8_max = seg8;

        phba->ktime_seg9_total += seg9;
        if (seg9 < phba->ktime_seg9_min)
                phba->ktime_seg9_min = seg9;
        else if (seg9 > phba->ktime_seg9_max)
                phba->ktime_seg9_max = seg9;
out:
        phba->ktime_seg10_total += seg10;
        if (seg10 < phba->ktime_seg10_min)
                phba->ktime_seg10_min = seg10;
        else if (seg10 > phba->ktime_seg10_max)
                phba->ktime_seg10_max = seg10;
        phba->ktime_status_samples++;
}
#endif

/**
 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. This function is the completion handler for NVME FCP commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                          struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct nvmefc_tgt_fcp_req *rsp;
        struct lpfc_nvmet_rcv_ctx *ctxp;
        uint32_t status, result, op, start_clean, logerr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        uint32_t id;
#endif

        ctxp = cmdwqe->context2;
        ctxp->flag &= ~LPFC_NVMET_IO_INP;

        rsp = &ctxp->ctx.fcp_req;
        op = rsp->op;

        status = bf_get(lpfc_wcqe_c_status, wcqe);
        result = wcqe->parameter;

        if (phba->targetport)
                tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        else
                tgtp = NULL;

        lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
                         ctxp->oxid, op, status);

        if (status) {
                rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
                rsp->transferred_length = 0;
                if (tgtp) {
                        atomic_inc(&tgtp->xmt_fcp_rsp_error);
                        if (result == IOERR_ABORT_REQUESTED)
                                atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
                }

                logerr = LOG_NVME_IOERR;
                /* pick up SLI4 exchange busy condition */
                if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
                        ctxp->flag |= LPFC_NVMET_XBUSY;
                        logerr |= LOG_NVME_ABTS;
                        if (tgtp)
                                atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);

                } else {
                        ctxp->flag &= ~LPFC_NVMET_XBUSY;
                }

                lpfc_printf_log(phba, KERN_INFO, logerr,
                                "6315 IO Error Cmpl xri x%x: %x/%x XBUSY:x%x\n",
                                ctxp->oxid, status, result, ctxp->flag);

        } else {
                rsp->fcp_error = NVME_SC_SUCCESS;
                if (op == NVMET_FCOP_RSP)
                        rsp->transferred_length = rsp->rsplen;
                else
                        rsp->transferred_length = rsp->transfer_length;
                if (tgtp)
                        atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
        }

        if ((op == NVMET_FCOP_READDATA_RSP) ||
            (op == NVMET_FCOP_RSP)) {
                /* Sanity check */
                ctxp->state = LPFC_NVMET_STE_DONE;
                ctxp->entry_cnt++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (ctxp->ts_cmd_nvme) {
                        if (rsp->op == NVMET_FCOP_READDATA_RSP) {
                                ctxp->ts_isr_data =
                                        cmdwqe->isr_timestamp;
                                ctxp->ts_data_nvme =
                                        ktime_get_ns();
                                ctxp->ts_nvme_status =
                                        ctxp->ts_data_nvme;
                                ctxp->ts_status_wqput =
                                        ctxp->ts_data_nvme;
                                ctxp->ts_isr_status =
                                        ctxp->ts_data_nvme;
                                ctxp->ts_status_nvme =
                                        ctxp->ts_data_nvme;
                        } else {
                                ctxp->ts_isr_status =
                                        cmdwqe->isr_timestamp;
                                ctxp->ts_status_nvme =
                                        ktime_get_ns();
                        }
                }
#endif
                rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (ctxp->ts_cmd_nvme)
                        lpfc_nvmet_ktime(phba, ctxp);
#endif
                /* lpfc_nvmet_xmt_fcp_release() will recycle the context */
        } else {
                ctxp->entry_cnt++;
                start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
                memset(((char *)cmdwqe) + start_clean, 0,
                       (sizeof(struct lpfc_iocbq) - start_clean));
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (ctxp->ts_cmd_nvme) {
                        ctxp->ts_isr_data = cmdwqe->isr_timestamp;
                        ctxp->ts_data_nvme = ktime_get_ns();
                }
#endif
                rsp->done(rsp);
        }
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
                id = raw_smp_processor_id();
                if (id < LPFC_CHECK_CPU_CNT) {
                        if (ctxp->cpu != id)
                                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
                                                "6704 CPU Check cmdcmpl: "
                                                "cpu %d expect %d\n",
                                                id, ctxp->cpu);
                        phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_cmpl_io[id]++;
                }
        }
#endif
}

static int
lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
                      struct nvmefc_tgt_ls_req *rsp)
{
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
        struct lpfc_hba *phba = ctxp->phba;
        struct hbq_dmabuf *nvmebuf =
                (struct hbq_dmabuf *)ctxp->rqb_buffer;
        struct lpfc_iocbq *nvmewqeq;
        struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
        struct lpfc_dmabuf dmabuf;
        struct ulp_bde64 bpl;
        int rc;

        if (phba->pport->load_flag & FC_UNLOADING)
                return -ENODEV;

809                         "6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
810
811         if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
812             (ctxp->entry_cnt != 1)) {
813                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
814                                 "6412 NVMET LS rsp state mismatch "
815                                 "oxid x%x: %d %d\n",
816                                 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
817         }
818         ctxp->state = LPFC_NVMET_STE_LS_RSP;
819         ctxp->entry_cnt++;
820
821         nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
822                                       rsp->rsplen);
823         if (nvmewqeq == NULL) {
824                 atomic_inc(&nvmep->xmt_ls_drop);
825                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
826                                 "6150 LS Drop IO x%x: Prep\n",
827                                 ctxp->oxid);
828                 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
829                 atomic_inc(&nvmep->xmt_ls_abort);
830                 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
831                                                 ctxp->sid, ctxp->oxid);
832                 return -ENOMEM;
833         }
834
835         /* Save numBdes for bpl2sgl */
836         nvmewqeq->rsvd2 = 1;
837         nvmewqeq->hba_wqidx = 0;
838         nvmewqeq->context3 = &dmabuf;
839         dmabuf.virt = &bpl;
840         bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
841         bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
842         bpl.tus.f.bdeSize = rsp->rsplen;
843         bpl.tus.f.bdeFlags = 0;
844         bpl.tus.w = le32_to_cpu(bpl.tus.w);
845
846         nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
847         nvmewqeq->iocb_cmpl = NULL;
848         nvmewqeq->context2 = ctxp;
849
850         lpfc_nvmeio_data(phba, "NVMET LS  RESP: xri x%x wqidx x%x len x%x\n",
851                          ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);
852
853         rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
854         if (rc == WQE_SUCCESS) {
855                 /*
856                  * Okay to repost buffer here, but wait till cmpl
857                  * before freeing ctxp and iocbq.
858                  */
859                 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
                ctxp->rqb_buffer = NULL;
                atomic_inc(&nvmep->xmt_ls_rsp);
                return 0;
        }
        /* Give back resources */
        atomic_inc(&nvmep->xmt_ls_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "6151 LS Drop IO x%x: Issue %d\n",
                        ctxp->oxid, rc);

        lpfc_nlp_put(nvmewqeq->context1);

        lpfc_in_buf_free(phba, &nvmebuf->dbuf);
        atomic_inc(&nvmep->xmt_ls_abort);
        lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
        return -ENXIO;
}

static int
lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
                      struct nvmefc_tgt_fcp_req *rsp)
{
        struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct lpfc_hba *phba = ctxp->phba;
        struct lpfc_queue *wq;
        struct lpfc_iocbq *nvmewqeq;
        struct lpfc_sli_ring *pring;
        unsigned long iflags;
        int rc;

        if (phba->pport->load_flag & FC_UNLOADING) {
                rc = -ENODEV;
                goto aerr;
        }

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (ctxp->ts_cmd_nvme) {
                if (rsp->op == NVMET_FCOP_RSP)
                        ctxp->ts_nvme_status = ktime_get_ns();
                else
                        ctxp->ts_nvme_data = ktime_get_ns();
        }

        /* Setup the hdw queue if not already set */
        if (!ctxp->hdwq)
                ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];

        if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
                int id = raw_smp_processor_id();

                if (id < LPFC_CHECK_CPU_CNT) {
                        if (rsp->hwqid != id)
                                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
                                                "6705 CPU Check OP: "
                                                "cpu %d expect %d\n",
                                                id, rsp->hwqid);
                        phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_xmt_io[id]++;
                }
                ctxp->cpu = id; /* Setup cpu for cmpl check */
        }
#endif

        /* Sanity check */
        if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
            (ctxp->state == LPFC_NVMET_STE_ABORT)) {
                atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6102 IO xri x%x aborted\n",
                                ctxp->oxid);
                rc = -ENXIO;
                goto aerr;
        }

        nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
        if (nvmewqeq == NULL) {
                atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6152 FCP Drop IO x%x: Prep\n",
                                ctxp->oxid);
                rc = -ENXIO;
                goto aerr;
        }

        nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
        nvmewqeq->iocb_cmpl = NULL;
        nvmewqeq->context2 = ctxp;
        nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
        ctxp->wqeq->hba_wqidx = rsp->hwqid;

        lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
                         ctxp->oxid, rsp->op, rsp->rsplen);

        ctxp->flag |= LPFC_NVMET_IO_INP;
        rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
        if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (!ctxp->ts_cmd_nvme)
                        return 0;
                if (rsp->op == NVMET_FCOP_RSP)
                        ctxp->ts_status_wqput = ktime_get_ns();
                else
                        ctxp->ts_data_wqput = ktime_get_ns();
#endif
                return 0;
        }

        if (rc == -EBUSY) {
                /*
                 * WQ was full, so queue nvmewqeq to be sent after
                 * WQE release CQE
                 */
                ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
                wq = ctxp->hdwq->nvme_wq;
                pring = wq->pring;
                spin_lock_irqsave(&pring->ring_lock, iflags);
                list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
                wq->q_flag |= HBA_NVMET_WQFULL;
                spin_unlock_irqrestore(&pring->ring_lock, iflags);
                atomic_inc(&lpfc_nvmep->defer_wqfull);
                return 0;
        }

        /* Give back resources */
        atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "6153 FCP Drop IO x%x: Issue: %d\n",
                        ctxp->oxid, rc);

        ctxp->wqeq->hba_wqidx = 0;
        nvmewqeq->context2 = NULL;
        nvmewqeq->context3 = NULL;
        rc = -EBUSY;
aerr:
        return rc;
}

static void
lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
{
        struct lpfc_nvmet_tgtport *tport = targetport->private;

        /* release any threads waiting for the unreg to complete */
        if (tport->phba->targetport)
                complete(tport->tport_unreg_cmp);
}

static void
lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
                         struct nvmefc_tgt_fcp_req *req)
{
        struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct lpfc_hba *phba = ctxp->phba;
        struct lpfc_queue *wq;
        unsigned long flags;

        if (phba->pport->load_flag & FC_UNLOADING)
                return;

        if (!ctxp->hdwq)
                ctxp->hdwq = &phba->sli4_hba.hdwq[0];

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                        "6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
                        ctxp->oxid, ctxp->flag, ctxp->state);

        lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
                         ctxp->oxid, ctxp->flag, ctxp->state);

        atomic_inc(&lpfc_nvmep->xmt_fcp_abort);

        spin_lock_irqsave(&ctxp->ctxlock, flags);

        /* Since iaab/iaar are NOT set, we need to check
         * if the firmware is in process of aborting IO
         */
        if (ctxp->flag & LPFC_NVMET_XBUSY) {
                spin_unlock_irqrestore(&ctxp->ctxlock, flags);
                return;
        }
        ctxp->flag |= LPFC_NVMET_ABORT_OP;

        if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
                spin_unlock_irqrestore(&ctxp->ctxlock, flags);
                lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
                                                 ctxp->oxid);
                wq = ctxp->hdwq->nvme_wq;
                lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
                return;
        }
        spin_unlock_irqrestore(&ctxp->ctxlock, flags);

        /* A state of LPFC_NVMET_STE_RCV means we have just received
         * the NVME command and have not started processing it
         * (i.e. no IO WQEs have been issued on this exchange yet).
         */
        if (ctxp->state == LPFC_NVMET_STE_RCV)
                lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
                                                 ctxp->oxid);
        else
                lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
                                               ctxp->oxid);
}

static void
lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
                           struct nvmefc_tgt_fcp_req *rsp)
{
        struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct lpfc_hba *phba = ctxp->phba;
        unsigned long flags;
        bool aborting = false;

        spin_lock_irqsave(&ctxp->ctxlock, flags);
        if (ctxp->flag & LPFC_NVMET_XBUSY)
                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
                                "6027 NVMET release with XBUSY flag x%x"
                                " oxid x%x\n",
                                ctxp->flag, ctxp->oxid);
        else if (ctxp->state != LPFC_NVMET_STE_DONE &&
                 ctxp->state != LPFC_NVMET_STE_ABORT)
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6413 NVMET release bad state %d %d oxid x%x\n",
                                ctxp->state, ctxp->entry_cnt, ctxp->oxid);

        if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
            (ctxp->flag & LPFC_NVMET_XBUSY)) {
                aborting = true;
                /* let the abort path do the real release */
                lpfc_nvmet_defer_release(phba, ctxp);
        }
        spin_unlock_irqrestore(&ctxp->ctxlock, flags);

        lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n",
                         ctxp->oxid, ctxp->state, aborting);

        atomic_inc(&lpfc_nvmep->xmt_fcp_release);

        if (aborting)
                return;

        lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}

static void
lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
                     struct nvmefc_tgt_fcp_req *rsp)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
        struct lpfc_hba *phba = ctxp->phba;
        unsigned long iflag;

        lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
                         ctxp->oxid, ctxp->size, raw_smp_processor_id());

        if (!nvmebuf) {
                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
                                "6425 Defer rcv: no buffer xri x%x: "
                                "flg %x ste %x\n",
                                ctxp->oxid, ctxp->flag, ctxp->state);
                return;
        }

        tgtp = phba->targetport->private;
        if (tgtp)
                atomic_inc(&tgtp->rcv_fcp_cmd_defer);

        /* Free the nvmebuf since a new buffer already replaced it */
        nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
        spin_lock_irqsave(&ctxp->ctxlock, iflag);
        ctxp->rqb_buffer = NULL;
        spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
}

static struct nvmet_fc_target_template lpfc_tgttemplate = {
        .targetport_delete = lpfc_nvmet_targetport_delete,
        .xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
        .fcp_op         = lpfc_nvmet_xmt_fcp_op,
        .fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
        .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
        .defer_rcv      = lpfc_nvmet_defer_rcv,

        .max_hw_queues  = 1,
        .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
        .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
        .dma_boundary = 0xFFFFFFFF,

        /* optional features */
        .target_features = 0,
        /* sizes of additional private data for data structures */
        .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
};
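
/*
 * Registration sketch: lpfc_nvmet_create_targetport() below fills in a
 * struct nvmet_fc_port_info and hands this template to the NVME FC
 * transport, roughly:
 *
 *	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
 *					     &phba->pcidev->dev,
 *					     &phba->targetport);
 *
 * Treat this as a sketch of intent; the transport owns the exact
 * registration contract.
 */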

static void
__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
                struct lpfc_nvmet_ctx_info *infop)
{
        struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
        unsigned long flags;

        spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
        list_for_each_entry_safe(ctx_buf, next_ctx_buf,
                                 &infop->nvmet_ctx_list, list) {
                spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                list_del_init(&ctx_buf->list);
                spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);

                __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
                ctx_buf->sglq->state = SGL_FREED;
                ctx_buf->sglq->ndlp = NULL;

                spin_lock(&phba->sli4_hba.sgl_list_lock);
                list_add_tail(&ctx_buf->sglq->list,
                              &phba->sli4_hba.lpfc_nvmet_sgl_list);
                spin_unlock(&phba->sli4_hba.sgl_list_lock);

                lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
                kfree(ctx_buf->context);
        }
        spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
}

static void
lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
{
        struct lpfc_nvmet_ctx_info *infop;
        int i, j;

        /* The first context list, MRQ 0 CPU 0 */
        infop = phba->sli4_hba.nvmet_ctx_info;
        if (!infop)
                return;

        /* Cycle through the entire CPU context list for every MRQ */
        for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
                for_each_present_cpu(j) {
                        infop = lpfc_get_ctx_list(phba, j, i);
                        __lpfc_nvmet_clean_io_for_cpu(phba, infop);
                }
        }
        kfree(phba->sli4_hba.nvmet_ctx_info);
        phba->sli4_hba.nvmet_ctx_info = NULL;
}

static int
lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
{
        struct lpfc_nvmet_ctxbuf *ctx_buf;
        struct lpfc_iocbq *nvmewqe;
        union lpfc_wqe128 *wqe;
        struct lpfc_nvmet_ctx_info *last_infop;
        struct lpfc_nvmet_ctx_info *infop;
        int i, j, idx, cpu;

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
                        "6403 Allocate NVMET resources for %d XRIs\n",
                        phba->sli4_hba.nvmet_xri_cnt);

        phba->sli4_hba.nvmet_ctx_info = kcalloc(
                phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
                sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
        if (!phba->sli4_hba.nvmet_ctx_info) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "6419 Failed allocate memory for "
                                "nvmet context lists\n");
                return -ENOMEM;
        }

        /*
         * Assuming X CPUs in the system, and Y MRQs, allocate some
         * lpfc_nvmet_ctx_info structures as follows:
         *
         * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
         * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
         * ...
         * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
         *
         * Each line represents an MRQ "silo" containing an entry for
         * every CPU.
         *
         * MRQ X is initially assumed to be associated with CPU X, thus
         * contexts are initially distributed across all MRQs using
         * the MRQ index (N) as follows cpuN/mrqN. When contexts are
         * freed, they are freed to the MRQ silo based on the CPU number
         * of the IO completion. Thus a context that was allocated for MRQ A
         * whose IO completed on CPU B will be freed to cpuB/mrqA.
         */
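        /*
         * Conceptually (a sketch; the real lpfc_get_ctx_list() macro is
         * defined in lpfc_nvmet.h) a silo lookup is a flat-array index
         * into the kcalloc'ed block above:
         *
         *	infop = phba->sli4_hba.nvmet_ctx_info +
         *		(cpu * phba->cfg_nvmet_mrq) + mrq;
         */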
1263         for_each_possible_cpu(i) {
1264                 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1265                         infop = lpfc_get_ctx_list(phba, i, j);
1266                         INIT_LIST_HEAD(&infop->nvmet_ctx_list);
1267                         spin_lock_init(&infop->nvmet_ctx_list_lock);
1268                         infop->nvmet_ctx_list_cnt = 0;
1269                 }
1270         }
1271
1272         /*
1273          * Setup the next CPU context info ptr for each MRQ.
1274          * MRQ 0 will cycle thru CPUs 0 - X separately from
1275          * MRQ 1 cycling thru CPUs 0 - X, and so on.
1276          */
1277         for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1278                 last_infop = lpfc_get_ctx_list(phba,
1279                                                cpumask_first(cpu_present_mask),
1280                                                j);
1281                 for (i = phba->sli4_hba.num_possible_cpu - 1;  i >= 0; i--) {
1282                         infop = lpfc_get_ctx_list(phba, i, j);
1283                         infop->nvmet_ctx_next_cpu = last_infop;
1284                         last_infop = infop;
1285                 }
1286         }
1287
1288         /* For all nvmet xris, allocate resources needed to process a
1289          * received command on a per xri basis.
1290          */
1291         idx = 0;
1292         cpu = cpumask_first(cpu_present_mask);
1293         for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
1294                 ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
1295                 if (!ctx_buf) {
1296                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1297                                         "6404 Ran out of memory for NVMET\n");
1298                         return -ENOMEM;
1299                 }
1300
1301                 ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
1302                                            GFP_KERNEL);
1303                 if (!ctx_buf->context) {
1304                         kfree(ctx_buf);
1305                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1306                                         "6405 Ran out of NVMET "
1307                                         "context memory\n");
1308                         return -ENOMEM;
1309                 }
1310                 ctx_buf->context->ctxbuf = ctx_buf;
1311                 ctx_buf->context->state = LPFC_NVMET_STE_FREE;
1312
1313                 ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
1314                 if (!ctx_buf->iocbq) {
1315                         kfree(ctx_buf->context);
1316                         kfree(ctx_buf);
1317                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1318                                         "6406 Ran out of NVMET iocb/WQEs\n");
1319                         return -ENOMEM;
1320                 }
1321                 ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
1322                 nvmewqe = ctx_buf->iocbq;
1323                 wqe = &nvmewqe->wqe;
1324
1325                 /* Initialize WQE */
1326                 memset(wqe, 0, sizeof(union lpfc_wqe));
1327
1328                 ctx_buf->iocbq->context1 = NULL;
1329                 spin_lock(&phba->sli4_hba.sgl_list_lock);
1330                 ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
1331                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1332                 if (!ctx_buf->sglq) {
1333                         lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1334                         kfree(ctx_buf->context);
1335                         kfree(ctx_buf);
1336                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1337                                         "6407 Ran out of NVMET XRIs\n");
1338                         return -ENOMEM;
1339                 }
1340                 INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);
1341
1342                 /*
1343                  * Add ctx to MRQidx context list. Our initial assumption
1344                  * is MRQidx will be associated with CPUidx. This association
1345                  * can change on the fly.
1346                  */
1347                 infop = lpfc_get_ctx_list(phba, cpu, idx);
1348                 spin_lock(&infop->nvmet_ctx_list_lock);
1349                 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
1350                 infop->nvmet_ctx_list_cnt++;
1351                 spin_unlock(&infop->nvmet_ctx_list_lock);
1352
1353                 /* Spread ctx structures evenly across all MRQs */
1354                 idx++;
1355                 if (idx >= phba->cfg_nvmet_mrq) {
1356                         idx = 0;
1357                         cpu = cpumask_first(cpu_present_mask);
1358                         continue;
1359                 }
1360                 cpu = cpumask_next(cpu, cpu_present_mask);
1361                 if (cpu == nr_cpu_ids)
1362                         cpu = cpumask_first(cpu_present_mask);
1363
1364         }
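             /*
              * Every context buffer now sits on exactly one cpuN/mrqN
              * list. idx and cpu advance and wrap together above, so with
              * a contiguous present-CPU mask the initial placement matches
              * the cpuN/mrqN assumption described earlier.
              */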
1365
1366         for_each_present_cpu(i) {
1367                 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1368                         infop = lpfc_get_ctx_list(phba, i, j);
1369                         lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1370                                         "6408 TOTAL NVMET ctx for CPU %d "
1371                                         "MRQ %d: cnt %d nextcpu %p\n",
1372                                         i, j, infop->nvmet_ctx_list_cnt,
1373                                         infop->nvmet_ctx_next_cpu);
1374                 }
1375         }
1376         return 0;
1377 }
1378
1379 int
1380 lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
1381 {
1382         struct lpfc_vport  *vport = phba->pport;
1383         struct lpfc_nvmet_tgtport *tgtp;
1384         struct nvmet_fc_port_info pinfo;
1385         int error;
1386
1387         if (phba->targetport)
1388                 return 0;
1389
1390         error = lpfc_nvmet_setup_io_context(phba);
1391         if (error)
1392                 return error;
1393
1394         memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
1395         pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
1396         pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
1397         pinfo.port_id = vport->fc_myDID;
1398
1399         /* Advertise seg_cnt + 1 to the transport layer because it takes
1400          * page alignment into account. When space for the SGL is allocated,
1401          * we allocate + 3: one for cmd, one for rsp, one for this alignment.
1402          */
1403         lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
1404         lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
1405         lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
1406
1407 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1408         error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
1409                                              &phba->pcidev->dev,
1410                                              &phba->targetport);
1411 #else
1412         error = -ENOENT;
1413 #endif
1414         if (error) {
1415                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1416                                 "6025 Cannot register NVME targetport x%x: "
1417                                 "portnm %llx nodenm %llx segs %d qs %d\n",
1418                                 error,
1419                                 pinfo.port_name, pinfo.node_name,
1420                                 lpfc_tgttemplate.max_sgl_segments,
1421                                 lpfc_tgttemplate.max_hw_queues);
1422                 phba->targetport = NULL;
1423                 phba->nvmet_support = 0;
1424
1425                 lpfc_nvmet_cleanup_io_context(phba);
1426
1427         } else {
1428                 tgtp = (struct lpfc_nvmet_tgtport *)
1429                         phba->targetport->private;
1430                 tgtp->phba = phba;
1431
1432                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1433                                 "6026 Registered NVME "
1434                                 "targetport: %p, private %p "
1435                                 "portnm %llx nodenm %llx segs %d qs %d\n",
1436                                 phba->targetport, tgtp,
1437                                 pinfo.port_name, pinfo.node_name,
1438                                 lpfc_tgttemplate.max_sgl_segments,
1439                                 lpfc_tgttemplate.max_hw_queues);
1440
1441                 atomic_set(&tgtp->rcv_ls_req_in, 0);
1442                 atomic_set(&tgtp->rcv_ls_req_out, 0);
1443                 atomic_set(&tgtp->rcv_ls_req_drop, 0);
1444                 atomic_set(&tgtp->xmt_ls_abort, 0);
1445                 atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
1446                 atomic_set(&tgtp->xmt_ls_rsp, 0);
1447                 atomic_set(&tgtp->xmt_ls_drop, 0);
1448                 atomic_set(&tgtp->xmt_ls_rsp_error, 0);
1449                 atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
1450                 atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
1451                 atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
1452                 atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
1453                 atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
1454                 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
1455                 atomic_set(&tgtp->xmt_fcp_drop, 0);
1456                 atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
1457                 atomic_set(&tgtp->xmt_fcp_read, 0);
1458                 atomic_set(&tgtp->xmt_fcp_write, 0);
1459                 atomic_set(&tgtp->xmt_fcp_rsp, 0);
1460                 atomic_set(&tgtp->xmt_fcp_release, 0);
1461                 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
1462                 atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
1463                 atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
1464                 atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
1465                 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1466                 atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
1467                 atomic_set(&tgtp->xmt_fcp_abort, 0);
1468                 atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1469                 atomic_set(&tgtp->xmt_abort_unsol, 0);
1470                 atomic_set(&tgtp->xmt_abort_sol, 0);
1471                 atomic_set(&tgtp->xmt_abort_rsp, 0);
1472                 atomic_set(&tgtp->xmt_abort_rsp_error, 0);
1473                 atomic_set(&tgtp->defer_ctx, 0);
1474                 atomic_set(&tgtp->defer_fod, 0);
1475                 atomic_set(&tgtp->defer_wqfull, 0);
1476         }
1477         return error;
1478 }
1479
1480 int
1481 lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
1482 {
1483         struct lpfc_vport  *vport = phba->pport;
1484
1485         if (!phba->targetport)
1486                 return 0;
1487
1488         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
1489                          "6007 Update NVMET port %p did x%x\n",
1490                          phba->targetport, vport->fc_myDID);
1491
1492         phba->targetport->port_id = vport->fc_myDID;
1493         return 0;
1494 }
1495
1496 /**
1497  * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
1498  * @phba: pointer to lpfc hba data structure.
1499  * @axri: pointer to the nvmet xri abort wcqe structure.
1500  *
1501  * This routine is invoked by the worker thread to process an SLI4 fast-path
1502  * NVMET aborted xri.
1503  **/
1504 void
1505 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
1506                             struct sli4_wcqe_xri_aborted *axri)
1507 {
1508 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1509         uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
1510         uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
1511         struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1512         struct lpfc_nvmet_tgtport *tgtp;
1513         struct lpfc_nodelist *ndlp;
1514         unsigned long iflag = 0;
1515         int rrq_empty = 0;
1516         bool released = false;
1517
1518         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1519                         "6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
1520
1521         if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
1522                 return;
1523
1524         if (phba->targetport) {
1525                 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1526                 atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
1527         }
1528
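             /*
              * Walk the contexts still waiting on aborted XRIs. Both
              * hbalock and the abts buffer list lock are held for the
              * traversal; they are dropped once a match is found, before
              * the RRQ and node handling below takes its own locks.
              */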
1529         spin_lock_irqsave(&phba->hbalock, iflag);
1530         spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1531         list_for_each_entry_safe(ctxp, next_ctxp,
1532                                  &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1533                                  list) {
1534                 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1535                         continue;
1536
1537                 spin_lock(&ctxp->ctxlock);
1538                 /* Check if we already received a free context call
1539                  * and we have completed processing an abort situation.
1540                  */
1541                 if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
1542                     !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
1543                         list_del(&ctxp->list);
1544                         released = true;
1545                 }
1546                 ctxp->flag &= ~LPFC_NVMET_XBUSY;
1547                 spin_unlock(&ctxp->ctxlock);
1548                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1549
1550                 rrq_empty = list_empty(&phba->active_rrq_list);
1551                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1552                 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1553                 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1554                     (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
1555                      ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
1556                         lpfc_set_rrq_active(phba, ndlp,
1557                                 ctxp->ctxbuf->sglq->sli4_lxritag,
1558                                 rxid, 1);
1559                         lpfc_sli4_abts_err_handler(phba, ndlp, axri);
1560                 }
1561
1562                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1563                                 "6318 XB aborted oxid %x flg x%x (%x)\n",
1564                                 ctxp->oxid, ctxp->flag, released);
1565                 if (released)
1566                         lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1567
1568                 if (rrq_empty)
1569                         lpfc_worker_wake_up(phba);
1570                 return;
1571         }
1572         spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1573         spin_unlock_irqrestore(&phba->hbalock, iflag);
1574 #endif
1575 }
1576
1577 int
1578 lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
1579                            struct fc_frame_header *fc_hdr)
1580 {
1581 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1582         struct lpfc_hba *phba = vport->phba;
1583         struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1584         struct nvmefc_tgt_fcp_req *rsp;
1585         uint32_t sid;
1586         uint16_t xri;
1587         unsigned long iflag = 0;
1588
1589         xri = be16_to_cpu(fc_hdr->fh_ox_id);
1590         sid = sli4_sid_from_fc_hdr(fc_hdr);
1591
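             /*
              * The ABTS identifies its exchange by the initiator's OX_ID
              * and S_ID; a context matches only when both agree, since an
              * OX_ID is unique per originator, not fabric-wide.
              */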
1592         spin_lock_irqsave(&phba->hbalock, iflag);
1593         spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1594         list_for_each_entry_safe(ctxp, next_ctxp,
1595                                  &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1596                                  list) {
1597                 if (ctxp->oxid != xri || ctxp->sid != sid)
1598                         continue;
1599
1600                 xri = ctxp->ctxbuf->sglq->sli4_xritag;
1601
1602                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1603                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1604
1605                 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1606                 ctxp->flag |= LPFC_NVMET_ABTS_RCV;
1607                 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1608
1609                 lpfc_nvmeio_data(phba,
1610                         "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1611                         xri, raw_smp_processor_id(), 0);
1612
1613                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1614                                 "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
1615
1616                 rsp = &ctxp->ctx.fcp_req;
1617                 nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
1618
1619                 /* Respond with BA_ACC accordingly */
1620                 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1621                 return 0;
1622         }
1623         spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1624         spin_unlock_irqrestore(&phba->hbalock, iflag);
1625
1626         lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1627                          xri, raw_smp_processor_id(), 1);
1628
1629         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1630                         "6320 NVMET Rcv ABTS:rjt xid x%x\n", xri);
1631
1632         /* Respond with BA_RJT accordingly */
1633         lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
1634 #endif
1635         return 0;
1636 }
1637
1638 static void
1639 lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
1640                         struct lpfc_nvmet_rcv_ctx *ctxp)
1641 {
1642         struct lpfc_sli_ring *pring;
1643         struct lpfc_iocbq *nvmewqeq;
1644         struct lpfc_iocbq *next_nvmewqeq;
1645         unsigned long iflags;
1646         struct lpfc_wcqe_complete wcqe;
1647         struct lpfc_wcqe_complete *wcqep;
1648
1649         pring = wq->pring;
1650         wcqep = &wcqe;
1651
1652         /* Fake an ABORT error code back to cmpl routine */
1653         memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
1654         bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
1655         wcqep->parameter = IOERR_ABORT_REQUESTED;
1656
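             /*
              * Walk the wqfull list under the ring lock, dropping the
              * lock around each completion call (and retaking it when
              * flushing all IOs) since the completion handler runs
              * outside this lock.
              */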
1657         spin_lock_irqsave(&pring->ring_lock, iflags);
1658         list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
1659                                  &wq->wqfull_list, list) {
1660                 if (ctxp) {
1661                         /* Checking for a specific IO to flush */
1662                         if (nvmewqeq->context2 == ctxp) {
1663                                 list_del(&nvmewqeq->list);
1664                                 spin_unlock_irqrestore(&pring->ring_lock,
1665                                                        iflags);
1666                                 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
1667                                                           wcqep);
1668                                 return;
1669                         }
1670                         continue;
1671                 } else {
1672                         /* Flush all IOs */
1673                         list_del(&nvmewqeq->list);
1674                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1675                         lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
1676                         spin_lock_irqsave(&pring->ring_lock, iflags);
1677                 }
1678         }
1679         if (!ctxp)
1680                 wq->q_flag &= ~HBA_NVMET_WQFULL;
1681         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1682 }
1683
1684 void
1685 lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
1686                           struct lpfc_queue *wq)
1687 {
1688 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1689         struct lpfc_sli_ring *pring;
1690         struct lpfc_iocbq *nvmewqeq;
1691         struct lpfc_nvmet_rcv_ctx *ctxp;
1692         unsigned long iflags;
1693         int rc;
1694
1695         /*
1696          * Some WQE slots are available, so try to re-issue anything
1697          * on the WQ wqfull_list.
1698          */
1699         pring = wq->pring;
1700         spin_lock_irqsave(&pring->ring_lock, iflags);
1701         while (!list_empty(&wq->wqfull_list)) {
1702                 list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
1703                                  list);
1704                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1705                 ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmewqeq->context2;
1706                 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
1707                 spin_lock_irqsave(&pring->ring_lock, iflags);
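                     /*
                      * -EBUSY means the WQ filled up again: put the WQE
                      * back at the head of wqfull_list to preserve
                      * submission order, and stop until more slots open.
                      */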
1708                 if (rc == -EBUSY) {
1709                         /* WQ was full again, so put it back on the list */
1710                         list_add(&nvmewqeq->list, &wq->wqfull_list);
1711                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1712                         return;
1713                 }
1714         }
1715         wq->q_flag &= ~HBA_NVMET_WQFULL;
1716         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1717
1718 #endif
1719 }
1720
1721 void
1722 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
1723 {
1724 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1725         struct lpfc_nvmet_tgtport *tgtp;
1726         struct lpfc_queue *wq;
1727         uint32_t qidx;
1728         DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
1729
1730         if (phba->nvmet_support == 0)
1731                 return;
1732         if (phba->targetport) {
1733                 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1734                 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
1735                         wq = phba->sli4_hba.hdwq[qidx].nvme_wq;
1736                         lpfc_nvmet_wqfull_flush(phba, wq, NULL);
1737                 }
1738                 tgtp->tport_unreg_cmp = &tport_unreg_cmp;
1739                 nvmet_fc_unregister_targetport(phba->targetport);
1740                 if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
1741                                         msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
1742                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1743                                         "6179 Unreg targetport %p timeout "
1744                                         "reached.\n", phba->targetport);
1745                 lpfc_nvmet_cleanup_io_context(phba);
1746         }
1747         phba->targetport = NULL;
1748 #endif
1749 }
1750
1751 /**
1752  * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
1753  * @phba: pointer to lpfc hba data structure.
1754  * @pring: pointer to a SLI ring.
1755  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1756  *
1757  * This routine processes an unsolicited NVME LS request received in an
1758  * HBQ buffer. It allocates a receive context for the exchange, saves the
1759  * OX_ID, S_ID and payload size from the frame, and hands the LS payload
1760  * to the nvmet transport via nvmet_fc_rcv_ls_req(). If the request
1761  * cannot be delivered, the receive buffer is freed, the drop is counted,
1762  * and an abort is issued for the exchange.
1763  **/
1764 static void
1765 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1766                            struct hbq_dmabuf *nvmebuf)
1767 {
1768 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1769         struct lpfc_nvmet_tgtport *tgtp;
1770         struct fc_frame_header *fc_hdr;
1771         struct lpfc_nvmet_rcv_ctx *ctxp;
1772         uint32_t *payload;
1773         uint32_t size, oxid, sid, rc;
1774
1775         fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1776         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1777
1778         if (!phba->targetport) {
1779                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1780                                 "6154 LS Drop IO x%x\n", oxid);
1781                 oxid = 0;
1782                 size = 0;
1783                 sid = 0;
1784                 ctxp = NULL;
1785                 goto dropit;
1786         }
1787
1788         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1789         payload = (uint32_t *)(nvmebuf->dbuf.virt);
1790         size = bf_get(lpfc_rcqe_length,  &nvmebuf->cq_event.cqe.rcqe_cmpl);
1791         sid = sli4_sid_from_fc_hdr(fc_hdr);
1792
1793         ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
1794         if (ctxp == NULL) {
1795                 atomic_inc(&tgtp->rcv_ls_req_drop);
1796                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1797                                 "6155 LS Drop IO x%x: Alloc\n",
1798                                 oxid);
1799 dropit:
1800                 lpfc_nvmeio_data(phba, "NVMET LS  DROP: "
1801                                  "xri x%x sz %d from %06x\n",
1802                                  oxid, size, sid);
1803                 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1804                 return;
1805         }
1806         ctxp->phba = phba;
1807         ctxp->size = size;
1808         ctxp->oxid = oxid;
1809         ctxp->sid = sid;
1810         ctxp->wqeq = NULL;
1811         ctxp->state = LPFC_NVMET_STE_LS_RCV;
1812         ctxp->entry_cnt = 1;
1813         ctxp->rqb_buffer = (void *)nvmebuf;
1814         ctxp->hdwq = &phba->sli4_hba.hdwq[0];
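             /* NVME LS exchanges are always serviced via hardware queue 0. */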
1815
1816         lpfc_nvmeio_data(phba, "NVMET LS   RCV: xri x%x sz %d from %06x\n",
1817                          oxid, size, sid);
1818         /*
1819          * The calling sequence should be:
1820          * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done.
1821          * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
1822          */
1823         atomic_inc(&tgtp->rcv_ls_req_in);
1824         rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
1825                                  payload, size);
1826
1827         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1828                         "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
1829                         "%08x %08x %08x\n", size, rc,
1830                         *payload, *(payload+1), *(payload+2),
1831                         *(payload+3), *(payload+4), *(payload+5));
1832
1833         if (rc == 0) {
1834                 atomic_inc(&tgtp->rcv_ls_req_out);
1835                 return;
1836         }
1837
1838         lpfc_nvmeio_data(phba, "NVMET LS  DROP: xri x%x sz %d from %06x\n",
1839                          oxid, size, sid);
1840
1841         atomic_inc(&tgtp->rcv_ls_req_drop);
1842         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1843                         "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
1844                         ctxp->oxid, rc);
1845
1846         /* We assume a rcv'ed cmd ALWAYS fits into 1 buffer */
1847         lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1848
1849         atomic_inc(&tgtp->xmt_ls_abort);
1850         lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
1851 #endif
1852 }
1853
1854 static void
1855 lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
1856 {
1857 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1858         struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
1859         struct lpfc_hba *phba = ctxp->phba;
1860         struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
1861         struct lpfc_nvmet_tgtport *tgtp;
1862         uint32_t *payload, qno;
1863         uint32_t rc;
1864         unsigned long iflags;
1865
1866         if (!nvmebuf) {
1867                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1868                         "6159 process_rcv_fcp_req, nvmebuf is NULL, "
1869                         "oxid: x%x flg: x%x state: x%x\n",
1870                         ctxp->oxid, ctxp->flag, ctxp->state);
1871                 spin_lock_irqsave(&ctxp->ctxlock, iflags);
1872                 lpfc_nvmet_defer_release(phba, ctxp);
1873                 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
1874                 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1875                                                  ctxp->oxid);
1876                 return;
1877         }
1878
1879         payload = (uint32_t *)(nvmebuf->dbuf.virt);
1880         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1881 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1882         if (ctxp->ts_isr_cmd)
1883                 ctxp->ts_cmd_nvme = ktime_get_ns();
1884 #endif
1885         /*
1886          * The calling sequence should be:
1887          * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done.
1888          * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
1889          * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
1890          * the NVME command / FC header has been stored.
1891          * A buffer has already been reposted for this IO, so just free
1892          * the nvmebuf.
1893          */
1894         rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
1895                                   payload, ctxp->size);
1896         /* Process FCP command */
1897         if (rc == 0) {
1898                 atomic_inc(&tgtp->rcv_fcp_cmd_out);
1899                 spin_lock_irqsave(&ctxp->ctxlock, iflags);
1900                 if ((ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) ||
1901                     (nvmebuf != ctxp->rqb_buffer)) {
1902                         spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
1903                         return;
1904                 }
1905                 ctxp->rqb_buffer = NULL;
1906                 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
1907                 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
1908                 return;
1909         }
1910
1911         /* Processing of FCP command is deferred */
1912         if (rc == -EOVERFLOW) {
1913                 lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
1914                                  "from %06x\n",
1915                                  ctxp->oxid, ctxp->size, ctxp->sid);
1916                 atomic_inc(&tgtp->rcv_fcp_cmd_out);
1917                 atomic_inc(&tgtp->defer_fod);
1918                 spin_lock_irqsave(&ctxp->ctxlock, iflags);
1919                 if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
1920                         spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
1921                         return;
1922                 }
1923                 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
1924                 /*
1925                  * Post a replacement DMA buffer to RQ and defer
1926                  * freeing rcv buffer till .defer_rcv callback
1927                  */
1928                 qno = nvmebuf->idx;
1929                 lpfc_post_rq_buffer(
1930                         phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
1931                         phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
1932                 return;
1933         }
1934         atomic_inc(&tgtp->rcv_fcp_cmd_drop);
1935         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1936                         "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
1937                         ctxp->oxid, rc,
1938                         atomic_read(&tgtp->rcv_fcp_cmd_in),
1939                         atomic_read(&tgtp->rcv_fcp_cmd_out),
1940                         atomic_read(&tgtp->xmt_fcp_release));
1941         lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
1942                          ctxp->oxid, ctxp->size, ctxp->sid);
1943         spin_lock_irqsave(&ctxp->ctxlock, iflags);
1944         lpfc_nvmet_defer_release(phba, ctxp);
1945         spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
1946         lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
1947 #endif
1948 }
1949
1950 static void
1951 lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
1952 {
1953 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1954         struct lpfc_nvmet_ctxbuf *ctx_buf =
1955                 container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);
1956
1957         lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
1958 #endif
1959 }
1960
1961 static struct lpfc_nvmet_ctxbuf *
1962 lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
1963                              struct lpfc_nvmet_ctx_info *current_infop)
1964 {
1965 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1966         struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
1967         struct lpfc_nvmet_ctx_info *get_infop;
1968         int i;
1969
1970         /*
1971          * The current_infop for the MRQ an NVME command IU was received
1972          * on is empty. Our goal is to replenish this MRQ's context
1973          * list from another CPU's list.
1974          *
1975          * First we need to pick a context list to start looking on.
1976          * nvmet_ctx_start_cpu is the CPU that had contexts available
1977          * the last time we replenished this MRQ; nvmet_ctx_next_cpu
1978          * is simply the next sequential CPU for this MRQ.
1979          */
1980         if (current_infop->nvmet_ctx_start_cpu)
1981                 get_infop = current_infop->nvmet_ctx_start_cpu;
1982         else
1983                 get_infop = current_infop->nvmet_ctx_next_cpu;
1984
1985         for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
1986                 if (get_infop == current_infop) {
1987                         get_infop = get_infop->nvmet_ctx_next_cpu;
1988                         continue;
1989                 }
1990                 spin_lock(&get_infop->nvmet_ctx_list_lock);
1991
1992                 /* Just take the entire context list, if there are any */
1993                 if (get_infop->nvmet_ctx_list_cnt) {
1994                         list_splice_init(&get_infop->nvmet_ctx_list,
1995                                     &current_infop->nvmet_ctx_list);
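                             /*
                              * The new count is the donor's count minus
                              * one: a ctx_buf is popped from the head
                              * below and handed straight to the caller.
                              */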
1996                         current_infop->nvmet_ctx_list_cnt =
1997                                 get_infop->nvmet_ctx_list_cnt - 1;
1998                         get_infop->nvmet_ctx_list_cnt = 0;
1999                         spin_unlock(&get_infop->nvmet_ctx_list_lock);
2000
2001                         current_infop->nvmet_ctx_start_cpu = get_infop;
2002                         list_remove_head(&current_infop->nvmet_ctx_list,
2003                                          ctx_buf, struct lpfc_nvmet_ctxbuf,
2004                                          list);
2005                         return ctx_buf;
2006                 }
2007
2008                 /* Otherwise, move on to the next CPU for this MRQ */
2009                 spin_unlock(&get_infop->nvmet_ctx_list_lock);
2010                 get_infop = get_infop->nvmet_ctx_next_cpu;
2011         }
2012
2013 #endif
2014         /* Nothing found, all contexts for the MRQ are in-flight */
2015         return NULL;
2016 }
2017
2018 /**
2019  * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
2020  * @phba: pointer to lpfc hba data structure.
2021  * @idx: relative index of MRQ vector
2022  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
2023  * @isr_timestamp: in jiffies.
2024  * @cqflag: cq processing information regarding workload.
2025  *
2026  * This routine processes an unsolicited NVME FCP command IU received on
2027  * one of the NVMET multi-receive queues (MRQs). It obtains a receive
2028  * context from the per-CPU context list for this MRQ, replenishing the
2029  * list from another CPU if it is empty, initializes the context from
2030  * the FC header, and passes the command to the nvmet transport either
2031  * inline or via deferred work, depending on the CQ load (@cqflag).
2032  **/
2033 static void
2034 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
2035                             uint32_t idx,
2036                             struct rqb_dmabuf *nvmebuf,
2037                             uint64_t isr_timestamp,
2038                             uint8_t cqflag)
2039 {
2040         struct lpfc_nvmet_rcv_ctx *ctxp;
2041         struct lpfc_nvmet_tgtport *tgtp;
2042         struct fc_frame_header *fc_hdr;
2043         struct lpfc_nvmet_ctxbuf *ctx_buf;
2044         struct lpfc_nvmet_ctx_info *current_infop;
2045         uint32_t size, oxid, sid, qno;
2046         unsigned long iflag;
2047         int current_cpu;
2048
2049         if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
2050                 return;
2051
2052         ctx_buf = NULL;
2053         if (!nvmebuf || !phba->targetport) {
2054                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2055                                 "6157 NVMET FCP Drop IO\n");
2056                 if (nvmebuf)
2057                         lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2058                 return;
2059         }
2060
2061         /*
2062          * Get a pointer to the context list for this MRQ based on
2063          * the CPU this MRQ IRQ is associated with. If the CPU association
2064          * changes from our initial assumption, the context list could
2065          * be empty, thus it would need to be replenished with the
2066          * context list from another CPU for this MRQ.
2067          */
2068         current_cpu = raw_smp_processor_id();
2069         current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
2070         spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
2071         if (current_infop->nvmet_ctx_list_cnt) {
2072                 list_remove_head(&current_infop->nvmet_ctx_list,
2073                                  ctx_buf, struct lpfc_nvmet_ctxbuf, list);
2074                 current_infop->nvmet_ctx_list_cnt--;
2075         } else {
2076                 ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
2077         }
2078         spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
2079
2080         fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
2081         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2082         size = nvmebuf->bytes_recv;
2083
2084 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2085         if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
2086                 if (current_cpu < LPFC_CHECK_CPU_CNT) {
2087                         if (idx != current_cpu)
2088                                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2089                                                 "6703 CPU Check rcv: "
2090                                                 "cpu %d expect %d\n",
2091                                                 current_cpu, idx);
2092                         phba->sli4_hba.hdwq[idx].cpucheck_rcv_io[current_cpu]++;
2093                 }
2094         }
2095 #endif
2096
2097         lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d CPU %02x\n",
2098                          oxid, size, raw_smp_processor_id());
2099
2100         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2101
2102         if (!ctx_buf) {
2103                 /* Queue this NVME IO to process later */
2104                 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
2105                 list_add_tail(&nvmebuf->hbuf.list,
2106                               &phba->sli4_hba.lpfc_nvmet_io_wait_list);
2107                 phba->sli4_hba.nvmet_io_wait_cnt++;
2108                 phba->sli4_hba.nvmet_io_wait_total++;
2109                 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
2110                                        iflag);
2111
2112                 /* Post a brand new DMA buffer to RQ */
2113                 qno = nvmebuf->idx;
2114                 lpfc_post_rq_buffer(
2115                         phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2116                         phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2117
2118                 atomic_inc(&tgtp->defer_ctx);
2119                 return;
2120         }
2121
2122         sid = sli4_sid_from_fc_hdr(fc_hdr);
2123
2124         ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
2125         if (ctxp->state != LPFC_NVMET_STE_FREE) {
2126                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2127                                 "6414 NVMET Context corrupt %d %d oxid x%x\n",
2128                                 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
2129         }
2130         ctxp->wqeq = NULL;
2131         ctxp->txrdy = NULL;
2132         ctxp->offset = 0;
2133         ctxp->phba = phba;
2134         ctxp->size = size;
2135         ctxp->oxid = oxid;
2136         ctxp->sid = sid;
2137         ctxp->idx = idx;
2138         ctxp->state = LPFC_NVMET_STE_RCV;
2139         ctxp->entry_cnt = 1;
2140         ctxp->flag = 0;
2141         ctxp->ctxbuf = ctx_buf;
2142         ctxp->rqb_buffer = (void *)nvmebuf;
2143         ctxp->hdwq = NULL;
2144         spin_lock_init(&ctxp->ctxlock);
2145
2146 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2147         if (isr_timestamp)
2148                 ctxp->ts_isr_cmd = isr_timestamp;
2149         ctxp->ts_cmd_nvme = 0;
2150         ctxp->ts_nvme_data = 0;
2151         ctxp->ts_data_wqput = 0;
2152         ctxp->ts_isr_data = 0;
2153         ctxp->ts_data_nvme = 0;
2154         ctxp->ts_nvme_status = 0;
2155         ctxp->ts_status_wqput = 0;
2156         ctxp->ts_isr_status = 0;
2157         ctxp->ts_status_nvme = 0;
2158 #endif
2159
2160         atomic_inc(&tgtp->rcv_fcp_cmd_in);
2161         /* check for cq processing load */
2162         if (!cqflag) {
2163                 lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2164                 return;
2165         }
2166
2167         if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
2168                 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2169                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
2170                                 "6325 Unable to queue work for oxid x%x. "
2171                                 "FCP Drop IO [x%x x%x x%x]\n",
2172                                 ctxp->oxid,
2173                                 atomic_read(&tgtp->rcv_fcp_cmd_in),
2174                                 atomic_read(&tgtp->rcv_fcp_cmd_out),
2175                                 atomic_read(&tgtp->xmt_fcp_release));
2176
2177                 spin_lock_irqsave(&ctxp->ctxlock, iflag);
2178                 lpfc_nvmet_defer_release(phba, ctxp);
2179                 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
2180                 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
2181         }
2182 }
2183
2184 /**
2185  * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
2186  * @phba: pointer to lpfc hba data structure.
2187  * @pring: pointer to a SLI ring.
2188  * @nvmebuf: pointer to received nvme data structure.
2189  *
2190  * This routine is used to process an unsolicited event received from an SLI
2191  * (Service Level Interface) ring. The actual processing of the data buffer
2192  * associated with the unsolicited event is done by invoking the routine
2193  * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
2194  * SLI RQ on which the unsolicited event was received.
2195  **/
2196 void
2197 lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2198                           struct lpfc_iocbq *piocb)
2199 {
2200         struct lpfc_dmabuf *d_buf;
2201         struct hbq_dmabuf *nvmebuf;
2202
2203         d_buf = piocb->context2;
2204         nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2205
2206         if (phba->nvmet_support == 0) {
2207                 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2208                 return;
2209         }
2210         lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
2211 }
2212
2213 /**
2214  * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
2215  * @phba: pointer to lpfc hba data structure.
2216  * @idx: relative index of MRQ vector
2217  * @nvmebuf: pointer to received nvme data structure.
2218  * @isr_timestamp: in jiffies.
2219  * @cqflag: cq processing information regarding workload.
2220  *
2221  * This routine is used to process an unsolicited event received from an SLI
2222  * (Service Level Interface) ring. The actual processing of the data buffer
2223  * associated with the unsolicited event is done by invoking the routine
2224  * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
2225  * SLI RQ on which the unsolicited event was received.
2226  **/
2227 void
2228 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
2229                            uint32_t idx,
2230                            struct rqb_dmabuf *nvmebuf,
2231                            uint64_t isr_timestamp,
2232                            uint8_t cqflag)
2233 {
2234         if (phba->nvmet_support == 0) {
2235                 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2236                 return;
2237         }
2238         lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
2239 }
2240
2241 /**
2242  * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
2243  * @phba: pointer to lpfc hba data structure.
2244  * @ctxp: Context info for NVME LS Request
2245  * @rspbuf: DMA address of the NVME LS response payload.
2246  * @rspsize: size of the NVME LS response payload.
2247  *
2248  * This routine allocates an lpfc-WQE data structure from the driver
2249  * lpfc-WQE free-list and prepares the WQE with the parameters passed
2250  * into the routine so that an NVME LS response can be transmitted on
2251  * the exchange recorded in @ctxp. The WQE is built as an XMIT_SEQUENCE
2252  * command: the Buffer Descriptor Entry (BDE) in words 0-2 points to
2253  * the response payload described by @rspbuf and @rspsize, and the
2254  * common words carry the RPI of the destination NPORT, this WQE's XRI
2255  * and request tag, and the OX_ID originally received with the LS
2256  * request. The destination node is looked up by the S_ID saved in
2257  * @ctxp; if no usable ndlp exists, the WQE is released and NULL is
2258  * returned. The reference count on the ndlp is incremented by 1 and
2259  * the reference is put into context1 of the WQE data structure so the
2260  * command's callback function can access the ndlp later.
2261  *
2262  * Return code
2263  *   Pointer to the newly allocated/prepared nvme wqe data structure
2264  *   NULL - when nvme wqe data structure allocation/preparation failed
2265  **/
2266 static struct lpfc_iocbq *
2267 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
2268                        struct lpfc_nvmet_rcv_ctx *ctxp,
2269                        dma_addr_t rspbuf, uint16_t rspsize)
2270 {
2271         struct lpfc_nodelist *ndlp;
2272         struct lpfc_iocbq *nvmewqe;
2273         union lpfc_wqe128 *wqe;
2274
2275         if (!lpfc_is_link_up(phba)) {
2276                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2277                                 "6104 NVMET prep LS wqe: link err: "
2278                                 "NPORT x%x oxid:x%x ste %d\n",
2279                                 ctxp->sid, ctxp->oxid, ctxp->state);
2280                 return NULL;
2281         }
2282
2283         /* Allocate buffer for command wqe */
2284         nvmewqe = lpfc_sli_get_iocbq(phba);
2285         if (nvmewqe == NULL) {
2286                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2287                                 "6105 NVMET prep LS wqe: No WQE: "
2288                                 "NPORT x%x oxid x%x ste %d\n",
2289                                 ctxp->sid, ctxp->oxid, ctxp->state);
2290                 return NULL;
2291         }
2292
2293         ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2294         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2295             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2296             (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2297                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2298                                 "6106 NVMET prep LS wqe: No ndlp: "
2299                                 "NPORT x%x oxid x%x ste %d\n",
2300                                 ctxp->sid, ctxp->oxid, ctxp->state);
2301                 goto nvme_wqe_free_wqeq_exit;
2302         }
2303         ctxp->wqeq = nvmewqe;
2304
2305         /* prevent preparing wqe with NULL ndlp reference */
2306         nvmewqe->context1 = lpfc_nlp_get(ndlp);
2307         if (nvmewqe->context1 == NULL)
2308                 goto nvme_wqe_free_wqeq_exit;
2309         nvmewqe->context2 = ctxp;
2310
2311         wqe = &nvmewqe->wqe;
2312         memset(wqe, 0, sizeof(union lpfc_wqe));
2313
2314         /* Words 0 - 2 */
2315         wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2316         wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
2317         wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
2318         wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
2319
2320         /* Word 3 */
2321
2322         /* Word 4 */
2323
2324         /* Word 5 */
2325         bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
2326         bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
2327         bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
2328         bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
2329         bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
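             /*
              * These R_CTL/TYPE values frame the sequence as an FC-4
              * link-service reply (FC_RCTL_ELS4_REP) of type NVME, the
              * framing FC-NVMe defines for LS responses.
              */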
2330
2331         /* Word 6 */
2332         bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
2333                phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2334         bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
2335
2336         /* Word 7 */
2337         bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
2338                CMD_XMIT_SEQUENCE64_WQE);
2339         bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
2340         bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
2341         bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
2342
2343         /* Word 8 */
2344         wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
2345
2346         /* Word 9 */
2347         bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
2348         /* Needs to be set by caller */
2349         bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
2350
2351         /* Word 10 */
2352         bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
2353         bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2354         bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
2355                LPFC_WQE_LENLOC_WORD12);
2356         bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
2357
2358         /* Word 11 */
2359         bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
2360                LPFC_WQE_CQ_ID_DEFAULT);
2361         bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
2362                OTHER_COMMAND);
2363
2364         /* Word 12 */
2365         wqe->xmit_sequence.xmit_len = rspsize;
2366
2367         nvmewqe->retry = 1;
2368         nvmewqe->vport = phba->pport;
2369         nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2370         nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
2371
2372         /* Xmit NVMET response to remote NPORT <did> */
2373         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2374                         "6039 Xmit NVMET LS response to remote "
2375                         "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
2376                         ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
2377                         rspsize);
2378         return nvmewqe;
2379
2380 nvme_wqe_free_wqeq_exit:
2381         nvmewqe->context2 = NULL;
2382         nvmewqe->context3 = NULL;
2383         lpfc_sli_release_iocbq(phba, nvmewqe);
2384         return NULL;
2385 }
2386
2387
2388 static struct lpfc_iocbq *
2389 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
2390                         struct lpfc_nvmet_rcv_ctx *ctxp)
2391 {
2392         struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
2393         struct lpfc_nvmet_tgtport *tgtp;
2394         struct sli4_sge *sgl;
2395         struct lpfc_nodelist *ndlp;
2396         struct lpfc_iocbq *nvmewqe;
2397         struct scatterlist *sgel;
2398         union lpfc_wqe128 *wqe;
2399         struct ulp_bde64 *bde;
2400         uint32_t *txrdy;
2401         dma_addr_t physaddr;
2402         int i, cnt;
2403         int do_pbde;
2404         int xc = 1;
2405
2406         if (!lpfc_is_link_up(phba)) {
2407                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2408                                 "6107 NVMET prep FCP wqe: link err: "
2409                                 "NPORT x%x oxid x%x ste %d\n",
2410                                 ctxp->sid, ctxp->oxid, ctxp->state);
2411                 return NULL;
2412         }
2413
2414         ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2415         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2416             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2417              (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2418                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2419                                 "6108 NVMET prep FCP wqe: no ndlp: "
2420                                 "NPORT x%x oxid x%x ste %d\n",
2421                                 ctxp->sid, ctxp->oxid, ctxp->state);
2422                 return NULL;
2423         }
2424
2425         if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
2426                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2427                                 "6109 NVMET prep FCP wqe: seg cnt err: "
2428                                 "NPORT x%x oxid x%x ste %d cnt %d\n",
2429                                 ctxp->sid, ctxp->oxid, ctxp->state,
2430                                 phba->cfg_nvme_seg_cnt);
2431                 return NULL;
2432         }
2433
2434         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2435         nvmewqe = ctxp->wqeq;
2436         if (nvmewqe == NULL) {
2437                 /* Allocate buffer for command wqe */
2438                 nvmewqe = ctxp->ctxbuf->iocbq;
2439                 if (nvmewqe == NULL) {
2440                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2441                                         "6110 NVMET prep FCP wqe: No "
2442                                         "WQE: NPORT x%x oxid x%x ste %d\n",
2443                                         ctxp->sid, ctxp->oxid, ctxp->state);
2444                         return NULL;
2445                 }
2446                 ctxp->wqeq = nvmewqe;
2447                 xc = 0; /* create new XRI */
2448                 nvmewqe->sli4_lxritag = NO_XRI;
2449                 nvmewqe->sli4_xritag = NO_XRI;
2450         }
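             /*
              * Note: the command templates copied below default to XC=1.
              * xc is cleared only when no WQE was cached in ctxp->wqeq,
              * i.e. this is the first WQE of the exchange; the per-op
              * code then clears the XC bit so a new XRI is created
              * (see "create new XRI" above).
              */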
2451
2452         /* Sanity check */
2453         if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
2454             (ctxp->entry_cnt == 1)) ||
2455             (ctxp->state == LPFC_NVMET_STE_DATA)) {
2456                 wqe = &nvmewqe->wqe;
2457         } else {
2458                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2459                                 "6111 Wrong state NVMET FCP: %d  cnt %d\n",
2460                                 ctxp->state, ctxp->entry_cnt);
2461                 return NULL;
2462         }
2463
2464         sgl  = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
2465         switch (rsp->op) {
2466         case NVMET_FCOP_READDATA:
2467         case NVMET_FCOP_READDATA_RSP:
2468                 /* From the tsend template, initialize words 7 - 11 */
2469                 memcpy(&wqe->words[7],
2470                        &lpfc_tsend_cmd_template.words[7],
2471                        sizeof(uint32_t) * 5);
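                     /*
                      * Words 7-11 come from the precomputed TSEND command
                      * template; only the per-IO bits (XC, AR, SUP/IRSP)
                      * are patched below.
                      */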
2472
2473                 /* Words 0 - 2 : The first sg segment */
2474                 sgel = &rsp->sg[0];
2475                 physaddr = sg_dma_address(sgel);
2476                 wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2477                 wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
2478                 wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
2479                 wqe->fcp_tsend.bde.addrHigh =
2480                         cpu_to_le32(putPaddrHigh(physaddr));
2481
2482                 /* Word 3 */
2483                 wqe->fcp_tsend.payload_offset_len = 0;
2484
2485                 /* Word 4 */
2486                 wqe->fcp_tsend.relative_offset = ctxp->offset;
2487
2488                 /* Word 5 */
2489                 wqe->fcp_tsend.reserved = 0;
2490
2491                 /* Word 6 */
2492                 bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
2493                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2494                 bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
2495                        nvmewqe->sli4_xritag);
2496
2497                 /* Word 7 - set ar later */
2498
2499                 /* Word 8 */
2500                 wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
2501
2502                 /* Word 9 */
2503                 bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
2504                 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
2505
2506                 /* Word 10 - set wqes later, in template xc=1 */
2507                 if (!xc)
2508                         bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
2509
2510                 /* Word 11 - set sup, irsp, irsplen later */
2511                 do_pbde = 0;
2512
2513                 /* Word 12 */
2514                 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2515
2516                 /* Setup 2 SKIP SGEs */
2517                 sgl->addr_hi = 0;
2518                 sgl->addr_lo = 0;
2519                 sgl->word2 = 0;
2520                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2521                 sgl->word2 = cpu_to_le32(sgl->word2);
2522                 sgl->sge_len = 0;
2523                 sgl++;
2524                 sgl->addr_hi = 0;
2525                 sgl->addr_lo = 0;
2526                 sgl->word2 = 0;
2527                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2528                 sgl->word2 = cpu_to_le32(sgl->word2);
2529                 sgl->sge_len = 0;
2530                 sgl++;
2531                 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
2532                         atomic_inc(&tgtp->xmt_fcp_read_rsp);
2533
2534                         /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2535
2536                         if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
2537                                 if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
2538                                         bf_set(wqe_sup,
2539                                                &wqe->fcp_tsend.wqe_com, 1);
2540                         } else {
2541                                 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
2542                                 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
2543                                 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
2544                                        ((rsp->rsplen >> 2) - 1));
2545                                 memcpy(&wqe->words[16], rsp->rspaddr,
2546                                        rsp->rsplen);
2547                         }
2548                 } else {
2549                         atomic_inc(&tgtp->xmt_fcp_read);
2550
2551                         /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2552                         bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
2553                 }
2554                 break;
2555
2556         case NVMET_FCOP_WRITEDATA:
2557                 /* From the treceive template, initialize words 3 - 11 */
2558                 memcpy(&wqe->words[3],
2559                        &lpfc_treceive_cmd_template.words[3],
2560                        sizeof(uint32_t) * 9);
2561
2562                 /* Words 0 - 2 : The first sg segment */
2563                 txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
2564                                        GFP_KERNEL, &physaddr);
2565                 if (!txrdy) {
2566                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2567                                         "6041 Bad txrdy buffer: oxid x%x\n",
2568                                         ctxp->oxid);
2569                         return NULL;
2570                 }
2571                 ctxp->txrdy = txrdy;
2572                 ctxp->txrdy_phys = physaddr;
2573                 wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2574                 wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
2575                 wqe->fcp_treceive.bde.addrLow =
2576                         cpu_to_le32(putPaddrLow(physaddr));
2577                 wqe->fcp_treceive.bde.addrHigh =
2578                         cpu_to_le32(putPaddrHigh(physaddr));
2579
2580                 /* Word 4 */
2581                 wqe->fcp_treceive.relative_offset = ctxp->offset;
2582
2583                 /* Word 6 */
2584                 bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
2585                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2586                 bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
2587                        nvmewqe->sli4_xritag);
2588
2589                 /* Word 7 */
2590
2591                 /* Word 8 */
2592                 wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
2593
2594                 /* Word 9 */
2595                 bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
2596                 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
2597
2598                 /* Word 10 - in template xc=1 */
2599                 if (!xc)
2600                         bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
2601
2602                 /* Word 11 - set pbde later */
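                 /* PBDE mirrors the first data SGE into WQE words 13-15
                  * (see the common loop below) so the adapter can start the
                  * transfer without a separate SGL fetch.
                  */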
2603                 if (phba->cfg_enable_pbde) {
2604                         do_pbde = 1;
2605                 } else {
2606                         bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
2607                         do_pbde = 0;
2608                 }
2609
2610                 /* Word 12 */
2611                 wqe->fcp_treceive.fcp_data_len = rsp->transfer_length;
2612
2613                 /* Setup 1 TXRDY and 1 SKIP SGE */
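                 /* txrdy holds a 12-byte transfer-ready payload: relative
                  * offset (zero here), burst length, and a reserved word.
                  */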
2614                 txrdy[0] = 0;
2615                 txrdy[1] = cpu_to_be32(rsp->transfer_length);
2616                 txrdy[2] = 0;
2617
2618                 sgl->addr_hi = putPaddrHigh(physaddr);
2619                 sgl->addr_lo = putPaddrLow(physaddr);
2620                 sgl->word2 = 0;
2621                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2622                 sgl->word2 = cpu_to_le32(sgl->word2);
2623                 sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
2624                 sgl++;
2625                 sgl->addr_hi = 0;
2626                 sgl->addr_lo = 0;
2627                 sgl->word2 = 0;
2628                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2629                 sgl->word2 = cpu_to_le32(sgl->word2);
2630                 sgl->sge_len = 0;
2631                 sgl++;
2632                 atomic_inc(&tgtp->xmt_fcp_write);
2633                 break;
2634
2635         case NVMET_FCOP_RSP:
2636                 /* From the trsp template, initialize words 4 - 11 */
2637                 memcpy(&wqe->words[4],
2638                        &lpfc_trsp_cmd_template.words[4],
2639                        sizeof(uint32_t) * 8);
2640
2641                 /* Words 0 - 2 */
2642                 physaddr = rsp->rspdma;
2643                 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2644                 wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
2645                 wqe->fcp_trsp.bde.addrLow =
2646                         cpu_to_le32(putPaddrLow(physaddr));
2647                 wqe->fcp_trsp.bde.addrHigh =
2648                         cpu_to_le32(putPaddrHigh(physaddr));
2649
2650                 /* Word 3 */
2651                 wqe->fcp_trsp.response_len = rsp->rsplen;
2652
2653                 /* Word 6 */
2654                 bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
2655                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2656                 bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
2657                        nvmewqe->sli4_xritag);
2658
2659                 /* Word 7 */
2660
2661                 /* Word 8 */
2662                 wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
2663
2664                 /* Word 9 */
2665                 bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
2666                 bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
2667
2668                 /* Word 10 */
2669                 if (xc)
2670                         bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);
2671
2672                 /* Word 11 */
2673                 /* In template wqes=0 irsp=0 irsplen=0 - good response */
2674                 if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
2675                         /* Bad response - embed it */
2676                         bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
2677                         bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
2678                         bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
2679                                ((rsp->rsplen >> 2) - 1));
2680                         memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
2681                 }
2682                 do_pbde = 0;
2683
2684                 /* Word 12 */
2685                 wqe->fcp_trsp.rsvd_12_15[0] = 0;
2686
2687                 /* Use rspbuf, NOT sg list */
2688                 rsp->sg_cnt = 0;
2689                 sgl->word2 = 0;
2690                 atomic_inc(&tgtp->xmt_fcp_rsp);
2691                 break;
2692
2693         default:
2694                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2695                                 "6064 Unknown Rsp Op %d\n",
2696                                 rsp->op);
2697                 return NULL;
2698         }
2699
2700         nvmewqe->retry = 1;
2701         nvmewqe->vport = phba->pport;
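             /* Allow three times R_A_TOV plus driver slack before timeout */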
2702         nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2703         nvmewqe->context1 = ndlp;
2704
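             /* Build one data SGE per DMA segment, advancing the relative
              * offset as we go; segment 0 may also be mirrored into the
              * PBDE at words 13-15 when enabled above.
              */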
2705         for (i = 0; i < rsp->sg_cnt; i++) {
2706                 sgel = &rsp->sg[i];
2707                 physaddr = sg_dma_address(sgel);
2708                 cnt = sg_dma_len(sgel);
2709                 sgl->addr_hi = putPaddrHigh(physaddr);
2710                 sgl->addr_lo = putPaddrLow(physaddr);
2711                 sgl->word2 = 0;
2712                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2713                 bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
2714                 if ((i+1) == rsp->sg_cnt)
2715                         bf_set(lpfc_sli4_sge_last, sgl, 1);
2716                 sgl->word2 = cpu_to_le32(sgl->word2);
2717                 sgl->sge_len = cpu_to_le32(cnt);
2718                 if (i == 0) {
2719                         bde = (struct ulp_bde64 *)&wqe->words[13];
2720                         if (do_pbde) {
2721                                 /* Words 13-15  (PBDE) */
2722                                 bde->addrLow = sgl->addr_lo;
2723                                 bde->addrHigh = sgl->addr_hi;
2724                                 bde->tus.f.bdeSize =
2725                                         le32_to_cpu(sgl->sge_len);
2726                                 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2727                                 bde->tus.w = cpu_to_le32(bde->tus.w);
2728                         } else {
2729                                 memset(bde, 0, sizeof(struct ulp_bde64));
2730                         }
2731                 }
2732                 sgl++;
2733                 ctxp->offset += cnt;
2734         }
2735         ctxp->state = LPFC_NVMET_STE_DATA;
2736         ctxp->entry_cnt++;
2737         return nvmewqe;
2738 }
2739
2740 /**
2741  * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
2742  * @phba: Pointer to HBA context object.
2743  * @cmdwqe: Pointer to driver command WQE object.
2744  * @wcqe: Pointer to driver response CQE object.
2745  *
2746  * The function is called from the SLI ring event handler with no
2747  * lock held. It is the completion handler for an NVME ABTS for an FCP
2748  * command, and it frees the memory resources used for that command.
2749  **/
2750 static void
2751 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2752                              struct lpfc_wcqe_complete *wcqe)
2753 {
2754         struct lpfc_nvmet_rcv_ctx *ctxp;
2755         struct lpfc_nvmet_tgtport *tgtp;
2756         uint32_t result;
2757         unsigned long flags;
2758         bool released = false;
2759
2760         ctxp = cmdwqe->context2;
2761         result = wcqe->parameter;
2762
2763         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2764         if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2765                 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2766
2767         spin_lock_irqsave(&ctxp->ctxlock, flags);
2768         ctxp->state = LPFC_NVMET_STE_DONE;
2769
2770         /* Check whether a free-context call has already been received
2771          * while this abort was being processed.
2772          */
2773         if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2774             !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2775                 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
2776                 list_del(&ctxp->list);
2777                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
2778                 released = true;
2779         }
2780         ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2781         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2782         atomic_inc(&tgtp->xmt_abort_rsp);
2783
2784         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2785                         "6165 ABORT cmpl: xri x%x flg x%x (%d) "
2786                         "WCQE: %08x %08x %08x %08x\n",
2787                         ctxp->oxid, ctxp->flag, released,
2788                         wcqe->word0, wcqe->total_data_placed,
2789                         result, wcqe->word3);
2790
2791         cmdwqe->context2 = NULL;
2792         cmdwqe->context3 = NULL;
2793         /*
2794          * If the transport has released the ctx, it can be reused now;
2795          * otherwise it is recycled by the transport's release call.
2796          */
2797         if (released)
2798                 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
2799
2800         /* This is the iocbq for the abort, not the command */
2801         lpfc_sli_release_iocbq(phba, cmdwqe);
2802
2803         /* Since iaab/iaar are NOT set, there is no work left.
2804          * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
2805          * should have been called already.
2806          */
2807 }
2808
2809 /**
2810  * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
2811  * @phba: Pointer to HBA context object.
2812  * @cmdwqe: Pointer to driver command WQE object.
2813  * @wcqe: Pointer to driver response CQE object.
2814  *
2815  * The function is called from the SLI ring event handler with no
2816  * lock held. It is the completion handler for an NVME ABTS for an FCP
2817  * command, and it frees the memory resources used for that command.
2818  **/
2819 static void
2820 lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2821                                struct lpfc_wcqe_complete *wcqe)
2822 {
2823         struct lpfc_nvmet_rcv_ctx *ctxp;
2824         struct lpfc_nvmet_tgtport *tgtp;
2825         unsigned long flags;
2826         uint32_t result;
2827         bool released = false;
2828
2829         ctxp = cmdwqe->context2;
2830         result = wcqe->parameter;
2831
2832         if (!ctxp) {
2833                 /* if the context is cleared, the related io has already completed */
2834                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2835                                 "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
2836                                 wcqe->word0, wcqe->total_data_placed,
2837                                 result, wcqe->word3);
2838                 return;
2839         }
2840
2841         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2842         spin_lock_irqsave(&ctxp->ctxlock, flags);
2843         if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2844                 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2845
2846         /* Sanity check */
2847         if (ctxp->state != LPFC_NVMET_STE_ABORT) {
2848                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2849                                 "6112 ABTS Wrong state:%d oxid x%x\n",
2850                                 ctxp->state, ctxp->oxid);
2851         }
2852
2853         /* Check whether a free-context call has already been received
2854          * while this abort was being processed.
2855          */
2856         ctxp->state = LPFC_NVMET_STE_DONE;
2857         if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2858             !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2859                 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
2860                 list_del(&ctxp->list);
2861                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
2862                 released = true;
2863         }
2864         ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2865         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2866         atomic_inc(&tgtp->xmt_abort_rsp);
2867
2868         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2869                         "6316 ABTS cmpl xri x%x flg x%x (%x) "
2870                         "WCQE: %08x %08x %08x %08x\n",
2871                         ctxp->oxid, ctxp->flag, released,
2872                         wcqe->word0, wcqe->total_data_placed,
2873                         result, wcqe->word3);
2874
2875         cmdwqe->context2 = NULL;
2876         cmdwqe->context3 = NULL;
2877         /*
2878          * If the transport has released the ctx, it can be reused now;
2879          * otherwise it is recycled by the transport's release call.
2880          */
2881         if (released)
2882                 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
2883
2884         /* Since iaab/iaar are NOT set, there is no work left.
2885          * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
2886          * should have been called already.
2887          */
2888 }
2889
2890 /**
2891  * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
2892  * @phba: Pointer to HBA context object.
2893  * @cmdwqe: Pointer to driver command WQE object.
2894  * @wcqe: Pointer to driver response CQE object.
2895  *
2896  * The function is called from the SLI ring event handler with no
2897  * lock held. It is the completion handler for an NVME ABTS for an LS
2898  * command, and it frees the memory resources used for that command.
2899  **/
2900 static void
2901 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2902                             struct lpfc_wcqe_complete *wcqe)
2903 {
2904         struct lpfc_nvmet_rcv_ctx *ctxp;
2905         struct lpfc_nvmet_tgtport *tgtp;
2906         uint32_t result;
2907
2908         ctxp = cmdwqe->context2;
2909         result = wcqe->parameter;
2910
2911         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2912         atomic_inc(&tgtp->xmt_ls_abort_cmpl);
2913
2914         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2915                         "6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
2916                         ctxp, wcqe->word0, wcqe->total_data_placed,
2917                         result, wcqe->word3);
2918
2919         if (!ctxp) {
2920                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2921                                 "6415 NVMET LS Abort No ctx: WCQE: "
2922                                 "%08x %08x %08x %08x\n",
2923                                 wcqe->word0, wcqe->total_data_placed,
2924                                 result, wcqe->word3);
2925
2926                 lpfc_sli_release_iocbq(phba, cmdwqe);
2927                 return;
2928         }
2929
2930         if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
2931                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2932                                 "6416 NVMET LS abort cmpl state mismatch: "
2933                                 "oxid x%x: %d %d\n",
2934                                 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
2935         }
2936
2937         cmdwqe->context2 = NULL;
2938         cmdwqe->context3 = NULL;
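         /* Unlike FCP contexts, the LS context is not pool-backed;
          * free it directly.
          */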
2939         lpfc_sli_release_iocbq(phba, cmdwqe);
2940         kfree(ctxp);
2941 }
2942
2943 static int
2944 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
2945                              struct lpfc_nvmet_rcv_ctx *ctxp,
2946                              uint32_t sid, uint16_t xri)
2947 {
2948         struct lpfc_nvmet_tgtport *tgtp;
2949         struct lpfc_iocbq *abts_wqeq;
2950         union lpfc_wqe128 *wqe_abts;
2951         struct lpfc_nodelist *ndlp;
2952
2953         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2954                         "6067 ABTS: sid %x xri x%x/x%x\n",
2955                         sid, xri, ctxp->wqeq->sli4_xritag);
2956
2957         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2958
2959         ndlp = lpfc_findnode_did(phba->pport, sid);
2960         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2961             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2962             (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2963                 atomic_inc(&tgtp->xmt_abort_rsp_error);
2964                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2965                                 "6134 Drop ABTS - wrong NDLP state x%x.\n",
2966                                 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
2967
2968                 /* Never fail an ABTS request back to the transport. */
2969                 return 0;
2970         }
2971
2972         abts_wqeq = ctxp->wqeq;
2973         wqe_abts = &abts_wqeq->wqe;
2974
2975         /*
2976          * Since we zero the whole WQE, we need to ensure we set the WQE fields
2977          * that were initialized in lpfc_sli4_nvmet_alloc.
2978          */
2979         memset(wqe_abts, 0, sizeof(union lpfc_wqe));
2980
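         /* The ABTS goes out as an XMIT_SEQUENCE WQE carrying a BLS ABTS
          * (R_CTL FC_RCTL_BA_ABTS, TYPE FC_TYPE_BLS) against the exchange
          * named by the received OX_ID (word 9 below).
          */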
2981         /* Word 5 */
2982         bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
2983         bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
2984         bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
2985         bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
2986         bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
2987
2988         /* Word 6 */
2989         bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
2990                phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2991         bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
2992                abts_wqeq->sli4_xritag);
2993
2994         /* Word 7 */
2995         bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
2996                CMD_XMIT_SEQUENCE64_WQE);
2997         bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
2998         bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
2999         bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
3000
3001         /* Word 8 */
3002         wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
3003
3004         /* Word 9 */
3005         bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
3006         /* Needs to be set by caller */
3007         bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
3008
3009         /* Word 10 */
3010         bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
3011         bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
3012         bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
3013                LPFC_WQE_LENLOC_WORD12);
3014         bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
3015         bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
3016
3017         /* Word 11 */
3018         bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
3019                LPFC_WQE_CQ_ID_DEFAULT);
3020         bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
3021                OTHER_COMMAND);
3022
3023         abts_wqeq->vport = phba->pport;
3024         abts_wqeq->context1 = ndlp;
3025         abts_wqeq->context2 = ctxp;
3026         abts_wqeq->context3 = NULL;
3027         abts_wqeq->rsvd2 = 0;
3028         /* hba_wqidx should already be setup from command we are aborting */
3029         abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
3030         abts_wqeq->iocb.ulpLe = 1;
3031
3032         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3033                         "6069 Issue ABTS to xri x%x reqtag x%x\n",
3034                         xri, abts_wqeq->iotag);
3035         return 1;
3036 }
3037
3038 static int
3039 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
3040                                struct lpfc_nvmet_rcv_ctx *ctxp,
3041                                uint32_t sid, uint16_t xri)
3042 {
3043         struct lpfc_nvmet_tgtport *tgtp;
3044         struct lpfc_iocbq *abts_wqeq;
3045         union lpfc_wqe128 *abts_wqe;
3046         struct lpfc_nodelist *ndlp;
3047         unsigned long flags;
3048         int rc;
3049
3050         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3051         if (!ctxp->wqeq) {
3052                 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3053                 ctxp->wqeq->hba_wqidx = 0;
3054         }
3055
3056         ndlp = lpfc_findnode_did(phba->pport, sid);
3057         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
3058             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3059             (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3060                 atomic_inc(&tgtp->xmt_abort_rsp_error);
3061                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3062                                 "6160 Drop ABORT - wrong NDLP state x%x.\n",
3063                                 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
3064
3065                 /* Never fail an ABTS request back to the transport. */
3066                 spin_lock_irqsave(&ctxp->ctxlock, flags);
3067                 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3068                 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3069                 return 0;
3070         }
3071
3072         /* Issue ABTS for this WQE based on iotag */
3073         ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
3074         spin_lock_irqsave(&ctxp->ctxlock, flags);
3075         if (!ctxp->abort_wqeq) {
3076                 atomic_inc(&tgtp->xmt_abort_rsp_error);
3077                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3078                                 "6161 ABORT failed: No wqeqs: "
3079                                 "xri: x%x\n", ctxp->oxid);
3080                 /* Never fail an ABTS request back to the transport. */
3081                 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3082                 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3083                 return 0;
3084         }
3085         abts_wqeq = ctxp->abort_wqeq;
3086         abts_wqe = &abts_wqeq->wqe;
3087         ctxp->state = LPFC_NVMET_STE_ABORT;
3088         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3089
3090         /* Log the ABORT request before attempting to issue it. */
3091         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3092                         "6162 ABORT Request to rport DID x%06x "
3093                         "for xri x%x x%x\n",
3094                         ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
3095
3096         /* If the hba is getting reset, this flag is set.  It is
3097          * cleared when the reset is complete and rings reestablished.
3098          */
3099         spin_lock_irqsave(&phba->hbalock, flags);
3100         /* driver queued commands are in process of being flushed */
3101         if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
3102                 spin_unlock_irqrestore(&phba->hbalock, flags);
3103                 atomic_inc(&tgtp->xmt_abort_rsp_error);
3104                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
3105                                 "6163 Driver in reset cleanup - flushing "
3106                                 "NVME Req now. hba_flag x%x oxid x%x\n",
3107                                 phba->hba_flag, ctxp->oxid);
3108                 lpfc_sli_release_iocbq(phba, abts_wqeq);
3109                 spin_lock_irqsave(&ctxp->ctxlock, flags);
3110                 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3111                 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3112                 return 0;
3113         }
3114
3115         /* Outstanding abort is in progress */
3116         if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
3117                 spin_unlock_irqrestore(&phba->hbalock, flags);
3118                 atomic_inc(&tgtp->xmt_abort_rsp_error);
3119                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
3120                                 "6164 Outstanding NVME I/O Abort Request "
3121                                 "still pending on oxid x%x\n",
3122                                 ctxp->oxid);
3123                 lpfc_sli_release_iocbq(phba, abts_wqeq);
3124                 spin_lock_irqsave(&ctxp->ctxlock, flags);
3125                 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3126                 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3127                 return 0;
3128         }
3129
3130         /* Ready - mark outstanding as aborted by driver. */
3131         abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
3132
3133         /* WQEs are reused.  Clear stale data and set key fields to
3134          * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
3135          */
3136         memset(abts_wqe, 0, sizeof(union lpfc_wqe));
3137
3138         /* word 3 */
3139         bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
3140
3141         /* word 7 */
3142         bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
3143         bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
3144
3145         /* word 8 - tell the FW to abort the IO associated with this
3146          * outstanding exchange ID.
3147          */
3148         abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;
3149
3150         /* word 9 - this is the iotag for the abts_wqe completion. */
3151         bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
3152                abts_wqeq->iotag);
3153
3154         /* word 10 */
3155         bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
3156         bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
3157
3158         /* word 11 */
3159         bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
3160         bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
3161         bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
3162
3163         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
3164         abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
3165         abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
3166         abts_wqeq->iocb_cmpl = NULL;
3167         abts_wqeq->iocb_flag |= LPFC_IO_NVME;
3168         abts_wqeq->context2 = ctxp;
3169         abts_wqeq->vport = phba->pport;
3170         if (!ctxp->hdwq)
3171                 ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3172
3173         rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3174         spin_unlock_irqrestore(&phba->hbalock, flags);
3175         if (rc == WQE_SUCCESS) {
3176                 atomic_inc(&tgtp->xmt_abort_sol);
3177                 return 0;
3178         }
3179
3180         atomic_inc(&tgtp->xmt_abort_rsp_error);
3181         spin_lock_irqsave(&ctxp->ctxlock, flags);
3182         ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3183         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3184         lpfc_sli_release_iocbq(phba, abts_wqeq);
3185         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3186                         "6166 Failed ABORT issue_wqe with status x%x "
3187                         "for oxid x%x.\n",
3188                         rc, ctxp->oxid);
3189         return 1;
3190 }
3191
3192 static int
3193 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
3194                                  struct lpfc_nvmet_rcv_ctx *ctxp,
3195                                  uint32_t sid, uint16_t xri)
3196 {
3197         struct lpfc_nvmet_tgtport *tgtp;
3198         struct lpfc_iocbq *abts_wqeq;
3199         unsigned long flags;
3200         bool released = false;
3201         int rc;
3202
3203         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3204         if (!ctxp->wqeq) {
3205                 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3206                 ctxp->wqeq->hba_wqidx = 0;
3207         }
3208
3209         if (ctxp->state == LPFC_NVMET_STE_FREE) {
3210                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3211                                 "6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
3212                                 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
3213                 rc = WQE_BUSY;
3214                 goto aerr;
3215         }
3216         ctxp->state = LPFC_NVMET_STE_ABORT;
3217         ctxp->entry_cnt++;
3218         rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
3219         if (rc == 0)
3220                 goto aerr;
3221
3222         spin_lock_irqsave(&phba->hbalock, flags);
3223         abts_wqeq = ctxp->wqeq;
3224         abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
3225         abts_wqeq->iocb_cmpl = NULL;
3226         abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
3227         if (!ctxp->hdwq)
3228                 ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3229
3230         rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3231         spin_unlock_irqrestore(&phba->hbalock, flags);
3232         if (rc == WQE_SUCCESS)
3233                 return 0;
3235
3236 aerr:
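         /* Failure path: clear the abort/release flags and, if the
          * transport has already released this IO, recycle the context
          * buffer now.
          */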
3237         spin_lock_irqsave(&ctxp->ctxlock, flags);
3238         if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
3239                 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3240                 list_del(&ctxp->list);
3241                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3242                 released = true;
3243         }
3244         ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
3245         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3246
3247         atomic_inc(&tgtp->xmt_abort_rsp_error);
3248         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3249                         "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
3250                         ctxp->oxid, rc);
3251         if (released)
3252                 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3253         return 1;
3254 }
3255
3256 static int
3257 lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
3258                                 struct lpfc_nvmet_rcv_ctx *ctxp,
3259                                 uint32_t sid, uint16_t xri)
3260 {
3261         struct lpfc_nvmet_tgtport *tgtp;
3262         struct lpfc_iocbq *abts_wqeq;
3263         unsigned long flags;
3264         int rc;
3265
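         /* An LS abort is valid only right after the LS was received
          * (entry_cnt 1) or after its response was queued (entry_cnt 2);
          * anything else is a state mismatch, but force the abort state
          * and continue.
          */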
3266         if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
3267             (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
3268                 ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3269                 ctxp->entry_cnt++;
3270         } else {
3271                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3272                                 "6418 NVMET LS abort state mismatch "
3273                                 "IO x%x: %d %d\n",
3274                                 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3275                 ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3276         }
3277
3278         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3279         if (!ctxp->wqeq) {
3280                 /* Issue ABTS for this WQE based on iotag */
3281                 ctxp->wqeq = lpfc_sli_get_iocbq(phba);
3282                 if (!ctxp->wqeq) {
3283                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3284                                         "6068 Abort failed: No wqeqs: "
3285                                         "xri: x%x\n", xri);
3286                         /* Never fail an ABTS request back to the transport. */
3287                         kfree(ctxp);
3288                         return 0;
3289                 }
3290         }
3291         abts_wqeq = ctxp->wqeq;
3292
3293         if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
3294                 rc = WQE_BUSY;
3295                 goto out;
3296         }
3297
3298         spin_lock_irqsave(&phba->hbalock, flags);
3299         abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
3300         abts_wqeq->iocb_cmpl = NULL;
3301         abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
3302         rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3303         spin_unlock_irqrestore(&phba->hbalock, flags);
3304         if (rc == WQE_SUCCESS) {
3305                 atomic_inc(&tgtp->xmt_abort_unsol);
3306                 return 0;
3307         }
3308 out:
3309         atomic_inc(&tgtp->xmt_abort_rsp_error);
3310         abts_wqeq->context2 = NULL;
3311         abts_wqeq->context3 = NULL;
3312         lpfc_sli_release_iocbq(phba, abts_wqeq);
3313         kfree(ctxp);
3314         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3315                         "6056 Failed to Issue ABTS. Status x%x\n", rc);
3316         return 0;
3317 }