/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme.h>
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
                                                 struct lpfc_nvmet_rcv_ctx *,
                                                 dma_addr_t rspbuf,
                                                 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
                                                  struct lpfc_nvmet_rcv_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
                                          struct lpfc_nvmet_rcv_ctx *,
                                          uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
                                            struct lpfc_nvmet_rcv_ctx *,
                                            uint32_t, uint16_t);
static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
                                           struct lpfc_nvmet_rcv_ctx *,
                                           uint32_t, uint16_t);
static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
                                    struct lpfc_nvmet_rcv_ctx *);
static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);

static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);

static union lpfc_wqe128 lpfc_tsend_cmd_template;
static union lpfc_wqe128 lpfc_treceive_cmd_template;
static union lpfc_wqe128 lpfc_trsp_cmd_template;

/* Setup WQE templates for NVME IOs */
void
lpfc_nvmet_cmd_template(void)
{
        union lpfc_wqe128 *wqe;

        /* TSEND template */
        wqe = &lpfc_tsend_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - payload_offset_len is zero */

        /* Word 4 - relative_offset is variable */

        /* Word 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 - wqe_ar is variable */
        bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
        bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
        bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
        bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);

        /* Word 8 - abort_tag is variable */

        /* Word 9  - reqtag, rcvoxid is variable */

        /* Word 10 - wqes, xc is variable */
        bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
        bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
        bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
        bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
        bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
        bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);

        /* Word 11 - sup, irsp, irsplen is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
        bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
        bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
        bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
        bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);

        /* Word 12 - fcp_data_len is variable */

        /* Word 13, 14, 15 - PBDE is zero */

        /* TRECEIVE template */
        wqe = &lpfc_treceive_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 */
        wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;

        /* Word 4 - relative_offset is variable */

        /* Word 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
        bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
        bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
        bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);

        /* Word 8 - abort_tag is variable */

        /* Word 9  - reqtag, rcvoxid is variable */

        /* Word 10 - xc is variable */
        bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
        bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
        bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
        bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
        bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
        bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 1);

        /* Word 11 - pbde is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
        bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
        bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
        bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
        bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);

        /* Word 12 - fcp_data_len is variable */

        /* Word 13, 14, 15 - PBDE is variable */

        /* TRSP template */
        wqe = &lpfc_trsp_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - response_len is variable */

        /* Word 4, 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
        bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
        bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
        bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */

        /* Word 8 - abort_tag is variable */

        /* Word 9  - reqtag is variable */

        /* Word 10 wqes, xc is variable */
        bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
        bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
        bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
        bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
        bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
        bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);

        /* Word 11 irsp, irsplen is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
        bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
        bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
        bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
        bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);

        /* Word 12, 13, 14, 15 - is zero */
}

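/**
 * lpfc_nvmet_defer_release - Mark a receive context for deferred release
 * @phba: Pointer to HBA context object.
 * @ctxp: Receive context to mark for deferred release.
 *
 * Sets LPFC_NVMET_CTX_RLS on the context and moves it to the driver's
 * ABTS context list so the exchange is only recycled once the outstanding
 * abort completes. The caller must hold ctxp->ctxlock.
 **/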
void
lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
{
        lockdep_assert_held(&ctxp->ctxlock);

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                        "6313 NVMET Defer ctx release xri x%x flg x%x\n",
                        ctxp->oxid, ctxp->flag);

        if (ctxp->flag & LPFC_NVMET_CTX_RLS)
                return;

        ctxp->flag |= LPFC_NVMET_CTX_RLS;
        spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
        list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
        spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
}

/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. This function is the completion handler for NVME LS commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                          struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct nvmefc_tgt_ls_req *rsp;
        struct lpfc_nvmet_rcv_ctx *ctxp;
        uint32_t status, result;

        status = bf_get(lpfc_wcqe_c_status, wcqe);
        result = wcqe->parameter;
        ctxp = cmdwqe->context2;

        if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6410 NVMET LS cmpl state mismatch IO x%x: "
                                "%d %d\n",
                                ctxp->oxid, ctxp->state, ctxp->entry_cnt);
        }

        if (!phba->targetport)
                goto out;

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

        if (tgtp) {
                if (status) {
                        atomic_inc(&tgtp->xmt_ls_rsp_error);
                        if (result == IOERR_ABORT_REQUESTED)
                                atomic_inc(&tgtp->xmt_ls_rsp_aborted);
                        if (bf_get(lpfc_wcqe_c_xb, wcqe))
                                atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
                } else {
                        atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
                }
        }

out:
        rsp = &ctxp->ctx.ls_req;

        lpfc_nvmeio_data(phba, "NVMET LS  CMPL: xri x%x stat x%x result x%x\n",
                         ctxp->oxid, status, result);

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                        "6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
                        status, result, ctxp->oxid);

        lpfc_nlp_put(cmdwqe->context1);
        cmdwqe->context2 = NULL;
        cmdwqe->context3 = NULL;
        lpfc_sli_release_iocbq(phba, cmdwqe);
        rsp->done(rsp);
        kfree(ctxp);
}

/**
 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA the buffer is associated with
 * @ctx_buf: ctx buffer context to clean up and repost
 *
 * Description: Frees the given DMA buffer in the appropriate way by
 * reposting it to its associated RQ so it can be reused. If a deferred
 * command is waiting, the context is recycled for that command instead
 * of being returned to the per-CPU context list.
 *
 * Notes: Takes several sli4_hba and context locks.  Can be called with
 * or without other locks held.
 *
 * Returns: None
 **/
void
lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
        struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
        struct lpfc_nvmet_tgtport *tgtp;
        struct fc_frame_header *fc_hdr;
        struct rqb_dmabuf *nvmebuf;
        struct lpfc_nvmet_ctx_info *infop;
        uint32_t *payload;
        uint32_t size, oxid, sid;
        int cpu;
        unsigned long iflag;

        if (ctxp->txrdy) {
                dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
                              ctxp->txrdy_phys);
                ctxp->txrdy = NULL;
                ctxp->txrdy_phys = 0;
        }

        if (ctxp->state == LPFC_NVMET_STE_FREE) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6411 NVMET free, already free IO x%x: %d %d\n",
                                ctxp->oxid, ctxp->state, ctxp->entry_cnt);
        }

        if (ctxp->rqb_buffer) {
                nvmebuf = ctxp->rqb_buffer;
                spin_lock_irqsave(&ctxp->ctxlock, iflag);
                ctxp->rqb_buffer = NULL;
                if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
                        ctxp->flag &= ~LPFC_NVMET_CTX_REUSE_WQ;
                        spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
                        nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
                } else {
                        spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
                        lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
                }
        }
        ctxp->state = LPFC_NVMET_STE_FREE;

        spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
        if (phba->sli4_hba.nvmet_io_wait_cnt) {
                list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
                                 nvmebuf, struct rqb_dmabuf,
                                 hbuf.list);
                phba->sli4_hba.nvmet_io_wait_cnt--;
                spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
                                       iflag);

                fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
                oxid = be16_to_cpu(fc_hdr->fh_ox_id);
                tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
                payload = (uint32_t *)(nvmebuf->dbuf.virt);
                size = nvmebuf->bytes_recv;
                sid = sli4_sid_from_fc_hdr(fc_hdr);

                ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
                ctxp->wqeq = NULL;
                ctxp->txrdy = NULL;
                ctxp->offset = 0;
                ctxp->phba = phba;
                ctxp->size = size;
                ctxp->oxid = oxid;
                ctxp->sid = sid;
                ctxp->state = LPFC_NVMET_STE_RCV;
                ctxp->entry_cnt = 1;
                ctxp->flag = 0;
                ctxp->ctxbuf = ctx_buf;
                ctxp->rqb_buffer = (void *)nvmebuf;
                spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (ctxp->ts_cmd_nvme) {
                        ctxp->ts_cmd_nvme = ktime_get_ns();
                        ctxp->ts_nvme_data = 0;
                        ctxp->ts_data_wqput = 0;
                        ctxp->ts_isr_data = 0;
                        ctxp->ts_data_nvme = 0;
                        ctxp->ts_nvme_status = 0;
                        ctxp->ts_status_wqput = 0;
                        ctxp->ts_isr_status = 0;
                        ctxp->ts_status_nvme = 0;
                }
#endif
                atomic_inc(&tgtp->rcv_fcp_cmd_in);

                /* flag new work queued, replacement buffer has already
                 * been reposted
                 */
                spin_lock_irqsave(&ctxp->ctxlock, iflag);
                ctxp->flag |= LPFC_NVMET_CTX_REUSE_WQ;
                spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

                if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
                        atomic_inc(&tgtp->rcv_fcp_cmd_drop);
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
                                        "6181 Unable to queue deferred work "
                                        "for oxid x%x. "
                                        "FCP Drop IO [x%x x%x x%x]\n",
                                        ctxp->oxid,
                                        atomic_read(&tgtp->rcv_fcp_cmd_in),
                                        atomic_read(&tgtp->rcv_fcp_cmd_out),
                                        atomic_read(&tgtp->xmt_fcp_release));

                        spin_lock_irqsave(&ctxp->ctxlock, iflag);
                        lpfc_nvmet_defer_release(phba, ctxp);
                        spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
                        lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
                }
                return;
        }
        spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

        /*
         * Use the CPU context list, from the MRQ the IO was received on
         * (ctxp->idx), to save the context structure.
         */
        cpu = smp_processor_id();
        infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
        spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
        list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
        infop->nvmet_ctx_list_cnt++;
        spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
#endif
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
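/**
 * lpfc_nvmet_ktime - Aggregate per-IO latency segments into HBA statistics
 * @phba: Pointer to HBA context object.
 * @ctxp: Receive context carrying the ktime timestamps for this IO.
 *
 * Validates that all ten timestamps recorded for the IO are present and
 * monotonically increasing, converts them into the per-segment deltas
 * described in the comment below, and folds each delta into the running
 * min/max/total counters exposed through debugfs.
 **/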
static void
lpfc_nvmet_ktime(struct lpfc_hba *phba,
                 struct lpfc_nvmet_rcv_ctx *ctxp)
{
        uint64_t seg1, seg2, seg3, seg4, seg5;
        uint64_t seg6, seg7, seg8, seg9, seg10;
        uint64_t segsum;

        if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
            !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
            !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
            !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
            !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
                return;

        if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
                return;
        if (ctxp->ts_isr_cmd  > ctxp->ts_cmd_nvme)
                return;
        if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
                return;
        if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
                return;
        if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
                return;
        if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
                return;
        if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
                return;
        if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
                return;
        if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
                return;
        if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
                return;
        /*
         * Segment 1 - Time from FCP command received by MSI-X ISR
         * to FCP command is passed to NVME Layer.
         * Segment 2 - Time from FCP command payload handed
         * off to NVME Layer to Driver receives a Command op
         * from NVME Layer.
         * Segment 3 - Time from Driver receives a Command op
         * from NVME Layer to Command is put on WQ.
         * Segment 4 - Time from Driver WQ put is done
         * to MSI-X ISR for Command cmpl.
         * Segment 5 - Time from MSI-X ISR for Command cmpl to
         * Command cmpl is passed to NVME Layer.
         * Segment 6 - Time from Command cmpl is passed to NVME
         * Layer to Driver receives a RSP op from NVME Layer.
         * Segment 7 - Time from Driver receives a RSP op from
         * NVME Layer to WQ put is done on TRSP FCP Status.
         * Segment 8 - Time from Driver WQ put is done on TRSP
         * FCP Status to MSI-X ISR for TRSP cmpl.
         * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
         * TRSP cmpl is passed to NVME Layer.
         * Segment 10 - Time from FCP command received by
         * MSI-X ISR to command is completed on wire.
         * (Segments 1 thru 8) for READDATA / WRITEDATA
         * (Segments 1 thru 4) for READDATA_RSP
         */
        seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
        segsum = seg1;

        seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
        if (segsum > seg2)
                return;
        seg2 -= segsum;
        segsum += seg2;

        seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
        if (segsum > seg3)
                return;
        seg3 -= segsum;
        segsum += seg3;

        seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
        if (segsum > seg4)
                return;
        seg4 -= segsum;
        segsum += seg4;

        seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
        if (segsum > seg5)
                return;
        seg5 -= segsum;
        segsum += seg5;

        /* For auto rsp commands seg6 thru seg10 will be 0 */
        if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
                seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
                if (segsum > seg6)
                        return;
                seg6 -= segsum;
                segsum += seg6;

                seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
                if (segsum > seg7)
                        return;
                seg7 -= segsum;
                segsum += seg7;

                seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
                if (segsum > seg8)
                        return;
                seg8 -= segsum;
                segsum += seg8;

                seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
                if (segsum > seg9)
                        return;
                seg9 -= segsum;
                segsum += seg9;

                if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
                        return;
                seg10 = (ctxp->ts_isr_status -
                        ctxp->ts_isr_cmd);
        } else {
                if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
                        return;
                seg6 =  0;
                seg7 =  0;
                seg8 =  0;
                seg9 =  0;
                seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
        }

        phba->ktime_seg1_total += seg1;
        if (seg1 < phba->ktime_seg1_min)
                phba->ktime_seg1_min = seg1;
        else if (seg1 > phba->ktime_seg1_max)
                phba->ktime_seg1_max = seg1;

        phba->ktime_seg2_total += seg2;
        if (seg2 < phba->ktime_seg2_min)
                phba->ktime_seg2_min = seg2;
        else if (seg2 > phba->ktime_seg2_max)
                phba->ktime_seg2_max = seg2;

        phba->ktime_seg3_total += seg3;
        if (seg3 < phba->ktime_seg3_min)
                phba->ktime_seg3_min = seg3;
        else if (seg3 > phba->ktime_seg3_max)
                phba->ktime_seg3_max = seg3;

        phba->ktime_seg4_total += seg4;
        if (seg4 < phba->ktime_seg4_min)
                phba->ktime_seg4_min = seg4;
        else if (seg4 > phba->ktime_seg4_max)
                phba->ktime_seg4_max = seg4;

        phba->ktime_seg5_total += seg5;
        if (seg5 < phba->ktime_seg5_min)
                phba->ktime_seg5_min = seg5;
        else if (seg5 > phba->ktime_seg5_max)
                phba->ktime_seg5_max = seg5;

        phba->ktime_data_samples++;
        if (!seg6)
                goto out;

        phba->ktime_seg6_total += seg6;
        if (seg6 < phba->ktime_seg6_min)
                phba->ktime_seg6_min = seg6;
        else if (seg6 > phba->ktime_seg6_max)
                phba->ktime_seg6_max = seg6;

        phba->ktime_seg7_total += seg7;
        if (seg7 < phba->ktime_seg7_min)
                phba->ktime_seg7_min = seg7;
        else if (seg7 > phba->ktime_seg7_max)
                phba->ktime_seg7_max = seg7;

        phba->ktime_seg8_total += seg8;
        if (seg8 < phba->ktime_seg8_min)
                phba->ktime_seg8_min = seg8;
        else if (seg8 > phba->ktime_seg8_max)
                phba->ktime_seg8_max = seg8;

        phba->ktime_seg9_total += seg9;
        if (seg9 < phba->ktime_seg9_min)
                phba->ktime_seg9_min = seg9;
        else if (seg9 > phba->ktime_seg9_max)
                phba->ktime_seg9_max = seg9;
out:
        phba->ktime_seg10_total += seg10;
        if (seg10 < phba->ktime_seg10_min)
                phba->ktime_seg10_min = seg10;
        else if (seg10 > phba->ktime_seg10_max)
                phba->ktime_seg10_max = seg10;
        phba->ktime_status_samples++;
}
#endif

/**
 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. This function is the completion handler for NVME FCP commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                          struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct nvmefc_tgt_fcp_req *rsp;
        struct lpfc_nvmet_rcv_ctx *ctxp;
        uint32_t status, result, op, start_clean, logerr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        uint32_t id;
#endif

        ctxp = cmdwqe->context2;
        ctxp->flag &= ~LPFC_NVMET_IO_INP;

        rsp = &ctxp->ctx.fcp_req;
        op = rsp->op;

        status = bf_get(lpfc_wcqe_c_status, wcqe);
        result = wcqe->parameter;

        if (phba->targetport)
                tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        else
                tgtp = NULL;

        lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
                         ctxp->oxid, op, status);

        if (status) {
                rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
                rsp->transferred_length = 0;
                if (tgtp) {
                        atomic_inc(&tgtp->xmt_fcp_rsp_error);
                        if (result == IOERR_ABORT_REQUESTED)
                                atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
                }

                logerr = LOG_NVME_IOERR;

                /* pick up SLI4 exchange busy condition */
                if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
                        ctxp->flag |= LPFC_NVMET_XBUSY;
                        logerr |= LOG_NVME_ABTS;
                        if (tgtp)
                                atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);

                } else {
                        ctxp->flag &= ~LPFC_NVMET_XBUSY;
                }

                lpfc_printf_log(phba, KERN_INFO, logerr,
                                "6315 IO Error Cmpl xri x%x: %x/%x XBUSY:x%x\n",
                                ctxp->oxid, status, result, ctxp->flag);

        } else {
                rsp->fcp_error = NVME_SC_SUCCESS;
                if (op == NVMET_FCOP_RSP)
                        rsp->transferred_length = rsp->rsplen;
                else
                        rsp->transferred_length = rsp->transfer_length;
                if (tgtp)
                        atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
        }

        if ((op == NVMET_FCOP_READDATA_RSP) ||
            (op == NVMET_FCOP_RSP)) {
                /* Sanity check */
                ctxp->state = LPFC_NVMET_STE_DONE;
                ctxp->entry_cnt++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (ctxp->ts_cmd_nvme) {
                        if (rsp->op == NVMET_FCOP_READDATA_RSP) {
                                ctxp->ts_isr_data =
                                        cmdwqe->isr_timestamp;
                                ctxp->ts_data_nvme =
                                        ktime_get_ns();
                                ctxp->ts_nvme_status =
                                        ctxp->ts_data_nvme;
                                ctxp->ts_status_wqput =
                                        ctxp->ts_data_nvme;
                                ctxp->ts_isr_status =
                                        ctxp->ts_data_nvme;
                                ctxp->ts_status_nvme =
                                        ctxp->ts_data_nvme;
                        } else {
                                ctxp->ts_isr_status =
                                        cmdwqe->isr_timestamp;
                                ctxp->ts_status_nvme =
                                        ktime_get_ns();
                        }
                }
#endif
                rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (ctxp->ts_cmd_nvme)
                        lpfc_nvmet_ktime(phba, ctxp);
#endif
                /* lpfc_nvmet_xmt_fcp_release() will recycle the context */
        } else {
                ctxp->entry_cnt++;
                start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
                memset(((char *)cmdwqe) + start_clean, 0,
                       (sizeof(struct lpfc_iocbq) - start_clean));
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (ctxp->ts_cmd_nvme) {
                        ctxp->ts_isr_data = cmdwqe->isr_timestamp;
                        ctxp->ts_data_nvme = ktime_get_ns();
                }
#endif
                rsp->done(rsp);
        }
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
                id = smp_processor_id();
                if (id < LPFC_CHECK_CPU_CNT) {
                        if (ctxp->cpu != id)
                                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
                                                "6704 CPU Check cmdcmpl: "
                                                "cpu %d expect %d\n",
                                                id, ctxp->cpu);
                        phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_cmpl_io[id]++;
                }
        }
#endif
}

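/**
 * lpfc_nvmet_xmt_ls_rsp - Transmit an NVME LS response
 * @tgtport: Pointer to the target port the LS was received on.
 * @rsp: Pointer to the transport LS request to send the response for.
 *
 * Builds an XMIT_SEQUENCE WQE for the LS response payload and posts it
 * to the hardware. On failure the receive buffer is freed and an abort
 * is issued for the exchange.
 *
 * Returns 0 on success, negative errno on failure.
 **/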
static int
lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
                      struct nvmefc_tgt_ls_req *rsp)
{
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
        struct lpfc_hba *phba = ctxp->phba;
        struct hbq_dmabuf *nvmebuf =
                (struct hbq_dmabuf *)ctxp->rqb_buffer;
        struct lpfc_iocbq *nvmewqeq;
        struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
        struct lpfc_dmabuf dmabuf;
        struct ulp_bde64 bpl;
        int rc;

        if (phba->pport->load_flag & FC_UNLOADING)
                return -ENODEV;
        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                        "6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);

        if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
            (ctxp->entry_cnt != 1)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6412 NVMET LS rsp state mismatch "
                                "oxid x%x: %d %d\n",
                                ctxp->oxid, ctxp->state, ctxp->entry_cnt);
        }
        ctxp->state = LPFC_NVMET_STE_LS_RSP;
        ctxp->entry_cnt++;

        nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
                                          rsp->rsplen);
        if (nvmewqeq == NULL) {
                atomic_inc(&nvmep->xmt_ls_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6150 LS Drop IO x%x: Prep\n",
                                ctxp->oxid);
                lpfc_in_buf_free(phba, &nvmebuf->dbuf);
                atomic_inc(&nvmep->xmt_ls_abort);
                lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
                                                ctxp->sid, ctxp->oxid);
                return -ENOMEM;
        }

        /* Save numBdes for bpl2sgl */
        nvmewqeq->rsvd2 = 1;
        nvmewqeq->hba_wqidx = 0;
        nvmewqeq->context3 = &dmabuf;
        dmabuf.virt = &bpl;
        bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
        bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
        bpl.tus.f.bdeSize = rsp->rsplen;
        bpl.tus.f.bdeFlags = 0;
        bpl.tus.w = le32_to_cpu(bpl.tus.w);

        nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
        nvmewqeq->iocb_cmpl = NULL;
        nvmewqeq->context2 = ctxp;

        lpfc_nvmeio_data(phba, "NVMET LS  RESP: xri x%x wqidx x%x len x%x\n",
                         ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);

        rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
        if (rc == WQE_SUCCESS) {
                /*
                 * Okay to repost buffer here, but wait till cmpl
                 * before freeing ctxp and iocbq.
                 */
                lpfc_in_buf_free(phba, &nvmebuf->dbuf);
                ctxp->rqb_buffer = NULL;
                atomic_inc(&nvmep->xmt_ls_rsp);
                return 0;
        }
        /* Give back resources */
        atomic_inc(&nvmep->xmt_ls_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "6151 LS Drop IO x%x: Issue %d\n",
                        ctxp->oxid, rc);

        lpfc_nlp_put(nvmewqeq->context1);

        lpfc_in_buf_free(phba, &nvmebuf->dbuf);
        atomic_inc(&nvmep->xmt_ls_abort);
        lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
        return -ENXIO;
}

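/**
 * lpfc_nvmet_xmt_fcp_op - Issue an NVME FCP data or response operation
 * @tgtport: Pointer to the target port for this IO.
 * @rsp: Pointer to the transport FCP request describing the operation.
 *
 * Prepares a TSEND, TRECEIVE or TRSP WQE for the request and posts it to
 * the appropriate hardware queue. If the work queue is full, the WQE is
 * parked on the wqfull list to be reissued after a WQE release CQE.
 *
 * Returns 0 on success, negative errno on failure.
 **/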
static int
lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
                      struct nvmefc_tgt_fcp_req *rsp)
{
        struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct lpfc_hba *phba = ctxp->phba;
        struct lpfc_queue *wq;
        struct lpfc_iocbq *nvmewqeq;
        struct lpfc_sli_ring *pring;
        unsigned long iflags;
        int rc;

        if (phba->pport->load_flag & FC_UNLOADING) {
                rc = -ENODEV;
                goto aerr;
        }
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (ctxp->ts_cmd_nvme) {
                if (rsp->op == NVMET_FCOP_RSP)
                        ctxp->ts_nvme_status = ktime_get_ns();
                else
                        ctxp->ts_nvme_data = ktime_get_ns();
        }

        if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
                int id = smp_processor_id();

                if (id < LPFC_CHECK_CPU_CNT) {
                        if (rsp->hwqid != id)
                                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
                                                "6705 CPU Check OP: "
                                                "cpu %d expect %d\n",
                                                id, rsp->hwqid);
                        phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_xmt_io[id]++;
                }
                ctxp->cpu = id; /* Setup cpu for cmpl check */
        }
#endif

        /* Setup the hdw queue if not already set */
        if (!ctxp->hdwq)
                ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];

        /* Sanity check */
        if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
            (ctxp->state == LPFC_NVMET_STE_ABORT)) {
                atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6102 IO xri x%x aborted\n",
                                ctxp->oxid);
                rc = -ENXIO;
                goto aerr;
        }

        nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
        if (nvmewqeq == NULL) {
                atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6152 FCP Drop IO x%x: Prep\n",
                                ctxp->oxid);
                rc = -ENXIO;
                goto aerr;
        }

        nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
        nvmewqeq->iocb_cmpl = NULL;
        nvmewqeq->context2 = ctxp;
        nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
        ctxp->wqeq->hba_wqidx = rsp->hwqid;

        lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
                         ctxp->oxid, rsp->op, rsp->rsplen);

        ctxp->flag |= LPFC_NVMET_IO_INP;
        rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
        if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (!ctxp->ts_cmd_nvme)
                        return 0;
                if (rsp->op == NVMET_FCOP_RSP)
                        ctxp->ts_status_wqput = ktime_get_ns();
                else
                        ctxp->ts_data_wqput = ktime_get_ns();
#endif
                return 0;
        }

        if (rc == -EBUSY) {
                /*
                 * WQ was full, so queue nvmewqeq to be sent after
                 * WQE release CQE
                 */
                ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
                wq = ctxp->hdwq->nvme_wq;
                pring = wq->pring;
                spin_lock_irqsave(&pring->ring_lock, iflags);
                list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
                wq->q_flag |= HBA_NVMET_WQFULL;
                spin_unlock_irqrestore(&pring->ring_lock, iflags);
                atomic_inc(&lpfc_nvmep->defer_wqfull);
                return 0;
        }

        /* Give back resources */
        atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "6153 FCP Drop IO x%x: Issue: %d\n",
                        ctxp->oxid, rc);

        ctxp->wqeq->hba_wqidx = 0;
        nvmewqeq->context2 = NULL;
        nvmewqeq->context3 = NULL;
        rc = -EBUSY;
aerr:
        return rc;
}

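/**
 * lpfc_nvmet_targetport_delete - Transport callback on target port delete
 * @targetport: Pointer to the target port being deleted.
 *
 * Completes the unreg completion so any thread waiting for the target
 * port teardown to finish can proceed.
 **/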
static void
lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
{
        struct lpfc_nvmet_tgtport *tport = targetport->private;

        /* release any threads waiting for the unreg to complete */
        if (tport->phba->targetport)
                complete(tport->tport_unreg_cmp);
}

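/**
 * lpfc_nvmet_xmt_fcp_abort - Abort an outstanding NVME FCP exchange
 * @tgtport: Pointer to the target port for this IO.
 * @req: Pointer to the transport FCP request being aborted.
 *
 * Issues an unsolicited or solicited abort depending on whether any IO
 * WQEs have been started on the exchange. If the firmware is already
 * aborting the exchange (XBUSY), nothing more is done here; if the WQE
 * is parked on the wqfull list, that list is flushed after the abort.
 **/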
static void
lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
                         struct nvmefc_tgt_fcp_req *req)
{
        struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct lpfc_hba *phba = ctxp->phba;
        struct lpfc_queue *wq;
        unsigned long flags;

        if (phba->pport->load_flag & FC_UNLOADING)
                return;
        if (!ctxp->hdwq)
                ctxp->hdwq = &phba->sli4_hba.hdwq[0];

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                        "6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
                        ctxp->oxid, ctxp->flag, ctxp->state);

        lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
                         ctxp->oxid, ctxp->flag, ctxp->state);

        atomic_inc(&lpfc_nvmep->xmt_fcp_abort);

        spin_lock_irqsave(&ctxp->ctxlock, flags);

        /* Since iaab/iaar are NOT set, we need to check
         * if the firmware is in the process of aborting this IO
         */
        if (ctxp->flag & LPFC_NVMET_XBUSY) {
                spin_unlock_irqrestore(&ctxp->ctxlock, flags);
                return;
        }
        ctxp->flag |= LPFC_NVMET_ABORT_OP;

        if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
                spin_unlock_irqrestore(&ctxp->ctxlock, flags);
                lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
                                                 ctxp->oxid);
                wq = ctxp->hdwq->nvme_wq;
                lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
                return;
        }
        spin_unlock_irqrestore(&ctxp->ctxlock, flags);

        /* A state of LPFC_NVMET_STE_RCV means we have just received
         * the NVME command and have not started processing it
         * (by issuing any IO WQEs on this exchange yet).
         */
        if (ctxp->state == LPFC_NVMET_STE_RCV)
                lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
                                                 ctxp->oxid);
        else
                lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
                                               ctxp->oxid);
}

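/**
 * lpfc_nvmet_xmt_fcp_release - Release the driver context for an FCP IO
 * @tgtport: Pointer to the target port for this IO.
 * @rsp: Pointer to the transport FCP request being released.
 *
 * Called when the transport is done with the IO. If an abort is in
 * progress or the exchange is still busy in firmware, the release is
 * deferred to the abort path; otherwise the context buffer is reposted.
 **/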
static void
lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
                           struct nvmefc_tgt_fcp_req *rsp)
{
        struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct lpfc_hba *phba = ctxp->phba;
        unsigned long flags;
        bool aborting = false;

        spin_lock_irqsave(&ctxp->ctxlock, flags);
        if (ctxp->flag & LPFC_NVMET_XBUSY)
                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
                                "6027 NVMET release with XBUSY flag x%x"
                                " oxid x%x\n",
                                ctxp->flag, ctxp->oxid);
        else if (ctxp->state != LPFC_NVMET_STE_DONE &&
                 ctxp->state != LPFC_NVMET_STE_ABORT)
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6413 NVMET release bad state %d %d oxid x%x\n",
                                ctxp->state, ctxp->entry_cnt, ctxp->oxid);

        if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
            (ctxp->flag & LPFC_NVMET_XBUSY)) {
                aborting = true;
                /* let the abort path do the real release */
                lpfc_nvmet_defer_release(phba, ctxp);
        }
        spin_unlock_irqrestore(&ctxp->ctxlock, flags);

        lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n",
                         ctxp->oxid, ctxp->state, aborting);

        atomic_inc(&lpfc_nvmep->xmt_fcp_release);

        if (aborting)
                return;

        lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}

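/**
 * lpfc_nvmet_defer_rcv - Free the deferred receive buffer for an IO
 * @tgtport: Pointer to the target port for this IO.
 * @rsp: Pointer to the transport FCP request that was deferred.
 *
 * Called when the transport is ready for the next command. The deferred
 * nvmebuf can be freed here because a replacement buffer was already
 * reposted to the RQ when the command was deferred.
 **/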
static void
lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
                     struct nvmefc_tgt_fcp_req *rsp)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
        struct lpfc_hba *phba = ctxp->phba;
        unsigned long iflag;

        lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
                         ctxp->oxid, ctxp->size, smp_processor_id());

        if (!nvmebuf) {
                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
                                "6425 Defer rcv: no buffer xri x%x: "
                                "flg %x ste %x\n",
                                ctxp->oxid, ctxp->flag, ctxp->state);
                return;
        }

        tgtp = phba->targetport->private;
        if (tgtp)
                atomic_inc(&tgtp->rcv_fcp_cmd_defer);

        /* Free the nvmebuf since a new buffer already replaced it */
        nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
        spin_lock_irqsave(&ctxp->ctxlock, iflag);
        ctxp->rqb_buffer = NULL;
        spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
}

static struct nvmet_fc_target_template lpfc_tgttemplate = {
        .targetport_delete = lpfc_nvmet_targetport_delete,
        .xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
        .fcp_op         = lpfc_nvmet_xmt_fcp_op,
        .fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
        .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
        .defer_rcv      = lpfc_nvmet_defer_rcv,

        .max_hw_queues  = 1,
        .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
        .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
        .dma_boundary = 0xFFFFFFFF,

        /* optional features */
        .target_features = 0,
        /* sizes of additional private data for data structures */
        .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
};

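/**
 * __lpfc_nvmet_clean_io_for_cpu - Release all NVMET contexts on one list
 * @phba: Pointer to HBA context object.
 * @infop: Per-CPU/per-MRQ context list to drain.
 *
 * Walks the given context list, returning each context's sglq to the
 * NVMET sgl list, releasing its iocbq and freeing the context memory.
 **/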
static void
__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
                struct lpfc_nvmet_ctx_info *infop)
{
        struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
        unsigned long flags;

        spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
        list_for_each_entry_safe(ctx_buf, next_ctx_buf,
                                &infop->nvmet_ctx_list, list) {
                spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                list_del_init(&ctx_buf->list);
                spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);

                __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
                ctx_buf->sglq->state = SGL_FREED;
                ctx_buf->sglq->ndlp = NULL;

                spin_lock(&phba->sli4_hba.sgl_list_lock);
                list_add_tail(&ctx_buf->sglq->list,
                                &phba->sli4_hba.lpfc_nvmet_sgl_list);
                spin_unlock(&phba->sli4_hba.sgl_list_lock);

                lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
                kfree(ctx_buf->context);
        }
        spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
}

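/**
 * lpfc_nvmet_cleanup_io_context - Free all NVMET IO context resources
 * @phba: Pointer to HBA context object.
 *
 * Drains every per-CPU context list for every MRQ, then frees the
 * context info array itself.
 **/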
static void
lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
{
        struct lpfc_nvmet_ctx_info *infop;
        int i, j;

        /* The first context list, MRQ 0 CPU 0 */
        infop = phba->sli4_hba.nvmet_ctx_info;
        if (!infop)
                return;

        /* Cycle through the entire CPU context list for every MRQ */
        for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
                for_each_present_cpu(j) {
                        infop = lpfc_get_ctx_list(phba, j, i);
                        __lpfc_nvmet_clean_io_for_cpu(phba, infop);
                }
        }
        kfree(phba->sli4_hba.nvmet_ctx_info);
        phba->sli4_hba.nvmet_ctx_info = NULL;
}

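/**
 * lpfc_nvmet_setup_io_context - Allocate per-XRI NVMET receive resources
 * @phba: Pointer to HBA context object.
 *
 * Allocates the per-CPU/per-MRQ context info array, then for every NVMET
 * XRI allocates a context buffer, receive context, iocbq and sglq, and
 * distributes the context buffers evenly across the MRQ context lists.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 **/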
static int
lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
{
        struct lpfc_nvmet_ctxbuf *ctx_buf;
        struct lpfc_iocbq *nvmewqe;
        union lpfc_wqe128 *wqe;
        struct lpfc_nvmet_ctx_info *last_infop;
        struct lpfc_nvmet_ctx_info *infop;
        int i, j, idx, cpu;

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
                        "6403 Allocate NVMET resources for %d XRIs\n",
                        phba->sli4_hba.nvmet_xri_cnt);

        phba->sli4_hba.nvmet_ctx_info = kcalloc(
                phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
                sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
        if (!phba->sli4_hba.nvmet_ctx_info) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "6419 Failed allocate memory for "
                                "nvmet context lists\n");
                return -ENOMEM;
        }

        /*
         * Assuming X CPUs in the system, and Y MRQs, allocate some
         * lpfc_nvmet_ctx_info structures as follows:
         *
         * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
         * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
         * ...
         * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
         *
         * Each line represents a MRQ "silo" containing an entry for
         * every CPU.
         *
         * MRQ X is initially assumed to be associated with CPU X, thus
         * contexts are initially distributed across all MRQs using
         * the MRQ index (N) as follows cpuN/mrqN. When contexts are
         * freed, they are freed to the MRQ silo based on the CPU number
         * of the IO completion. Thus a context that was allocated for MRQ A
         * whose IO completed on CPU B will be freed to cpuB/mrqA.
         */
        for_each_possible_cpu(i) {
                for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
                        infop = lpfc_get_ctx_list(phba, i, j);
                        INIT_LIST_HEAD(&infop->nvmet_ctx_list);
                        spin_lock_init(&infop->nvmet_ctx_list_lock);
                        infop->nvmet_ctx_list_cnt = 0;
                }
        }

        /*
         * Setup the next CPU context info ptr for each MRQ.
         * MRQ 0 will cycle thru CPUs 0 - X separately from
         * MRQ 1 cycling thru CPUs 0 - X, and so on.
         */
        for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
                last_infop = lpfc_get_ctx_list(phba,
                                               cpumask_first(cpu_present_mask),
                                               j);
                for (i = phba->sli4_hba.num_possible_cpu - 1;  i >= 0; i--) {
                        infop = lpfc_get_ctx_list(phba, i, j);
                        infop->nvmet_ctx_next_cpu = last_infop;
                        last_infop = infop;
                }
        }

        /* For all nvmet xris, allocate resources needed to process a
         * received command on a per xri basis.
         */
        idx = 0;
        cpu = cpumask_first(cpu_present_mask);
        for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
                ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
                if (!ctx_buf) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
                                        "6404 Ran out of memory for NVMET\n");
                        return -ENOMEM;
                }

                ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
                                           GFP_KERNEL);
                if (!ctx_buf->context) {
                        kfree(ctx_buf);
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
                                        "6405 Ran out of NVMET "
                                        "context memory\n");
                        return -ENOMEM;
                }
                ctx_buf->context->ctxbuf = ctx_buf;
                ctx_buf->context->state = LPFC_NVMET_STE_FREE;

                ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
                if (!ctx_buf->iocbq) {
                        kfree(ctx_buf->context);
                        kfree(ctx_buf);
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
                                        "6406 Ran out of NVMET iocb/WQEs\n");
                        return -ENOMEM;
                }
                ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
                nvmewqe = ctx_buf->iocbq;
                wqe = &nvmewqe->wqe;

                /* Initialize WQE */
                memset(wqe, 0, sizeof(union lpfc_wqe128));
1321
1322                 ctx_buf->iocbq->context1 = NULL;
1323                 spin_lock(&phba->sli4_hba.sgl_list_lock);
1324                 ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
1325                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1326                 if (!ctx_buf->sglq) {
1327                         lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1328                         kfree(ctx_buf->context);
1329                         kfree(ctx_buf);
1330                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1331                                         "6407 Ran out of NVMET XRIs\n");
1332                         return -ENOMEM;
1333                 }
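		/* defer_work re-runs lpfc_nvmet_process_rcv_fcp_req() from
		 * workqueue context for commands whose handling had to be
		 * deferred at receive time.
		 */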
1334                 INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);
1335
1336                 /*
1337                  * Add ctx to MRQidx context list. Our initial assumption
1338                  * is MRQidx will be associated with CPUidx. This association
1339                  * can change on the fly.
1340                  */
1341                 infop = lpfc_get_ctx_list(phba, cpu, idx);
1342                 spin_lock(&infop->nvmet_ctx_list_lock);
1343                 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
1344                 infop->nvmet_ctx_list_cnt++;
1345                 spin_unlock(&infop->nvmet_ctx_list_lock);
1346
1347                 /* Spread ctx structures evenly across all MRQs */
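		/* Because cpu and idx advance in lockstep and reset
		 * together, each context for MRQ j initially lands on
		 * CPU j's list (when at least cfg_nvmet_mrq CPUs are
		 * present), matching the MRQ-to-CPU association assumed
		 * above.
		 */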
1348                 idx++;
1349                 if (idx >= phba->cfg_nvmet_mrq) {
1350                         idx = 0;
1351                         cpu = cpumask_first(cpu_present_mask);
1352                         continue;
1353                 }
1354                 cpu = cpumask_next(cpu, cpu_present_mask);
1355                 if (cpu == nr_cpu_ids)
1356                         cpu = cpumask_first(cpu_present_mask);
1357
1358         }
1359
1360         for_each_present_cpu(i) {
1361                 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1362                         infop = lpfc_get_ctx_list(phba, i, j);
1363                         lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1364                                         "6408 TOTAL NVMET ctx for CPU %d "
1365                                         "MRQ %d: cnt %d nextcpu %p\n",
1366                                         i, j, infop->nvmet_ctx_list_cnt,
1367                                         infop->nvmet_ctx_next_cpu);
1368                 }
1369         }
1370         return 0;
1371 }
1372
1373 int
1374 lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
1375 {
1376         struct lpfc_vport  *vport = phba->pport;
1377         struct lpfc_nvmet_tgtport *tgtp;
1378         struct nvmet_fc_port_info pinfo;
1379         int error;
1380
1381         if (phba->targetport)
1382                 return 0;
1383
1384         error = lpfc_nvmet_setup_io_context(phba);
1385         if (error)
1386                 return error;
1387
1388         memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
1389         pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
1390         pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
1391         pinfo.port_id = vport->fc_myDID;
1392
	/* We need to tell the transport layer + 1 because it takes page
	 * alignment into account. When space for the SGL is allocated we
	 * allocate + 3: one for the cmd, one for the rsp, and one for
	 * this alignment.
	 */
1397         lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
1398         lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
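	/* READDATA_RSP advertises that this driver can send the FCP
	 * response as part of the final read-data sequence, enabling the
	 * combined NVMET_FCOP_READDATA_RSP operation handled in
	 * lpfc_nvmet_prep_fcp_wqe().
	 */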
1399         lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
1400
1401 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1402         error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
1403                                              &phba->pcidev->dev,
1404                                              &phba->targetport);
1405 #else
1406         error = -ENOENT;
1407 #endif
1408         if (error) {
1409                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1410                                 "6025 Cannot register NVME targetport x%x: "
1411                                 "portnm %llx nodenm %llx segs %d qs %d\n",
1412                                 error,
1413                                 pinfo.port_name, pinfo.node_name,
1414                                 lpfc_tgttemplate.max_sgl_segments,
1415                                 lpfc_tgttemplate.max_hw_queues);
1416                 phba->targetport = NULL;
1417                 phba->nvmet_support = 0;
1418
1419                 lpfc_nvmet_cleanup_io_context(phba);
1420
1421         } else {
1422                 tgtp = (struct lpfc_nvmet_tgtport *)
1423                         phba->targetport->private;
1424                 tgtp->phba = phba;
1425
1426                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1427                                 "6026 Registered NVME "
1428                                 "targetport: %p, private %p "
1429                                 "portnm %llx nodenm %llx segs %d qs %d\n",
1430                                 phba->targetport, tgtp,
1431                                 pinfo.port_name, pinfo.node_name,
1432                                 lpfc_tgttemplate.max_sgl_segments,
1433                                 lpfc_tgttemplate.max_hw_queues);
1434
1435                 atomic_set(&tgtp->rcv_ls_req_in, 0);
1436                 atomic_set(&tgtp->rcv_ls_req_out, 0);
1437                 atomic_set(&tgtp->rcv_ls_req_drop, 0);
1438                 atomic_set(&tgtp->xmt_ls_abort, 0);
1439                 atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
1440                 atomic_set(&tgtp->xmt_ls_rsp, 0);
1441                 atomic_set(&tgtp->xmt_ls_drop, 0);
1442                 atomic_set(&tgtp->xmt_ls_rsp_error, 0);
1443                 atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
1444                 atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
1445                 atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
1446                 atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
1447                 atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
1448                 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
1449                 atomic_set(&tgtp->xmt_fcp_drop, 0);
1450                 atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
1451                 atomic_set(&tgtp->xmt_fcp_read, 0);
1452                 atomic_set(&tgtp->xmt_fcp_write, 0);
1453                 atomic_set(&tgtp->xmt_fcp_rsp, 0);
1454                 atomic_set(&tgtp->xmt_fcp_release, 0);
1455                 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
1456                 atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
1457                 atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
1458                 atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
1459                 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1460                 atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
1461                 atomic_set(&tgtp->xmt_fcp_abort, 0);
1462                 atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1463                 atomic_set(&tgtp->xmt_abort_unsol, 0);
1464                 atomic_set(&tgtp->xmt_abort_sol, 0);
1465                 atomic_set(&tgtp->xmt_abort_rsp, 0);
1466                 atomic_set(&tgtp->xmt_abort_rsp_error, 0);
1467                 atomic_set(&tgtp->defer_ctx, 0);
1468                 atomic_set(&tgtp->defer_fod, 0);
1469                 atomic_set(&tgtp->defer_wqfull, 0);
1470         }
1471         return error;
1472 }
1473
1474 int
1475 lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
1476 {
1477         struct lpfc_vport  *vport = phba->pport;
1478
1479         if (!phba->targetport)
1480                 return 0;
1481
1482         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
1483                          "6007 Update NVMET port %p did x%x\n",
1484                          phba->targetport, vport->fc_myDID);
1485
1486         phba->targetport->port_id = vport->fc_myDID;
1487         return 0;
1488 }
1489
1490 /**
1491  * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
1492  * @phba: pointer to lpfc hba data structure.
1493  * @axri: pointer to the nvmet xri abort wcqe structure.
1494  *
1495  * This routine is invoked by the worker thread to process a SLI4 fast-path
1496  * NVMET aborted xri.
1497  **/
1498 void
1499 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
1500                             struct sli4_wcqe_xri_aborted *axri)
1501 {
1502         uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
1503         uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
1504         struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1505         struct lpfc_nvmet_tgtport *tgtp;
1506         struct lpfc_nodelist *ndlp;
1507         unsigned long iflag = 0;
1508         int rrq_empty = 0;
1509         bool released = false;
1510
1511         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1512                         "6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
1513
1514         if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
1515                 return;
1516
1517         if (phba->targetport) {
1518                 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1519                 atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
1520         }
1521
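	/* Walk the list of contexts waiting on an ABTS to find the aborted
	 * XRI. Both locks are dropped once a match is found, before the
	 * RRQ/node handling and the context repost.
	 */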
1522         spin_lock_irqsave(&phba->hbalock, iflag);
1523         spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1524         list_for_each_entry_safe(ctxp, next_ctxp,
1525                                  &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1526                                  list) {
1527                 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1528                         continue;
1529
1530                 spin_lock(&ctxp->ctxlock);
		/* Check if we have already received a free-context call
		 * and have completed processing of an abort situation.
		 */
1534                 if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
1535                     !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
1536                         list_del(&ctxp->list);
1537                         released = true;
1538                 }
1539                 ctxp->flag &= ~LPFC_NVMET_XBUSY;
1540                 spin_unlock(&ctxp->ctxlock);
1541                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1542
1543                 rrq_empty = list_empty(&phba->active_rrq_list);
1544                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1545                 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1546                 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1547                     (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
1548                      ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
1549                         lpfc_set_rrq_active(phba, ndlp,
1550                                 ctxp->ctxbuf->sglq->sli4_lxritag,
1551                                 rxid, 1);
1552                         lpfc_sli4_abts_err_handler(phba, ndlp, axri);
1553                 }
1554
1555                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6318 XB aborted oxid x%x flg x%x (%x)\n",
1557                                 ctxp->oxid, ctxp->flag, released);
1558                 if (released)
1559                         lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1560
1561                 if (rrq_empty)
1562                         lpfc_worker_wake_up(phba);
1563                 return;
1564         }
1565         spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1566         spin_unlock_irqrestore(&phba->hbalock, iflag);
1567 }
1568
1569 int
1570 lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
1571                            struct fc_frame_header *fc_hdr)
1572 {
1573 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1574         struct lpfc_hba *phba = vport->phba;
1575         struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1576         struct nvmefc_tgt_fcp_req *rsp;
1577         uint16_t xri;
1578         unsigned long iflag = 0;
1579
1580         xri = be16_to_cpu(fc_hdr->fh_ox_id);
1581
1582         spin_lock_irqsave(&phba->hbalock, iflag);
1583         spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1584         list_for_each_entry_safe(ctxp, next_ctxp,
1585                                  &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1586                                  list) {
1587                 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1588                         continue;
1589
1590                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1591                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1592
1593                 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1594                 ctxp->flag |= LPFC_NVMET_ABTS_RCV;
1595                 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1596
1597                 lpfc_nvmeio_data(phba,
1598                         "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1599                         xri, smp_processor_id(), 0);
1600
1601                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1602                                 "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
1603
1604                 rsp = &ctxp->ctx.fcp_req;
1605                 nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
1606
1607                 /* Respond with BA_ACC accordingly */
1608                 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1609                 return 0;
1610         }
1611         spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1612         spin_unlock_irqrestore(&phba->hbalock, iflag);
1613
1614         lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1615                          xri, smp_processor_id(), 1);
1616
1617         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1618                         "6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
1619
1620         /* Respond with BA_RJT accordingly */
1621         lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
1622 #endif
1623         return 0;
1624 }
1625
1626 static void
1627 lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
1628                         struct lpfc_nvmet_rcv_ctx *ctxp)
1629 {
1630         struct lpfc_sli_ring *pring;
1631         struct lpfc_iocbq *nvmewqeq;
1632         struct lpfc_iocbq *next_nvmewqeq;
1633         unsigned long iflags;
1634         struct lpfc_wcqe_complete wcqe;
1635         struct lpfc_wcqe_complete *wcqep;
1636
1637         pring = wq->pring;
1638         wcqep = &wcqe;
1639
1640         /* Fake an ABORT error code back to cmpl routine */
1641         memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
1642         bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
1643         wcqep->parameter = IOERR_ABORT_REQUESTED;
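	/* The on-stack WCQE, zeroed except for status and parameter, stands
	 * in for a hardware completion so that each flushed WQE finishes
	 * through the normal completion callback.
	 */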
1644
1645         spin_lock_irqsave(&pring->ring_lock, iflags);
1646         list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
1647                                  &wq->wqfull_list, list) {
1648                 if (ctxp) {
1649                         /* Checking for a specific IO to flush */
1650                         if (nvmewqeq->context2 == ctxp) {
1651                                 list_del(&nvmewqeq->list);
1652                                 spin_unlock_irqrestore(&pring->ring_lock,
1653                                                        iflags);
1654                                 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
1655                                                           wcqep);
1656                                 return;
1657                         }
1658                         continue;
1659                 } else {
1660                         /* Flush all IOs */
1661                         list_del(&nvmewqeq->list);
1662                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1663                         lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
1664                         spin_lock_irqsave(&pring->ring_lock, iflags);
1665                 }
1666         }
1667         if (!ctxp)
1668                 wq->q_flag &= ~HBA_NVMET_WQFULL;
1669         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1670 }
1671
1672 void
1673 lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
1674                           struct lpfc_queue *wq)
1675 {
1676 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1677         struct lpfc_sli_ring *pring;
1678         struct lpfc_iocbq *nvmewqeq;
1679         struct lpfc_nvmet_rcv_ctx *ctxp;
1680         unsigned long iflags;
1681         int rc;
1682
1683         /*
1684          * Some WQE slots are available, so try to re-issue anything
1685          * on the WQ wqfull_list.
1686          */
1687         pring = wq->pring;
1688         spin_lock_irqsave(&pring->ring_lock, iflags);
1689         while (!list_empty(&wq->wqfull_list)) {
1690                 list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
1691                                  list);
1692                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1693                 ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmewqeq->context2;
1694                 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
1695                 spin_lock_irqsave(&pring->ring_lock, iflags);
1696                 if (rc == -EBUSY) {
1697                         /* WQ was full again, so put it back on the list */
1698                         list_add(&nvmewqeq->list, &wq->wqfull_list);
1699                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1700                         return;
1701                 }
1702         }
1703         wq->q_flag &= ~HBA_NVMET_WQFULL;
1704         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1705
1706 #endif
1707 }
1708
1709 void
1710 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
1711 {
1712 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1713         struct lpfc_nvmet_tgtport *tgtp;
1714         struct lpfc_queue *wq;
1715         uint32_t qidx;
1716         DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
1717
1718         if (phba->nvmet_support == 0)
1719                 return;
1720         if (phba->targetport) {
1721                 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1722                 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
1723                         wq = phba->sli4_hba.hdwq[qidx].nvme_wq;
1724                         lpfc_nvmet_wqfull_flush(phba, wq, NULL);
1725                 }
1726                 tgtp->tport_unreg_cmp = &tport_unreg_cmp;
1727                 nvmet_fc_unregister_targetport(phba->targetport);
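		/* The timeout below is in jiffies; the transport signals
		 * tport_unreg_cmp from its targetport_delete callback once
		 * all outstanding references are dropped, so bound the
		 * wait at roughly five seconds.
		 */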
		wait_for_completion_timeout(&tport_unreg_cmp,
					    msecs_to_jiffies(5000));
1729                 lpfc_nvmet_cleanup_io_context(phba);
1730         }
1731         phba->targetport = NULL;
1732 #endif
1733 }
1734
1735 /**
1736  * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
1737  * @phba: pointer to lpfc hba data structure.
1738  * @pring: pointer to a SLI ring.
1739  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1740  *
 * This routine processes an unsolicited NVME LS request received in an
 * RQ buffer. A receive context is allocated for the exchange and the LS
 * payload is handed to the nvmet transport via nvmet_fc_rcv_ls_req().
 * If the transport cannot accept the request, the buffer is freed and
 * an abort is issued for the exchange.
1747  **/
1748 static void
1749 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1750                            struct hbq_dmabuf *nvmebuf)
1751 {
1752 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1753         struct lpfc_nvmet_tgtport *tgtp;
1754         struct fc_frame_header *fc_hdr;
1755         struct lpfc_nvmet_rcv_ctx *ctxp;
1756         uint32_t *payload;
1757         uint32_t size, oxid, sid, rc;
1758
1759         fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1760         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1761
1762         if (!phba->targetport) {
1763                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1764                                 "6154 LS Drop IO x%x\n", oxid);
1765                 oxid = 0;
1766                 size = 0;
1767                 sid = 0;
1768                 ctxp = NULL;
1769                 goto dropit;
1770         }
1771
1772         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1773         payload = (uint32_t *)(nvmebuf->dbuf.virt);
1774         size = bf_get(lpfc_rcqe_length,  &nvmebuf->cq_event.cqe.rcqe_cmpl);
1775         sid = sli4_sid_from_fc_hdr(fc_hdr);
1776
1777         ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
1778         if (ctxp == NULL) {
1779                 atomic_inc(&tgtp->rcv_ls_req_drop);
1780                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1781                                 "6155 LS Drop IO x%x: Alloc\n",
1782                                 oxid);
1783 dropit:
1784                 lpfc_nvmeio_data(phba, "NVMET LS  DROP: "
1785                                  "xri x%x sz %d from %06x\n",
1786                                  oxid, size, sid);
1787                 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1788                 return;
1789         }
1790         ctxp->phba = phba;
1791         ctxp->size = size;
1792         ctxp->oxid = oxid;
1793         ctxp->sid = sid;
1794         ctxp->wqeq = NULL;
1795         ctxp->state = LPFC_NVMET_STE_LS_RCV;
1796         ctxp->entry_cnt = 1;
1797         ctxp->rqb_buffer = (void *)nvmebuf;
1798         ctxp->hdwq = &phba->sli4_hba.hdwq[0];
1799
1800         lpfc_nvmeio_data(phba, "NVMET LS   RCV: xri x%x sz %d from %06x\n",
1801                          oxid, size, sid);
1802         /*
1803          * The calling sequence should be:
	 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done
1805          * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
1806          */
1807         atomic_inc(&tgtp->rcv_ls_req_in);
1808         rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
1809                                  payload, size);
1810
1811         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1812                         "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
1813                         "%08x %08x %08x\n", size, rc,
1814                         *payload, *(payload+1), *(payload+2),
1815                         *(payload+3), *(payload+4), *(payload+5));
1816
1817         if (rc == 0) {
1818                 atomic_inc(&tgtp->rcv_ls_req_out);
1819                 return;
1820         }
1821
1822         lpfc_nvmeio_data(phba, "NVMET LS  DROP: xri x%x sz %d from %06x\n",
1823                          oxid, size, sid);
1824
1825         atomic_inc(&tgtp->rcv_ls_req_drop);
1826         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1827                         "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
1828                         ctxp->oxid, rc);
1829
	/* We assume a rcv'ed cmd ALWAYS fits into 1 buffer */
1831         lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1832
1833         atomic_inc(&tgtp->xmt_ls_abort);
1834         lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
1835 #endif
1836 }
1837
1838 static void
1839 lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
1840 {
1841 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1842         struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
1843         struct lpfc_hba *phba = ctxp->phba;
1844         struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
1845         struct lpfc_nvmet_tgtport *tgtp;
1846         uint32_t *payload;
1847         uint32_t rc;
1848         unsigned long iflags;
1849
1850         if (!nvmebuf) {
1851                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1852                         "6159 process_rcv_fcp_req, nvmebuf is NULL, "
1853                         "oxid: x%x flg: x%x state: x%x\n",
1854                         ctxp->oxid, ctxp->flag, ctxp->state);
1855                 spin_lock_irqsave(&ctxp->ctxlock, iflags);
1856                 lpfc_nvmet_defer_release(phba, ctxp);
1857                 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
1858                 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1859                                                  ctxp->oxid);
1860                 return;
1861         }
1862
1863         payload = (uint32_t *)(nvmebuf->dbuf.virt);
1864         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1865         /*
1866          * The calling sequence should be:
	 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
	 * By the time nvmet_fc_rcv_fcp_req() returns, all relevant info
	 * from the NVME command / FC header has been stored.
1871          * A buffer has already been reposted for this IO, so just free
1872          * the nvmebuf.
1873          */
1874         rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
1875                                   payload, ctxp->size);
1876         /* Process FCP command */
1877         if (rc == 0) {
1878                 atomic_inc(&tgtp->rcv_fcp_cmd_out);
1879                 return;
1880         }
1881
1882         /* Processing of FCP command is deferred */
1883         if (rc == -EOVERFLOW) {
1884                 lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
1885                                  "from %06x\n",
1886                                  ctxp->oxid, ctxp->size, ctxp->sid);
1887                 atomic_inc(&tgtp->rcv_fcp_cmd_out);
1888                 atomic_inc(&tgtp->defer_fod);
1889                 return;
1890         }
1891         atomic_inc(&tgtp->rcv_fcp_cmd_drop);
1892         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1893                         "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
1894                         ctxp->oxid, rc,
1895                         atomic_read(&tgtp->rcv_fcp_cmd_in),
1896                         atomic_read(&tgtp->rcv_fcp_cmd_out),
1897                         atomic_read(&tgtp->xmt_fcp_release));
1898         lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
1899                          ctxp->oxid, ctxp->size, ctxp->sid);
1900         spin_lock_irqsave(&ctxp->ctxlock, iflags);
1901         lpfc_nvmet_defer_release(phba, ctxp);
1902         spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
1903         lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
1904 #endif
1905 }
1906
1907 static void
1908 lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
1909 {
1910 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1911         struct lpfc_nvmet_ctxbuf *ctx_buf =
1912                 container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);
1913
1914         lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
1915 #endif
1916 }
1917
1918 static struct lpfc_nvmet_ctxbuf *
1919 lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
1920                              struct lpfc_nvmet_ctx_info *current_infop)
1921 {
1922 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1923         struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
1924         struct lpfc_nvmet_ctx_info *get_infop;
1925         int i;
1926
	/*
	 * The current_infop for the MRQ an NVME command IU was received
	 * on is empty. Our goal is to replenish this MRQ's context
	 * list from another CPU's list.
	 *
	 * First we need to pick a context list to start looking on.
	 * nvmet_ctx_start_cpu points to the list that had contexts
	 * available the last time we needed to replenish this CPU;
	 * nvmet_ctx_next_cpu is just the next sequential CPU for
	 * this MRQ.
	 */
1937         if (current_infop->nvmet_ctx_start_cpu)
1938                 get_infop = current_infop->nvmet_ctx_start_cpu;
1939         else
1940                 get_infop = current_infop->nvmet_ctx_next_cpu;
1941
1942         for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
1943                 if (get_infop == current_infop) {
1944                         get_infop = get_infop->nvmet_ctx_next_cpu;
1945                         continue;
1946                 }
1947                 spin_lock(&get_infop->nvmet_ctx_list_lock);
1948
1949                 /* Just take the entire context list, if there are any */
1950                 if (get_infop->nvmet_ctx_list_cnt) {
1951                         list_splice_init(&get_infop->nvmet_ctx_list,
1952                                     &current_infop->nvmet_ctx_list);
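			/* One less than what was spliced in: a ctx_buf is
			 * removed below and handed back to the caller.
			 */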
1953                         current_infop->nvmet_ctx_list_cnt =
1954                                 get_infop->nvmet_ctx_list_cnt - 1;
1955                         get_infop->nvmet_ctx_list_cnt = 0;
1956                         spin_unlock(&get_infop->nvmet_ctx_list_lock);
1957
1958                         current_infop->nvmet_ctx_start_cpu = get_infop;
1959                         list_remove_head(&current_infop->nvmet_ctx_list,
1960                                          ctx_buf, struct lpfc_nvmet_ctxbuf,
1961                                          list);
1962                         return ctx_buf;
1963                 }
1964
1965                 /* Otherwise, move on to the next CPU for this MRQ */
1966                 spin_unlock(&get_infop->nvmet_ctx_list_lock);
1967                 get_infop = get_infop->nvmet_ctx_next_cpu;
1968         }
1969
1970 #endif
1971         /* Nothing found, all contexts for the MRQ are in-flight */
1972         return NULL;
1973 }
1974
1975 /**
1976  * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
1977  * @phba: pointer to lpfc hba data structure.
1978  * @idx: relative index of MRQ vector
 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
 * @isr_timestamp: timestamp taken in the ISR, used for debugfs latency
 *                 statistics.
 *
 * This routine processes an unsolicited NVME FCP command received on
 * MRQ @idx. A free receive context is pulled from the per-CPU, per-MRQ
 * context lists, replenishing from another CPU's list if the local one
 * is empty. If no context is available, the command is queued on the
 * io_wait list and a fresh buffer is posted to the RQ; otherwise the
 * context is initialized and the command is handed to the nvmet
 * transport.
1987  **/
1988 static void
1989 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
1990                             uint32_t idx,
1991                             struct rqb_dmabuf *nvmebuf,
1992                             uint64_t isr_timestamp)
1993 {
1994         struct lpfc_nvmet_rcv_ctx *ctxp;
1995         struct lpfc_nvmet_tgtport *tgtp;
1996         struct fc_frame_header *fc_hdr;
1997         struct lpfc_nvmet_ctxbuf *ctx_buf;
1998         struct lpfc_nvmet_ctx_info *current_infop;
1999         uint32_t *payload;
2000         uint32_t size, oxid, sid, qno;
2001         unsigned long iflag;
2002         int current_cpu;
2003
2004         if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
2005                 return;
2006
2007         ctx_buf = NULL;
2008         if (!nvmebuf || !phba->targetport) {
2009                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2010                                 "6157 NVMET FCP Drop IO\n");
2011                 if (nvmebuf)
2012                         lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2013                 return;
2014         }
2015
2016         /*
2017          * Get a pointer to the context list for this MRQ based on
2018          * the CPU this MRQ IRQ is associated with. If the CPU association
2019          * changes from our initial assumption, the context list could
2020          * be empty, thus it would need to be replenished with the
2021          * context list from another CPU for this MRQ.
2022          */
2023         current_cpu = smp_processor_id();
2024         current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
2025         spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
2026         if (current_infop->nvmet_ctx_list_cnt) {
2027                 list_remove_head(&current_infop->nvmet_ctx_list,
2028                                  ctx_buf, struct lpfc_nvmet_ctxbuf, list);
2029                 current_infop->nvmet_ctx_list_cnt--;
2030         } else {
2031                 ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
2032         }
2033         spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
2034
2035         fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
2036         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2037         size = nvmebuf->bytes_recv;
2038
2039 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2040         if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
2041                 if (current_cpu < LPFC_CHECK_CPU_CNT) {
2042                         if (idx != current_cpu)
2043                                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2044                                                 "6703 CPU Check rcv: "
2045                                                 "cpu %d expect %d\n",
2046                                                 current_cpu, idx);
2047                         phba->sli4_hba.hdwq[idx].cpucheck_rcv_io[current_cpu]++;
2048                 }
2049         }
2050 #endif
2051
2052         lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d CPU %02x\n",
2053                          oxid, size, smp_processor_id());
2054
2055         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2056
2057         if (!ctx_buf) {
2058                 /* Queue this NVME IO to process later */
2059                 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
2060                 list_add_tail(&nvmebuf->hbuf.list,
2061                               &phba->sli4_hba.lpfc_nvmet_io_wait_list);
2062                 phba->sli4_hba.nvmet_io_wait_cnt++;
2063                 phba->sli4_hba.nvmet_io_wait_total++;
2064                 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
2065                                        iflag);
2066
2067                 /* Post a brand new DMA buffer to RQ */
2068                 qno = nvmebuf->idx;
2069                 lpfc_post_rq_buffer(
2070                         phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2071                         phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2072
2073                 atomic_inc(&tgtp->defer_ctx);
2074                 return;
2075         }
2076
2077         payload = (uint32_t *)(nvmebuf->dbuf.virt);
2078         sid = sli4_sid_from_fc_hdr(fc_hdr);
2079
2080         ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
2081         if (ctxp->state != LPFC_NVMET_STE_FREE) {
2082                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2083                                 "6414 NVMET Context corrupt %d %d oxid x%x\n",
2084                                 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
2085         }
2086         ctxp->wqeq = NULL;
2087         ctxp->txrdy = NULL;
2088         ctxp->offset = 0;
2089         ctxp->phba = phba;
2090         ctxp->size = size;
2091         ctxp->oxid = oxid;
2092         ctxp->sid = sid;
2093         ctxp->idx = idx;
2094         ctxp->state = LPFC_NVMET_STE_RCV;
2095         ctxp->entry_cnt = 1;
2096         ctxp->flag = 0;
2097         ctxp->ctxbuf = ctx_buf;
2098         ctxp->rqb_buffer = (void *)nvmebuf;
2099         ctxp->hdwq = NULL;
2100         spin_lock_init(&ctxp->ctxlock);
2101
2102 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2103         if (isr_timestamp) {
2104                 ctxp->ts_isr_cmd = isr_timestamp;
2105                 ctxp->ts_cmd_nvme = ktime_get_ns();
2106                 ctxp->ts_nvme_data = 0;
2107                 ctxp->ts_data_wqput = 0;
2108                 ctxp->ts_isr_data = 0;
2109                 ctxp->ts_data_nvme = 0;
2110                 ctxp->ts_nvme_status = 0;
2111                 ctxp->ts_status_wqput = 0;
2112                 ctxp->ts_isr_status = 0;
2113                 ctxp->ts_status_nvme = 0;
2114         } else {
2115                 ctxp->ts_cmd_nvme = 0;
2116         }
2117 #endif
2118
2119         atomic_inc(&tgtp->rcv_fcp_cmd_in);
2120         lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2121 }
2122
2123 /**
2124  * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
2125  * @phba: pointer to lpfc hba data structure.
2126  * @pring: pointer to a SLI ring.
 * @piocb: pointer to the iocbq carrying the received LS buffer.
2128  *
2129  * This routine is used to process an unsolicited event received from a SLI
2130  * (Service Level Interface) ring. The actual processing of the data buffer
2131  * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
2133  * SLI RQ on which the unsolicited event was received.
2134  **/
2135 void
2136 lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2137                           struct lpfc_iocbq *piocb)
2138 {
2139         struct lpfc_dmabuf *d_buf;
2140         struct hbq_dmabuf *nvmebuf;
2141
2142         d_buf = piocb->context2;
2143         nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2144
2145         if (phba->nvmet_support == 0) {
2146                 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2147                 return;
2148         }
2149         lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
2150 }
2151
2152 /**
2153  * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
2154  * @phba: pointer to lpfc hba data structure.
2155  * @idx: relative index of MRQ vector
 * @nvmebuf: pointer to received nvme data structure.
 * @isr_timestamp: timestamp taken in the ISR, used for debugfs latency
 *                 statistics.
2157  *
2158  * This routine is used to process an unsolicited event received from a SLI
2159  * (Service Level Interface) ring. The actual processing of the data buffer
2160  * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
2162  * SLI RQ on which the unsolicited event was received.
2163  **/
2164 void
2165 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
2166                            uint32_t idx,
2167                            struct rqb_dmabuf *nvmebuf,
2168                            uint64_t isr_timestamp)
2169 {
2170         if (phba->nvmet_support == 0) {
2171                 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2172                 return;
2173         }
2174         lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf,
2175                                     isr_timestamp);
2176 }
2177
2178 /**
2179  * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
 * @phba: pointer to lpfc hba data structure.
2181  * @ctxp: Context info for NVME LS Request
2182  * @rspbuf: DMA buffer of NVME command.
2183  * @rspsize: size of the NVME command.
2184  *
 * This routine allocates a lpfc-WQE data structure from the driver
 * lpfc-WQE free-list and prepares an XMIT_SEQUENCE64 WQE that carries
 * the NVME LS response in @rspbuf back on the exchange described by
 * @ctxp. The reference count on the ndlp for the destination NPort is
 * incremented by 1, and the reference to the ndlp is put into context1
 * of the WQE data structure for this WQE to hold the ndlp reference
 * for the command's callback function to access later.
2198  *
2199  * Return code
2200  *   Pointer to the newly allocated/prepared nvme wqe data structure
2201  *   NULL - when nvme wqe data structure allocation/preparation failed
2202  **/
2203 static struct lpfc_iocbq *
2204 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
2205                        struct lpfc_nvmet_rcv_ctx *ctxp,
2206                        dma_addr_t rspbuf, uint16_t rspsize)
2207 {
2208         struct lpfc_nodelist *ndlp;
2209         struct lpfc_iocbq *nvmewqe;
2210         union lpfc_wqe128 *wqe;
2211
2212         if (!lpfc_is_link_up(phba)) {
2213                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2214                                 "6104 NVMET prep LS wqe: link err: "
2215                                 "NPORT x%x oxid:x%x ste %d\n",
2216                                 ctxp->sid, ctxp->oxid, ctxp->state);
2217                 return NULL;
2218         }
2219
	/* Allocate buffer for command wqe */
2221         nvmewqe = lpfc_sli_get_iocbq(phba);
2222         if (nvmewqe == NULL) {
2223                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2224                                 "6105 NVMET prep LS wqe: No WQE: "
2225                                 "NPORT x%x oxid x%x ste %d\n",
2226                                 ctxp->sid, ctxp->oxid, ctxp->state);
2227                 return NULL;
2228         }
2229
2230         ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2231         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2232             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2233             (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2234                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2235                                 "6106 NVMET prep LS wqe: No ndlp: "
2236                                 "NPORT x%x oxid x%x ste %d\n",
2237                                 ctxp->sid, ctxp->oxid, ctxp->state);
2238                 goto nvme_wqe_free_wqeq_exit;
2239         }
2240         ctxp->wqeq = nvmewqe;
2241
2242         /* prevent preparing wqe with NULL ndlp reference */
2243         nvmewqe->context1 = lpfc_nlp_get(ndlp);
2244         if (nvmewqe->context1 == NULL)
2245                 goto nvme_wqe_free_wqeq_exit;
2246         nvmewqe->context2 = ctxp;
2247
2248         wqe = &nvmewqe->wqe;
2249         memset(wqe, 0, sizeof(union lpfc_wqe));
2250
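	/* The LS response is transmitted as an XMIT_SEQUENCE64 WQE: the
	 * BDE in words 0 - 2 points at the response payload, and the
	 * OX_ID of the received LS exchange is plugged into word 9 below.
	 */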
2251         /* Words 0 - 2 */
2252         wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2253         wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
2254         wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
2255         wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
2256
2257         /* Word 3 */
2258
2259         /* Word 4 */
2260
2261         /* Word 5 */
2262         bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
2263         bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
2264         bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
2265         bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
2266         bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
2267
2268         /* Word 6 */
2269         bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
2270                phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2271         bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
2272
2273         /* Word 7 */
2274         bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
2275                CMD_XMIT_SEQUENCE64_WQE);
2276         bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
2277         bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
2278         bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
2279
2280         /* Word 8 */
2281         wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
2282
2283         /* Word 9 */
2284         bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
2285         /* Needs to be set by caller */
2286         bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
2287
2288         /* Word 10 */
2289         bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
2290         bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2291         bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
2292                LPFC_WQE_LENLOC_WORD12);
2293         bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
2294
2295         /* Word 11 */
2296         bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
2297                LPFC_WQE_CQ_ID_DEFAULT);
2298         bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
2299                OTHER_COMMAND);
2300
2301         /* Word 12 */
2302         wqe->xmit_sequence.xmit_len = rspsize;
2303
2304         nvmewqe->retry = 1;
2305         nvmewqe->vport = phba->pport;
2306         nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2307         nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
2308
2309         /* Xmit NVMET response to remote NPORT <did> */
2310         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2311                         "6039 Xmit NVMET LS response to remote "
2312                         "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
2313                         ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
2314                         rspsize);
2315         return nvmewqe;
2316
2317 nvme_wqe_free_wqeq_exit:
2318         nvmewqe->context2 = NULL;
2319         nvmewqe->context3 = NULL;
2320         lpfc_sli_release_iocbq(phba, nvmewqe);
2321         return NULL;
2322 }
2323
2324
2325 static struct lpfc_iocbq *
2326 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
2327                         struct lpfc_nvmet_rcv_ctx *ctxp)
2328 {
2329         struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
2330         struct lpfc_nvmet_tgtport *tgtp;
2331         struct sli4_sge *sgl;
2332         struct lpfc_nodelist *ndlp;
2333         struct lpfc_iocbq *nvmewqe;
2334         struct scatterlist *sgel;
2335         union lpfc_wqe128 *wqe;
2336         struct ulp_bde64 *bde;
2337         uint32_t *txrdy;
2338         dma_addr_t physaddr;
2339         int i, cnt;
2340         int do_pbde;
2341         int xc = 1;
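	/* xc == 1 means the exchange for this XRI is already open; it is
	 * cleared below when a fresh context requires the WQE to create a
	 * new exchange.
	 */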
2342
2343         if (!lpfc_is_link_up(phba)) {
2344                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2345                                 "6107 NVMET prep FCP wqe: link err:"
2346                                 "NPORT x%x oxid x%x ste %d\n",
2347                                 ctxp->sid, ctxp->oxid, ctxp->state);
2348                 return NULL;
2349         }
2350
2351         ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2352         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2353             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2354              (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2355                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2356                                 "6108 NVMET prep FCP wqe: no ndlp: "
2357                                 "NPORT x%x oxid x%x ste %d\n",
2358                                 ctxp->sid, ctxp->oxid, ctxp->state);
2359                 return NULL;
2360         }
2361
2362         if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
2363                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2364                                 "6109 NVMET prep FCP wqe: seg cnt err: "
				"NPORT x%x oxid x%x ste %d cnt %d max %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state,
				rsp->sg_cnt, phba->cfg_nvme_seg_cnt);
2368                 return NULL;
2369         }
2370
2371         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2372         nvmewqe = ctxp->wqeq;
2373         if (nvmewqe == NULL) {
		/* Allocate buffer for command wqe */
2375                 nvmewqe = ctxp->ctxbuf->iocbq;
2376                 if (nvmewqe == NULL) {
2377                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2378                                         "6110 NVMET prep FCP wqe: No "
2379                                         "WQE: NPORT x%x oxid x%x ste %d\n",
2380                                         ctxp->sid, ctxp->oxid, ctxp->state);
2381                         return NULL;
2382                 }
2383                 ctxp->wqeq = nvmewqe;
2384                 xc = 0; /* create new XRI */
2385                 nvmewqe->sli4_lxritag = NO_XRI;
2386                 nvmewqe->sli4_xritag = NO_XRI;
2387         }
2388
2389         /* Sanity check */
2390         if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
2391             (ctxp->entry_cnt == 1)) ||
2392             (ctxp->state == LPFC_NVMET_STE_DATA)) {
2393                 wqe = &nvmewqe->wqe;
2394         } else {
2395                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6111 Wrong state NVMET FCP: %d cnt %d\n",
2397                                 ctxp->state, ctxp->entry_cnt);
2398                 return NULL;
2399         }
2400
2401         sgl  = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
2402         switch (rsp->op) {
2403         case NVMET_FCOP_READDATA:
2404         case NVMET_FCOP_READDATA_RSP:
2405                 /* From the tsend template, initialize words 7 - 11 */
2406                 memcpy(&wqe->words[7],
2407                        &lpfc_tsend_cmd_template.words[7],
2408                        sizeof(uint32_t) * 5);
2409
2410                 /* Words 0 - 2 : The first sg segment */
2411                 sgel = &rsp->sg[0];
2412                 physaddr = sg_dma_address(sgel);
2413                 wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2414                 wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
2415                 wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
2416                 wqe->fcp_tsend.bde.addrHigh =
2417                         cpu_to_le32(putPaddrHigh(physaddr));
2418
2419                 /* Word 3 */
2420                 wqe->fcp_tsend.payload_offset_len = 0;
2421
2422                 /* Word 4 */
2423                 wqe->fcp_tsend.relative_offset = ctxp->offset;
2424
2425                 /* Word 5 */
2426                 wqe->fcp_tsend.reserved = 0;
2427
2428                 /* Word 6 */
2429                 bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
2430                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2431                 bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
2432                        nvmewqe->sli4_xritag);
2433
2434                 /* Word 7 - set ar later */
2435
2436                 /* Word 8 */
2437                 wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
2438
2439                 /* Word 9 */
2440                 bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
2441                 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
2442
2443                 /* Word 10 - set wqes later, in template xc=1 */
2444                 if (!xc)
2445                         bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
2446
2447                 /* Word 11 - set sup, irsp, irsplen later */
2448                 do_pbde = 0;
2449
2450                 /* Word 12 */
2451                 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2452
2453                 /* Setup 2 SKIP SGEs */
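		/* The first two SGEs of an NVMET XRI SGL are reserved for
		 * the cmd and rsp buffers (the "+ 3" noted in
		 * lpfc_nvmet_create_targetport()); tsend supplies no payload
		 * in them, so mark both SKIP and start data SGEs at entry 2.
		 */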
2454                 sgl->addr_hi = 0;
2455                 sgl->addr_lo = 0;
2456                 sgl->word2 = 0;
2457                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2458                 sgl->word2 = cpu_to_le32(sgl->word2);
2459                 sgl->sge_len = 0;
2460                 sgl++;
2461                 sgl->addr_hi = 0;
2462                 sgl->addr_lo = 0;
2463                 sgl->word2 = 0;
2464                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2465                 sgl->word2 = cpu_to_le32(sgl->word2);
2466                 sgl->sge_len = 0;
2467                 sgl++;
2468                 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
2469                         atomic_inc(&tgtp->xmt_fcp_read_rsp);
2470
2471                         /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2472
2473                         if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
2474                                 if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
2475                                         bf_set(wqe_sup,
2476                                                &wqe->fcp_tsend.wqe_com, 1);
2477                         } else {
2478                                 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
2479                                 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
2480                                 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
2481                                        ((rsp->rsplen >> 2) - 1));
2482                                 memcpy(&wqe->words[16], rsp->rspaddr,
2483                                        rsp->rsplen);
2484                         }
2485                 } else {
2486                         atomic_inc(&tgtp->xmt_fcp_read);
2487
2488                         /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2489                         bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
2490                 }
2491                 break;
2492
2493         case NVMET_FCOP_WRITEDATA:
2494                 /* From the treceive template, initialize words 3 - 11 */
2495                 memcpy(&wqe->words[3],
2496                        &lpfc_treceive_cmd_template.words[3],
2497                        sizeof(uint32_t) * 9);
2498
2499                 /* Words 0 - 2 : The first sg segment */
2500                 txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
2501                                        GFP_KERNEL, &physaddr);
2502                 if (!txrdy) {
2503                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2504                                         "6041 Bad txrdy buffer: oxid x%x\n",
2505                                         ctxp->oxid);
2506                         return NULL;
2507                 }
2508                 ctxp->txrdy = txrdy;
2509                 ctxp->txrdy_phys = physaddr;
2510                 wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2511                 wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
2512                 wqe->fcp_treceive.bde.addrLow =
2513                         cpu_to_le32(putPaddrLow(physaddr));
2514                 wqe->fcp_treceive.bde.addrHigh =
2515                         cpu_to_le32(putPaddrHigh(physaddr));
2516
2517                 /* Word 4 */
2518                 wqe->fcp_treceive.relative_offset = ctxp->offset;
2519
2520                 /* Word 6 */
2521                 bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
2522                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2523                 bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
2524                        nvmewqe->sli4_xritag);
2525
2526                 /* Word 7 */
2527
2528                 /* Word 8 */
2529                 wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
2530
2531                 /* Word 9 */
2532                 bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
2533                 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
2534
2535                 /* Word 10 - in template xc=1 */
2536                 if (!xc)
2537                         bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
2538
2539                 /* Word 11 - set pbde later */
2540                 if (phba->cfg_enable_pbde) {
2541                         do_pbde = 1;
2542                 } else {
2543                         bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
2544                         do_pbde = 0;
2545                 }
2546
2547                 /* Word 12 */
		wqe->fcp_treceive.fcp_data_len = rsp->transfer_length;
2549
2550                 /* Setup 1 TXRDY and 1 SKIP SGE */
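		/* txrdy[] is laid out as an FCP XFER_RDY IU: DATA_RO,
		 * BURST_LEN, then a reserved word. Only the burst length
		 * (the expected write size) needs to be filled in.
		 */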
2551                 txrdy[0] = 0;
2552                 txrdy[1] = cpu_to_be32(rsp->transfer_length);
2553                 txrdy[2] = 0;
2554
2555                 sgl->addr_hi = putPaddrHigh(physaddr);
2556                 sgl->addr_lo = putPaddrLow(physaddr);
2557                 sgl->word2 = 0;
2558                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2559                 sgl->word2 = cpu_to_le32(sgl->word2);
2560                 sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
2561                 sgl++;
2562                 sgl->addr_hi = 0;
2563                 sgl->addr_lo = 0;
2564                 sgl->word2 = 0;
2565                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2566                 sgl->word2 = cpu_to_le32(sgl->word2);
2567                 sgl->sge_len = 0;
2568                 sgl++;
		atomic_inc(&tgtp->xmt_fcp_write);
		break;

	case NVMET_FCOP_RSP:
		/* From the trsp template, initialize words 4 - 11 */
		memcpy(&wqe->words[4],
		       &lpfc_trsp_cmd_template.words[4],
		       sizeof(uint32_t) * 8);

		/* Words 0 - 2 */
		physaddr = rsp->rspdma;
		wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
		wqe->fcp_trsp.bde.addrLow =
			cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_trsp.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_trsp.response_len = rsp->rsplen;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 */

		/* Word 8 */
		wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);

		/* Word 10 */
		if (xc)
			bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);

		/* Word 11 */
		/* In template wqes=0 irsp=0 irsplen=0 - good response */
		if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
			/* Bad response - embed it */
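			/* The response is copied into WQE words 16 and
			 * up and goes out as immediate data rather than
			 * through the BDE.
			 */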
			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
			       ((rsp->rsplen >> 2) - 1));
			memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
		}
		do_pbde = 0;

		/* Word 12 */
		wqe->fcp_trsp.rsvd_12_15[0] = 0;

		/* Use rspbuf, NOT sg list */
		rsp->sg_cnt = 0;
		sgl->word2 = 0;
		atomic_inc(&tgtp->xmt_fcp_rsp);
		break;

	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6064 Unknown Rsp Op %d\n",
				rsp->op);
		return NULL;
	}

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->context1 = ndlp;

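	/* Build one data SGE per DMA-mapped segment, mark the last SGE,
	 * and accumulate the relative offset for any follow-on sequence.
	 */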
	for (i = 0; i < rsp->sg_cnt; i++) {
		sgel = &rsp->sg[i];
		physaddr = sg_dma_address(sgel);
		cnt = sg_dma_len(sgel);
		sgl->addr_hi = putPaddrHigh(physaddr);
		sgl->addr_lo = putPaddrLow(physaddr);
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
		if ((i + 1) == rsp->sg_cnt)
			bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(cnt);
		if (i == 0) {
			bde = (struct ulp_bde64 *)&wqe->words[13];
			if (do_pbde) {
				/* Words 13-15 (PBDE) */
				bde->addrLow = sgl->addr_lo;
				bde->addrHigh = sgl->addr_hi;
				bde->tus.f.bdeSize =
					le32_to_cpu(sgl->sge_len);
				bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bde->tus.w = cpu_to_le32(bde->tus.w);
			} else {
				memset(bde, 0, sizeof(struct ulp_bde64));
			}
		}
		sgl++;
		ctxp->offset += cnt;
	}
	ctxp->state = LPFC_NVMET_STE_DATA;
	ctxp->entry_cnt++;
	return nvmewqe;
}

/**
 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. It is the completion handler for an NVME ABTS of an FCP
 * command and frees the memory resources used for the command.
 **/
static void
lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;
	unsigned long flags;
	bool released = false;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	spin_lock_irqsave(&ctxp->ctxlock, flags);
	ctxp->state = LPFC_NVMET_STE_DONE;

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6165 ABORT cmpl: xri x%x flg x%x (%d) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	/*
	 * If the transport has already released the ctx, it can be
	 * reused now.  Otherwise it will be recycled by the transport
	 * release call.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* This is the iocbq for the abort, not the command */
	lpfc_sli_release_iocbq(phba, cmdwqe);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}

/**
 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. It is the completion handler for an NVME ABTS of an FCP
 * command and frees the memory resources used for the command.
 **/
static void
lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			       struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	unsigned long flags;
	uint32_t status, result;
	bool released = false;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	if (!ctxp) {
		/* if the context is clear, the related io already completed */
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);
		return;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	/* Sanity check */
	if (ctxp->state != LPFC_NVMET_STE_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6112 ABTS Wrong state:%d oxid x%x\n",
				ctxp->state, ctxp->oxid);
	}

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	ctxp->state = LPFC_NVMET_STE_DONE;
	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6316 ABTS cmpl xri x%x flg x%x (%x) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	/*
	 * If the transport has already released the ctx, it can be
	 * reused now.  Otherwise it will be recycled by the transport
	 * release call.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}

/**
 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. It is the completion handler for an NVME ABTS of an LS
 * command and frees the memory resources used for the command.
 **/
static void
lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			    struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	atomic_inc(&tgtp->xmt_ls_abort_cmpl);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
			ctxp, wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	if (!ctxp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6415 NVMET LS Abort No ctx: WCQE: "
				"%08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);

		lpfc_sli_release_iocbq(phba, cmdwqe);
		return;
	}

	if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6416 NVMET LS abort cmpl state mismatch: "
				"oxid x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	kfree(ctxp);
}

static int
lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
			     struct lpfc_nvmet_rcv_ctx *ctxp,
			     uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe128 *wqe_abts;
	struct lpfc_nodelist *ndlp;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6067 ABTS: sid %x xri x%x/x%x\n",
			sid, xri, ctxp->wqeq->sli4_xritag);

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6134 Drop ABTS - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* Never fail an ABTS request back to the transport. */
		return 0;
	}

	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;

	/*
	 * The whole WQE is zeroed below, so any fields initialized at
	 * allocation time in lpfc_sli4_nvmet_alloc must be set again.
	 */
	memset(wqe_abts, 0, sizeof(union lpfc_wqe));

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
	bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
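	/* The ABTS goes out as a BLS frame (R_CTL FC_RCTL_BA_ABTS)
	 * carried by an XMIT_SEQUENCE WQE; the exchange to abort is
	 * named by the OXID set in word 9 below.
	 */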

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
	       abts_wqeq->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
	bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	abts_wqeq->vport = phba->pport;
	abts_wqeq->context1 = ndlp;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->context3 = NULL;
	abts_wqeq->rsvd2 = 0;
	/* hba_wqidx should already be setup from command we are aborting */
	abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
	abts_wqeq->iocb.ulpLe = 1;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6069 Issue ABTS to xri x%x reqtag x%x\n",
			xri, abts_wqeq->iotag);
	return 1;
}

static int
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
			       struct lpfc_nvmet_rcv_ctx *ctxp,
			       uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe128 *abts_wqe;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6160 Drop ABORT - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* Never fail an ABTS request back to the transport. */
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

	/* Issue ABTS for this WQE based on iotag */
	ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (!ctxp->abort_wqeq) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6161 ABORT failed: No wqeqs: "
				"xri: x%x\n", ctxp->oxid);
		/* Never fail an ABTS request back to the transport. */
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}
	abts_wqeq = ctxp->abort_wqeq;
	abts_wqe = &abts_wqeq->wqe;
	ctxp->state = LPFC_NVMET_STE_ABORT;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	/* Log the ABORT request before issuing it. */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6162 ABORT Request to rport DID x%06x "
			"for xri x%x x%x\n",
			ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);

	/* If the hba is getting reset, this flag is set.  It is
	 * cleared when the reset is complete and rings re-established.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in the process of being flushed */
	if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6163 Driver in reset cleanup - flushing "
				"NVME Req now. hba_flag x%x oxid x%x\n",
				phba->hba_flag, ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

	/* An outstanding abort is already in progress */
	if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6164 Outstanding NVME I/O Abort Request "
				"still pending on oxid x%x\n",
				ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

	/* Ready - mark outstanding as aborted by driver. */
	abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;

	/* WQEs are reused.  Clear stale data and set key fields to
	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
	 */
	memset(abts_wqe, 0, sizeof(union lpfc_wqe));

	/* word 3 */
	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);

	/* word 7 */
	bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);

	/* word 8 - tell the FW to abort the IO associated with this
	 * outstanding exchange ID.
	 */
	abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;

	/* word 9 - this is the iotag for the abts_wqe completion. */
	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
	       abts_wqeq->iotag);

	/* word 10 */
	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

	/* word 11 */
	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->vport = phba->pport;
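	/* If not already bound, bind the context to the hardware queue
	 * of the command being aborted.
	 */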
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];

	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_sol);
		return 0;
	}

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6166 Failed ABORT issue_wqe with status x%x "
			"for oxid x%x.\n",
			rc, ctxp->oxid);
	return 1;
}

static int
lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
				 struct lpfc_nvmet_rcv_ctx *ctxp,
				 uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	bool released = false;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	if (ctxp->state == LPFC_NVMET_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
		rc = WQE_BUSY;
		goto aerr;
	}
	ctxp->state = LPFC_NVMET_STE_ABORT;
	ctxp->entry_cnt++;
	rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
	if (rc == 0)
		goto aerr;

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq = ctxp->wqeq;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];

	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS)
		return 0;

aerr:
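	/* The abort was not issued.  If a context release is already
	 * pending, take the context off the ABTS list now and post the
	 * buffer back below.
	 */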
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
			ctxp->oxid, rc);
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
	return 1;
}

static int
lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
				struct lpfc_nvmet_rcv_ctx *ctxp,
				uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe128 *wqe_abts;
	unsigned long flags;
	int rc;

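	/* An LS context is expected to move RCV -> RSP -> ABORT; any
	 * other state/entry_cnt pairing suggests a duplicate or stale
	 * abort request.
	 */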
	if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
	    (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
		ctxp->state = LPFC_NVMET_STE_LS_ABORT;
		ctxp->entry_cnt++;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6418 NVMET LS abort state mismatch "
				"IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
		ctxp->state = LPFC_NVMET_STE_LS_ABORT;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		/* Issue ABTS for this WQE based on iotag */
		ctxp->wqeq = lpfc_sli_get_iocbq(phba);
		if (!ctxp->wqeq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
					"6068 Abort failed: No wqeqs: "
					"xri: x%x\n", xri);
			/* Never fail an ABTS request. */
			kfree(ctxp);
			return 0;
		}
	}
	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;

	if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
		rc = WQE_BUSY;
		goto out;
	}

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_unsol);
		return 0;
	}
out:
	atomic_inc(&tgtp->xmt_abort_rsp_error);
	abts_wqeq->context2 = NULL;
	abts_wqeq->context3 = NULL;
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	kfree(ctxp);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6056 Failed to Issue ABTS. Status x%x\n", rc);
	return 0;
}