/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme.h>
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
                                                 struct lpfc_nvmet_rcv_ctx *,
                                                 dma_addr_t rspbuf,
                                                 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
                                                  struct lpfc_nvmet_rcv_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
                                          struct lpfc_nvmet_rcv_ctx *,
                                          uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
                                            struct lpfc_nvmet_rcv_ctx *,
                                            uint32_t, uint16_t);
static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
                                           struct lpfc_nvmet_rcv_ctx *,
                                           uint32_t, uint16_t);
static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
                                    struct lpfc_nvmet_rcv_ctx *);

static union lpfc_wqe128 lpfc_tsend_cmd_template;
static union lpfc_wqe128 lpfc_treceive_cmd_template;
static union lpfc_wqe128 lpfc_trsp_cmd_template;

/* Setup WQE templates for NVME IOs */
void
lpfc_nvmet_cmd_template(void)
{
        union lpfc_wqe128 *wqe;

        /* TSEND template */
        wqe = &lpfc_tsend_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - payload_offset_len is zero */

        /* Word 4 - relative_offset is variable */

        /* Word 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 - wqe_ar is variable */
        bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
        bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
        bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
        bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);

        /* Word 8 - abort_tag is variable */

        /* Word 9  - reqtag, rcvoxid is variable */

        /* Word 10 - wqes, xc is variable */
        bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
        bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
        bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
        bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
        bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
        bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);

        /* Word 11 - sup, irsp, irsplen is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
        bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
        bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
        bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
        bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);

        /* Word 12 - fcp_data_len is variable */

        /* Word 13, 14, 15 - PBDE is zero */

        /* TRECEIVE template */
        wqe = &lpfc_treceive_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 */
        wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;

        /* Word 4 - relative_offset is variable */

        /* Word 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
        bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
        bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
        bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);

        /* Word 8 - abort_tag is variable */

        /* Word 9  - reqtag, rcvoxid is variable */

        /* Word 10 - xc is variable */
        bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
        bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
        bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
        bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
        bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
        bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 1);

        /* Word 11 - pbde is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
        bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
        bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
        bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
        bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);

        /* Word 12 - fcp_data_len is variable */

        /* Word 13, 14, 15 - PBDE is variable */

        /* TRSP template */
        wqe = &lpfc_trsp_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - response_len is variable */

        /* Word 4, 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
        bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
        bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
        bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */

        /* Word 8 - abort_tag is variable */

        /* Word 9  - reqtag is variable */

        /* Word 10 wqes, xc is variable */
        bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
        bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
        bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
        bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
        bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
        bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);

        /* Word 11 irsp, irsplen is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
        bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
        bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
        bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
        bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);

        /* Word 12, 13, 14, 15 - is zero */
}

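/*
 * lpfc_nvmet_defer_release - Defer the final release of an exchange context
 *
 * Marks the context with LPFC_NVMET_CTX_RLS and queues it on the
 * lpfc_abts_nvmet_ctx_list so that the real release happens only after the
 * outstanding abort / exchange-busy processing completes. If the flag is
 * already set the context is already queued and nothing more is done.
 */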
void
lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
{
        unsigned long iflag;

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                        "6313 NVMET Defer ctx release xri x%x flg x%x\n",
                        ctxp->oxid, ctxp->flag);

        spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
        if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
                spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
                                       iflag);
                return;
        }
        ctxp->flag |= LPFC_NVMET_CTX_RLS;
        list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
        spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
}

/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. It is the completion handler for NVME LS commands and
 * frees the memory resources used for those commands.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                          struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct nvmefc_tgt_ls_req *rsp;
        struct lpfc_nvmet_rcv_ctx *ctxp;
        uint32_t status, result;

        status = bf_get(lpfc_wcqe_c_status, wcqe);
        result = wcqe->parameter;
        ctxp = cmdwqe->context2;

        if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6410 NVMET LS cmpl state mismatch IO x%x: "
                                "%d %d\n",
                                ctxp->oxid, ctxp->state, ctxp->entry_cnt);
        }

        if (!phba->targetport)
                goto out;

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

        if (tgtp) {
                if (status) {
                        atomic_inc(&tgtp->xmt_ls_rsp_error);
                        if (result == IOERR_ABORT_REQUESTED)
                                atomic_inc(&tgtp->xmt_ls_rsp_aborted);
                        if (bf_get(lpfc_wcqe_c_xb, wcqe))
                                atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
                } else {
                        atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
                }
        }

out:
        rsp = &ctxp->ctx.ls_req;

        lpfc_nvmeio_data(phba, "NVMET LS  CMPL: xri x%x stat x%x result x%x\n",
                         ctxp->oxid, status, result);

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                        "6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
                        status, result, ctxp->oxid);

        lpfc_nlp_put(cmdwqe->context1);
        cmdwqe->context2 = NULL;
        cmdwqe->context3 = NULL;
        lpfc_sli_release_iocbq(phba, cmdwqe);
        rsp->done(rsp);
        kfree(ctxp);
}

/**
 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA the buffer is associated with
 * @ctx_buf: context buffer to clean up and repost
 *
 * Description: If an unsolicited FCP command is waiting for a context,
 * reuses this context buffer to start processing that command; otherwise
 * returns the buffer to its CPU/MRQ context list so it can be reused.
 *
 * Notes: Takes the nvmet_io_wait and context list locks. Can be called
 * with or without other locks held.
 *
 * Returns: None
 **/
void
lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
        struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
        struct lpfc_nvmet_tgtport *tgtp;
        struct fc_frame_header *fc_hdr;
        struct rqb_dmabuf *nvmebuf;
        struct lpfc_nvmet_ctx_info *infop;
        uint32_t *payload;
        uint32_t size, oxid, sid, rc;
        int cpu;
        unsigned long iflag;

        if (ctxp->txrdy) {
                dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
                              ctxp->txrdy_phys);
                ctxp->txrdy = NULL;
                ctxp->txrdy_phys = 0;
        }

        if (ctxp->state == LPFC_NVMET_STE_FREE) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6411 NVMET free, already free IO x%x: %d %d\n",
                                ctxp->oxid, ctxp->state, ctxp->entry_cnt);
        }
        ctxp->state = LPFC_NVMET_STE_FREE;

        spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
        if (phba->sli4_hba.nvmet_io_wait_cnt) {
                list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
                                 nvmebuf, struct rqb_dmabuf,
                                 hbuf.list);
                phba->sli4_hba.nvmet_io_wait_cnt--;
                spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
                                       iflag);

                fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
                oxid = be16_to_cpu(fc_hdr->fh_ox_id);
                tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
                payload = (uint32_t *)(nvmebuf->dbuf.virt);
                size = nvmebuf->bytes_recv;
                sid = sli4_sid_from_fc_hdr(fc_hdr);

                ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
                ctxp->wqeq = NULL;
                ctxp->txrdy = NULL;
                ctxp->offset = 0;
                ctxp->phba = phba;
                ctxp->size = size;
                ctxp->oxid = oxid;
                ctxp->sid = sid;
                ctxp->state = LPFC_NVMET_STE_RCV;
                ctxp->entry_cnt = 1;
                ctxp->flag = 0;
                ctxp->ctxbuf = ctx_buf;
                ctxp->rqb_buffer = (void *)nvmebuf;
                spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (ctxp->ts_cmd_nvme) {
                        ctxp->ts_cmd_nvme = ktime_get_ns();
                        ctxp->ts_nvme_data = 0;
                        ctxp->ts_data_wqput = 0;
                        ctxp->ts_isr_data = 0;
                        ctxp->ts_data_nvme = 0;
                        ctxp->ts_nvme_status = 0;
                        ctxp->ts_status_wqput = 0;
                        ctxp->ts_isr_status = 0;
                        ctxp->ts_status_nvme = 0;
                }
#endif
                atomic_inc(&tgtp->rcv_fcp_cmd_in);
                /*
                 * The calling sequence should be:
                 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
                 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
                 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
                 * on the NVME command / FC header is stored in the ctxp.
                 * A buffer has already been reposted for this IO, so just free
                 * the nvmebuf.
                 */
                rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
                                          payload, size);

                /* Process FCP command */
                if (rc == 0) {
                        ctxp->rqb_buffer = NULL;
                        atomic_inc(&tgtp->rcv_fcp_cmd_out);
                        nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
                        return;
                }

                /* Processing of FCP command is deferred */
                if (rc == -EOVERFLOW) {
                        lpfc_nvmeio_data(phba,
                                         "NVMET RCV BUSY: xri x%x sz %d "
                                         "from %06x\n",
                                         oxid, size, sid);
                        atomic_inc(&tgtp->rcv_fcp_cmd_out);
                        return;
                }
                atomic_inc(&tgtp->rcv_fcp_cmd_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
                                ctxp->oxid, rc,
                                atomic_read(&tgtp->rcv_fcp_cmd_in),
                                atomic_read(&tgtp->rcv_fcp_cmd_out),
                                atomic_read(&tgtp->xmt_fcp_release));

                lpfc_nvmet_defer_release(phba, ctxp);
                lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
                nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
                return;
        }
        spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

        /*
         * Use the CPU context list, from the MRQ the IO was received on
         * (ctxp->idx), to save context structure.
         */
        cpu = smp_processor_id();
        infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
        spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
        list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
        infop->nvmet_ctx_list_cnt++;
        spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
#endif
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
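/*
 * lpfc_nvmet_ktime - Accumulate per-IO latency statistics
 *
 * Validates that the ten timestamps taken over the life of the IO are
 * monotonically increasing, converts them into the ten segments described
 * in the comment below, and folds each segment into the phba
 * min/max/total counters that back the debugfs ktime statistics.
 */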
static void
lpfc_nvmet_ktime(struct lpfc_hba *phba,
                 struct lpfc_nvmet_rcv_ctx *ctxp)
{
        uint64_t seg1, seg2, seg3, seg4, seg5;
        uint64_t seg6, seg7, seg8, seg9, seg10;
        uint64_t segsum;

        if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
            !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
            !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
            !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
            !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
                return;

        if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
                return;
        if (ctxp->ts_isr_cmd  > ctxp->ts_cmd_nvme)
                return;
        if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
                return;
        if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
                return;
        if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
                return;
        if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
                return;
        if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
                return;
        if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
                return;
        if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
                return;
        if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
                return;
        /*
         * Segment 1 - Time from FCP command received by MSI-X ISR
         * to FCP command is passed to NVME Layer.
         * Segment 2 - Time from FCP command payload handed
         * off to NVME Layer to Driver receives a Command op
         * from NVME Layer.
         * Segment 3 - Time from Driver receives a Command op
         * from NVME Layer to Command is put on WQ.
         * Segment 4 - Time from Driver WQ put is done
         * to MSI-X ISR for Command cmpl.
         * Segment 5 - Time from MSI-X ISR for Command cmpl to
         * Command cmpl is passed to NVME Layer.
         * Segment 6 - Time from Command cmpl is passed to NVME
         * Layer to Driver receives a RSP op from NVME Layer.
         * Segment 7 - Time from Driver receives a RSP op from
         * NVME Layer to WQ put is done on TRSP FCP Status.
         * Segment 8 - Time from Driver WQ put is done on TRSP
         * FCP Status to MSI-X ISR for TRSP cmpl.
         * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
         * TRSP cmpl is passed to NVME Layer.
         * Segment 10 - Time from FCP command received by
         * MSI-X ISR to command is completed on wire.
         * (Segments 1 thru 8) for READDATA / WRITEDATA
         * (Segments 1 thru 4) for READDATA_RSP
         */
        seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
        segsum = seg1;

        seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
        if (segsum > seg2)
                return;
        seg2 -= segsum;
        segsum += seg2;

        seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
        if (segsum > seg3)
                return;
        seg3 -= segsum;
        segsum += seg3;

        seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
        if (segsum > seg4)
                return;
        seg4 -= segsum;
        segsum += seg4;

        seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
        if (segsum > seg5)
                return;
        seg5 -= segsum;
        segsum += seg5;


        /* For auto rsp commands seg6 thru seg10 will be 0 */
        if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
                seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
                if (segsum > seg6)
                        return;
                seg6 -= segsum;
                segsum += seg6;

                seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
                if (segsum > seg7)
                        return;
                seg7 -= segsum;
                segsum += seg7;

                seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
                if (segsum > seg8)
                        return;
                seg8 -= segsum;
                segsum += seg8;

                seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
                if (segsum > seg9)
                        return;
                seg9 -= segsum;
                segsum += seg9;

                if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
                        return;
                seg10 = (ctxp->ts_isr_status -
                        ctxp->ts_isr_cmd);
        } else {
                if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
                        return;
                seg6 =  0;
                seg7 =  0;
                seg8 =  0;
                seg9 =  0;
                seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
        }

        phba->ktime_seg1_total += seg1;
        if (seg1 < phba->ktime_seg1_min)
                phba->ktime_seg1_min = seg1;
        else if (seg1 > phba->ktime_seg1_max)
                phba->ktime_seg1_max = seg1;

        phba->ktime_seg2_total += seg2;
        if (seg2 < phba->ktime_seg2_min)
                phba->ktime_seg2_min = seg2;
        else if (seg2 > phba->ktime_seg2_max)
                phba->ktime_seg2_max = seg2;

        phba->ktime_seg3_total += seg3;
        if (seg3 < phba->ktime_seg3_min)
                phba->ktime_seg3_min = seg3;
        else if (seg3 > phba->ktime_seg3_max)
                phba->ktime_seg3_max = seg3;

        phba->ktime_seg4_total += seg4;
        if (seg4 < phba->ktime_seg4_min)
                phba->ktime_seg4_min = seg4;
        else if (seg4 > phba->ktime_seg4_max)
                phba->ktime_seg4_max = seg4;

        phba->ktime_seg5_total += seg5;
        if (seg5 < phba->ktime_seg5_min)
                phba->ktime_seg5_min = seg5;
        else if (seg5 > phba->ktime_seg5_max)
                phba->ktime_seg5_max = seg5;

        phba->ktime_data_samples++;
        if (!seg6)
                goto out;

        phba->ktime_seg6_total += seg6;
        if (seg6 < phba->ktime_seg6_min)
                phba->ktime_seg6_min = seg6;
        else if (seg6 > phba->ktime_seg6_max)
                phba->ktime_seg6_max = seg6;

        phba->ktime_seg7_total += seg7;
        if (seg7 < phba->ktime_seg7_min)
                phba->ktime_seg7_min = seg7;
        else if (seg7 > phba->ktime_seg7_max)
                phba->ktime_seg7_max = seg7;

        phba->ktime_seg8_total += seg8;
        if (seg8 < phba->ktime_seg8_min)
                phba->ktime_seg8_min = seg8;
        else if (seg8 > phba->ktime_seg8_max)
                phba->ktime_seg8_max = seg8;

        phba->ktime_seg9_total += seg9;
        if (seg9 < phba->ktime_seg9_min)
                phba->ktime_seg9_min = seg9;
        else if (seg9 > phba->ktime_seg9_max)
                phba->ktime_seg9_max = seg9;
out:
        phba->ktime_seg10_total += seg10;
        if (seg10 < phba->ktime_seg10_min)
                phba->ktime_seg10_min = seg10;
        else if (seg10 > phba->ktime_seg10_max)
                phba->ktime_seg10_max = seg10;
        phba->ktime_status_samples++;
}
#endif

/**
 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. It is the completion handler for NVME FCP commands and
 * frees the memory resources used for those commands.
 **/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                          struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct nvmefc_tgt_fcp_req *rsp;
        struct lpfc_nvmet_rcv_ctx *ctxp;
        uint32_t status, result, op, start_clean, logerr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        uint32_t id;
#endif

        ctxp = cmdwqe->context2;
        ctxp->flag &= ~LPFC_NVMET_IO_INP;

        rsp = &ctxp->ctx.fcp_req;
        op = rsp->op;

        status = bf_get(lpfc_wcqe_c_status, wcqe);
        result = wcqe->parameter;

        if (phba->targetport)
                tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        else
                tgtp = NULL;

        lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
                         ctxp->oxid, op, status);

        if (status) {
                rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
                rsp->transferred_length = 0;
                if (tgtp) {
                        atomic_inc(&tgtp->xmt_fcp_rsp_error);
                        if (result == IOERR_ABORT_REQUESTED)
                                atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
                }

                logerr = LOG_NVME_IOERR;

                /* pick up SLI4 exchange busy condition */
                if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
                        ctxp->flag |= LPFC_NVMET_XBUSY;
                        logerr |= LOG_NVME_ABTS;
                        if (tgtp)
                                atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);

                } else {
                        ctxp->flag &= ~LPFC_NVMET_XBUSY;
                }

                lpfc_printf_log(phba, KERN_INFO, logerr,
                                "6315 IO Error Cmpl xri x%x: %x/%x XBUSY:x%x\n",
                                ctxp->oxid, status, result, ctxp->flag);

        } else {
                rsp->fcp_error = NVME_SC_SUCCESS;
                if (op == NVMET_FCOP_RSP)
                        rsp->transferred_length = rsp->rsplen;
                else
                        rsp->transferred_length = rsp->transfer_length;
                if (tgtp)
                        atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
        }

        if ((op == NVMET_FCOP_READDATA_RSP) ||
            (op == NVMET_FCOP_RSP)) {
                /* Sanity check */
                ctxp->state = LPFC_NVMET_STE_DONE;
                ctxp->entry_cnt++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (ctxp->ts_cmd_nvme) {
                        if (rsp->op == NVMET_FCOP_READDATA_RSP) {
                                ctxp->ts_isr_data =
                                        cmdwqe->isr_timestamp;
                                ctxp->ts_data_nvme =
                                        ktime_get_ns();
                                ctxp->ts_nvme_status =
                                        ctxp->ts_data_nvme;
                                ctxp->ts_status_wqput =
                                        ctxp->ts_data_nvme;
                                ctxp->ts_isr_status =
                                        ctxp->ts_data_nvme;
                                ctxp->ts_status_nvme =
                                        ctxp->ts_data_nvme;
                        } else {
                                ctxp->ts_isr_status =
                                        cmdwqe->isr_timestamp;
                                ctxp->ts_status_nvme =
                                        ktime_get_ns();
                        }
                }
                if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
                        id = smp_processor_id();
                        if (ctxp->cpu != id)
                                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                                "6703 CPU Check cmpl: "
                                                "cpu %d expect %d\n",
                                                id, ctxp->cpu);
                        if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
                                phba->cpucheck_cmpl_io[id]++;
                }
#endif
                rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (ctxp->ts_cmd_nvme)
                        lpfc_nvmet_ktime(phba, ctxp);
#endif
                /* lpfc_nvmet_xmt_fcp_release() will recycle the context */
        } else {
                ctxp->entry_cnt++;
                start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
                memset(((char *)cmdwqe) + start_clean, 0,
                       (sizeof(struct lpfc_iocbq) - start_clean));
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (ctxp->ts_cmd_nvme) {
                        ctxp->ts_isr_data = cmdwqe->isr_timestamp;
                        ctxp->ts_data_nvme = ktime_get_ns();
                }
                if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
                        id = smp_processor_id();
                        if (ctxp->cpu != id)
                                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                                "6704 CPU Check cmdcmpl: "
                                                "cpu %d expect %d\n",
                                                id, ctxp->cpu);
                        if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
                                phba->cpucheck_ccmpl_io[id]++;
                }
#endif
                rsp->done(rsp);
        }
}

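/*
 * lpfc_nvmet_xmt_ls_rsp - Transport entry point to send an LS response
 *
 * Called by the nvmet-fc transport to transmit the response for a
 * previously received LS request. Builds an XMIT_SEQUENCE WQE around the
 * response DMA buffer and issues it on the ELS ring; on any failure the
 * exchange is aborted and the receive buffer is returned.
 */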
static int
lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
                      struct nvmefc_tgt_ls_req *rsp)
{
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
        struct lpfc_hba *phba = ctxp->phba;
        struct hbq_dmabuf *nvmebuf =
                (struct hbq_dmabuf *)ctxp->rqb_buffer;
        struct lpfc_iocbq *nvmewqeq;
        struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
        struct lpfc_dmabuf dmabuf;
        struct ulp_bde64 bpl;
        int rc;

        if (phba->pport->load_flag & FC_UNLOADING)
                return -ENODEV;

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                        "6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);

        if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
            (ctxp->entry_cnt != 1)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6412 NVMET LS rsp state mismatch "
                                "oxid x%x: %d %d\n",
                                ctxp->oxid, ctxp->state, ctxp->entry_cnt);
        }
        ctxp->state = LPFC_NVMET_STE_LS_RSP;
        ctxp->entry_cnt++;

        nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
                                          rsp->rsplen);
        if (nvmewqeq == NULL) {
                atomic_inc(&nvmep->xmt_ls_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6150 LS Drop IO x%x: Prep\n",
                                ctxp->oxid);
                lpfc_in_buf_free(phba, &nvmebuf->dbuf);
                atomic_inc(&nvmep->xmt_ls_abort);
                lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
                                                ctxp->sid, ctxp->oxid);
                return -ENOMEM;
        }

        /* Save numBdes for bpl2sgl */
        nvmewqeq->rsvd2 = 1;
        nvmewqeq->hba_wqidx = 0;
        nvmewqeq->context3 = &dmabuf;
        dmabuf.virt = &bpl;
        bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
        bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
        bpl.tus.f.bdeSize = rsp->rsplen;
        bpl.tus.f.bdeFlags = 0;
        bpl.tus.w = le32_to_cpu(bpl.tus.w);

        nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
        nvmewqeq->iocb_cmpl = NULL;
        nvmewqeq->context2 = ctxp;

        lpfc_nvmeio_data(phba, "NVMET LS  RESP: xri x%x wqidx x%x len x%x\n",
                         ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);

        rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
        if (rc == WQE_SUCCESS) {
                /*
                 * Okay to repost buffer here, but wait till cmpl
                 * before freeing ctxp and iocbq.
                 */
                lpfc_in_buf_free(phba, &nvmebuf->dbuf);
                ctxp->rqb_buffer = NULL;
                atomic_inc(&nvmep->xmt_ls_rsp);
                return 0;
        }
        /* Give back resources */
        atomic_inc(&nvmep->xmt_ls_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "6151 LS Drop IO x%x: Issue %d\n",
                        ctxp->oxid, rc);

        lpfc_nlp_put(nvmewqeq->context1);

        lpfc_in_buf_free(phba, &nvmebuf->dbuf);
        atomic_inc(&nvmep->xmt_ls_abort);
        lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
        return -ENXIO;
}

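/*
 * lpfc_nvmet_xmt_fcp_op - Transport entry point to start an FCP operation
 *
 * Called by the nvmet-fc transport for each TSEND/TRECEIVE/TRSP operation
 * on an exchange. Prepares the appropriate WQE from the templates above
 * and issues it; if the WQ is full the WQE is parked on the wqfull_list
 * to be reissued after a WQE release CQE frees entries.
 */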
static int
lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
                      struct nvmefc_tgt_fcp_req *rsp)
{
        struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct lpfc_hba *phba = ctxp->phba;
        struct lpfc_queue *wq;
        struct lpfc_iocbq *nvmewqeq;
        struct lpfc_sli_ring *pring;
        unsigned long iflags;
        int rc;

        if (phba->pport->load_flag & FC_UNLOADING) {
                rc = -ENODEV;
                goto aerr;
        }

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (ctxp->ts_cmd_nvme) {
                if (rsp->op == NVMET_FCOP_RSP)
                        ctxp->ts_nvme_status = ktime_get_ns();
                else
                        ctxp->ts_nvme_data = ktime_get_ns();
        }
        if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
                int id = smp_processor_id();
                ctxp->cpu = id;
                if (id < LPFC_CHECK_CPU_CNT)
                        phba->cpucheck_xmt_io[id]++;
                if (rsp->hwqid != id) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                        "6705 CPU Check OP: "
                                        "cpu %d expect %d\n",
                                        id, rsp->hwqid);
                        ctxp->cpu = rsp->hwqid;
                }
        }
#endif

        /* Sanity check */
        if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
            (ctxp->state == LPFC_NVMET_STE_ABORT)) {
                atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6102 IO xri x%x aborted\n",
                                ctxp->oxid);
                rc = -ENXIO;
                goto aerr;
        }

        nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
        if (nvmewqeq == NULL) {
                atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6152 FCP Drop IO x%x: Prep\n",
                                ctxp->oxid);
                rc = -ENXIO;
                goto aerr;
        }

        nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
        nvmewqeq->iocb_cmpl = NULL;
        nvmewqeq->context2 = ctxp;
        nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
        ctxp->wqeq->hba_wqidx = rsp->hwqid;

        lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
                         ctxp->oxid, rsp->op, rsp->rsplen);

        ctxp->flag |= LPFC_NVMET_IO_INP;
        rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
        if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (!ctxp->ts_cmd_nvme)
                        return 0;
                if (rsp->op == NVMET_FCOP_RSP)
                        ctxp->ts_status_wqput = ktime_get_ns();
                else
                        ctxp->ts_data_wqput = ktime_get_ns();
#endif
                return 0;
        }

        if (rc == -EBUSY) {
                /*
                 * WQ was full, so queue nvmewqeq to be sent after
                 * WQE release CQE
                 */
                ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
                wq = phba->sli4_hba.hdwq[rsp->hwqid].nvme_wq;
                pring = wq->pring;
                spin_lock_irqsave(&pring->ring_lock, iflags);
                list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
                wq->q_flag |= HBA_NVMET_WQFULL;
                spin_unlock_irqrestore(&pring->ring_lock, iflags);
                atomic_inc(&lpfc_nvmep->defer_wqfull);
                return 0;
        }

        /* Give back resources */
        atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "6153 FCP Drop IO x%x: Issue: %d\n",
                        ctxp->oxid, rc);

        ctxp->wqeq->hba_wqidx = 0;
        nvmewqeq->context2 = NULL;
        nvmewqeq->context3 = NULL;
        rc = -EBUSY;
aerr:
        return rc;
}

static void
lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
{
        struct lpfc_nvmet_tgtport *tport = targetport->private;

        /* release any threads waiting for the unreg to complete */
        complete(&tport->tport_unreg_done);
}

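/*
 * lpfc_nvmet_xmt_fcp_abort - Transport entry point to abort an FCP operation
 *
 * Chooses the abort path based on the exchange state: flushes a WQ-full
 * deferred WQE if one is pending, uses the unsolicited abort path when the
 * command has not started any WQEs yet, and the solicited path otherwise.
 * If the firmware still owns the exchange (XBUSY), the abort is left to
 * the exchange-busy completion handling.
 */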
static void
lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
                         struct nvmefc_tgt_fcp_req *req)
{
        struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct lpfc_hba *phba = ctxp->phba;
        struct lpfc_queue *wq;
        unsigned long flags;

        if (phba->pport->load_flag & FC_UNLOADING)
                return;

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                        "6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
                        ctxp->oxid, ctxp->flag, ctxp->state);

        lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
                         ctxp->oxid, ctxp->flag, ctxp->state);

        atomic_inc(&lpfc_nvmep->xmt_fcp_abort);

        spin_lock_irqsave(&ctxp->ctxlock, flags);
        ctxp->state = LPFC_NVMET_STE_ABORT;

        /* Since iaab/iaar are NOT set, we need to check
         * if the firmware is in process of aborting IO
         */
        if (ctxp->flag & LPFC_NVMET_XBUSY) {
                spin_unlock_irqrestore(&ctxp->ctxlock, flags);
                return;
        }
        ctxp->flag |= LPFC_NVMET_ABORT_OP;

        if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
                lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
                                                 ctxp->oxid);
                wq = phba->sli4_hba.hdwq[ctxp->wqeq->hba_wqidx].nvme_wq;
                spin_unlock_irqrestore(&ctxp->ctxlock, flags);
                lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
                return;
        }

        /* A state of LPFC_NVMET_STE_RCV means we have just received
         * the NVME command and have not started processing it
         * (i.e. no IO WQEs have been issued on this exchange yet).
         */
        if (ctxp->state == LPFC_NVMET_STE_RCV)
                lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
                                                 ctxp->oxid);
        else
                lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
                                               ctxp->oxid);
        spin_unlock_irqrestore(&ctxp->ctxlock, flags);
}

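/*
 * lpfc_nvmet_xmt_fcp_release - Transport entry point to release an exchange
 *
 * Called when the nvmet-fc transport is done with the exchange. If an
 * abort is in flight or the exchange is still busy in firmware, the real
 * release is deferred to the abort path; otherwise the context buffer is
 * reposted immediately.
 */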
static void
lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
                           struct nvmefc_tgt_fcp_req *rsp)
{
        struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct lpfc_hba *phba = ctxp->phba;
        unsigned long flags;
        bool aborting = false;

        if (ctxp->state != LPFC_NVMET_STE_DONE &&
            ctxp->state != LPFC_NVMET_STE_ABORT) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6413 NVMET release bad state %d %d oxid x%x\n",
                                ctxp->state, ctxp->entry_cnt, ctxp->oxid);
        }

        spin_lock_irqsave(&ctxp->ctxlock, flags);
        if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
            (ctxp->flag & LPFC_NVMET_XBUSY)) {
                aborting = true;
                /* let the abort path do the real release */
                lpfc_nvmet_defer_release(phba, ctxp);
        }
        spin_unlock_irqrestore(&ctxp->ctxlock, flags);

        lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n",
                         ctxp->oxid, ctxp->state, aborting);

        atomic_inc(&lpfc_nvmep->xmt_fcp_release);

        if (aborting)
                return;

        lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}

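/*
 * lpfc_nvmet_defer_rcv - Transport entry point to complete a deferred receive
 *
 * Called when the transport is ready for a command that was previously
 * deferred with -EOVERFLOW. The RQ buffer that held the command has been
 * processed, so it is simply returned to its receive queue here.
 */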
static void
lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
                     struct nvmefc_tgt_fcp_req *rsp)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
        struct lpfc_hba *phba = ctxp->phba;

        lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
                         ctxp->oxid, ctxp->size, smp_processor_id());

        if (!nvmebuf) {
                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
                                "6425 Defer rcv: no buffer xri x%x: "
                                "flg %x ste %x\n",
                                ctxp->oxid, ctxp->flag, ctxp->state);
                return;
        }

        tgtp = phba->targetport->private;
        if (tgtp)
                atomic_inc(&tgtp->rcv_fcp_cmd_defer);

        /* Free the nvmebuf since a new buffer already replaced it */
        nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
}

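/*
 * Target port template registered with the nvmet-fc transport; binds the
 * transport's LS/FCP operations to the lpfc handlers above. max_hw_queues
 * and max_sgl_segments are finalized in lpfc_nvmet_create_targetport().
 */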
static struct nvmet_fc_target_template lpfc_tgttemplate = {
        .targetport_delete = lpfc_nvmet_targetport_delete,
        .xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
        .fcp_op         = lpfc_nvmet_xmt_fcp_op,
        .fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
        .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
        .defer_rcv      = lpfc_nvmet_defer_rcv,

        .max_hw_queues  = 1,
        .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
        .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
        .dma_boundary = 0xFFFFFFFF,

        /* optional features */
        .target_features = 0,
        /* sizes of additional private data for data structures */
        .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
};

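/*
 * __lpfc_nvmet_clean_io_for_cpu - Release all contexts on one CPU/MRQ list
 *
 * Walks one per-CPU context list, returning each context's sglq/XRI to
 * the lpfc_nvmet_sgl_list, releasing its iocbq, and freeing the context
 * memory.
 */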
static void
__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
                struct lpfc_nvmet_ctx_info *infop)
{
        struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
        unsigned long flags;

        spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
        list_for_each_entry_safe(ctx_buf, next_ctx_buf,
                                &infop->nvmet_ctx_list, list) {
                spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                list_del_init(&ctx_buf->list);
                spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);

                __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
                ctx_buf->sglq->state = SGL_FREED;
                ctx_buf->sglq->ndlp = NULL;

                spin_lock(&phba->sli4_hba.sgl_list_lock);
                list_add_tail(&ctx_buf->sglq->list,
                                &phba->sli4_hba.lpfc_nvmet_sgl_list);
                spin_unlock(&phba->sli4_hba.sgl_list_lock);

                lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
                kfree(ctx_buf->context);
        }
        spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
}

static void
lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
{
        struct lpfc_nvmet_ctx_info *infop;
        int i, j;

        /* The first context list, MRQ 0 CPU 0 */
        infop = phba->sli4_hba.nvmet_ctx_info;
        if (!infop)
                return;

        /* Cycle through the entire CPU context list for every MRQ */
        for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
                for (j = 0; j < phba->sli4_hba.num_present_cpu; j++) {
                        __lpfc_nvmet_clean_io_for_cpu(phba, infop);
                        infop++; /* next */
                }
        }
        kfree(phba->sli4_hba.nvmet_ctx_info);
        phba->sli4_hba.nvmet_ctx_info = NULL;
}

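/*
 * lpfc_nvmet_setup_io_context - Allocate per-XRI receive contexts
 *
 * Builds the (CPU x MRQ) array of context lists described in the comment
 * below, then allocates one context buffer (context, iocbq, sglq/XRI) per
 * NVMET XRI and spreads the buffers round-robin across the MRQ lists.
 */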
static int
lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
{
        struct lpfc_nvmet_ctxbuf *ctx_buf;
        struct lpfc_iocbq *nvmewqe;
        union lpfc_wqe128 *wqe;
        struct lpfc_nvmet_ctx_info *last_infop;
        struct lpfc_nvmet_ctx_info *infop;
        int i, j, idx;

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
                        "6403 Allocate NVMET resources for %d XRIs\n",
                        phba->sli4_hba.nvmet_xri_cnt);

        phba->sli4_hba.nvmet_ctx_info = kcalloc(
                phba->sli4_hba.num_present_cpu * phba->cfg_nvmet_mrq,
                sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
        if (!phba->sli4_hba.nvmet_ctx_info) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "6419 Failed allocate memory for "
                                "nvmet context lists\n");
                return -ENOMEM;
        }

        /*
         * Assuming X CPUs in the system, and Y MRQs, allocate some
         * lpfc_nvmet_ctx_info structures as follows:
         *
         * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
         * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
         * ...
         * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
         *
         * Each line represents an MRQ "silo" containing an entry for
         * every CPU.
         *
         * MRQ N is initially assumed to be associated with CPU N, thus
         * contexts are initially distributed across all MRQs using
         * the MRQ index (N) as follows cpuN/mrqN. When contexts are
         * freed, they are freed to the MRQ silo based on the CPU number
         * of the IO completion. Thus a context that was allocated for MRQ A
         * whose IO completed on CPU B will be freed to cpuB/mrqA.
         */
        infop = phba->sli4_hba.nvmet_ctx_info;
        for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
                for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
                        INIT_LIST_HEAD(&infop->nvmet_ctx_list);
                        spin_lock_init(&infop->nvmet_ctx_list_lock);
                        infop->nvmet_ctx_list_cnt = 0;
                        infop++;
                }
        }

        /*
         * Setup the next CPU context info ptr for each MRQ.
         * MRQ 0 will cycle thru CPUs 0 - X separately from
         * MRQ 1 cycling thru CPUs 0 - X, and so on.
         */
        for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
                last_infop = lpfc_get_ctx_list(phba, 0, j);
                for (i = phba->sli4_hba.num_present_cpu - 1;  i >= 0; i--) {
                        infop = lpfc_get_ctx_list(phba, i, j);
                        infop->nvmet_ctx_next_cpu = last_infop;
                        last_infop = infop;
                }
        }

        /* For all nvmet xris, allocate resources needed to process a
         * received command on a per xri basis.
         */
        idx = 0;
        for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
                ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
                if (!ctx_buf) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
                                        "6404 Ran out of memory for NVMET\n");
                        return -ENOMEM;
                }

                ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
                                           GFP_KERNEL);
                if (!ctx_buf->context) {
                        kfree(ctx_buf);
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
                                        "6405 Ran out of NVMET "
                                        "context memory\n");
                        return -ENOMEM;
                }
                ctx_buf->context->ctxbuf = ctx_buf;
                ctx_buf->context->state = LPFC_NVMET_STE_FREE;

                ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
                if (!ctx_buf->iocbq) {
                        kfree(ctx_buf->context);
                        kfree(ctx_buf);
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
                                        "6406 Ran out of NVMET iocb/WQEs\n");
                        return -ENOMEM;
                }
                ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
                nvmewqe = ctx_buf->iocbq;
                wqe = &nvmewqe->wqe;

                /* Initialize WQE */
                memset(wqe, 0, sizeof(union lpfc_wqe));

                ctx_buf->iocbq->context1 = NULL;
                spin_lock(&phba->sli4_hba.sgl_list_lock);
                ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
                spin_unlock(&phba->sli4_hba.sgl_list_lock);
                if (!ctx_buf->sglq) {
                        lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
                        kfree(ctx_buf->context);
                        kfree(ctx_buf);
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
                                        "6407 Ran out of NVMET XRIs\n");
                        return -ENOMEM;
                }

                /*
                 * Add ctx to MRQidx context list. Our initial assumption
                 * is MRQidx will be associated with CPUidx. This association
                 * can change on the fly.
                 */
                infop = lpfc_get_ctx_list(phba, idx, idx);
                spin_lock(&infop->nvmet_ctx_list_lock);
                list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
                infop->nvmet_ctx_list_cnt++;
                spin_unlock(&infop->nvmet_ctx_list_lock);

                /* Spread ctx structures evenly across all MRQs */
                idx++;
                if (idx >= phba->cfg_nvmet_mrq)
                        idx = 0;
        }

        for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
                for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
                        infop = lpfc_get_ctx_list(phba, i, j);
                        lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
                                        "6408 TOTAL NVMET ctx for CPU %d "
                                        "MRQ %d: cnt %d nextcpu %p\n",
                                        i, j, infop->nvmet_ctx_list_cnt,
                                        infop->nvmet_ctx_next_cpu);
                }
        }
        return 0;
}

1355 int
1356 lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
1357 {
1358         struct lpfc_vport  *vport = phba->pport;
1359         struct lpfc_nvmet_tgtport *tgtp;
1360         struct nvmet_fc_port_info pinfo;
1361         int error;
1362
1363         if (phba->targetport)
1364                 return 0;
1365
1366         error = lpfc_nvmet_setup_io_context(phba);
1367         if (error)
1368                 return error;
1369
1370         memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
1371         pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
1372         pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
1373         pinfo.port_id = vport->fc_myDID;
1374
        /* Tell the transport layer our segment count is +1 because it
         * takes page alignment into account. When space for the SGL is
         * allocated we allocate +3: one for the cmd IU, one for the rsp
         * IU, and one for this alignment.
         */
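        /* For example, with cfg_nvme_seg_cnt = 64 the transport is told
         * it may use 65 SGL segments, while each SGL was sized for 67
         * entries: 64 data segments plus cmd, rsp, and alignment.
         */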
1379         lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
1380         lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
1381         lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
1382
1383 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1384         error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
1385                                              &phba->pcidev->dev,
1386                                              &phba->targetport);
1387 #else
1388         error = -ENOENT;
1389 #endif
1390         if (error) {
1391                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1392                                 "6025 Cannot register NVME targetport x%x: "
1393                                 "portnm %llx nodenm %llx segs %d qs %d\n",
1394                                 error,
1395                                 pinfo.port_name, pinfo.node_name,
1396                                 lpfc_tgttemplate.max_sgl_segments,
1397                                 lpfc_tgttemplate.max_hw_queues);
1398                 phba->targetport = NULL;
1399                 phba->nvmet_support = 0;
1400
1401                 lpfc_nvmet_cleanup_io_context(phba);
1402
1403         } else {
1404                 tgtp = (struct lpfc_nvmet_tgtport *)
1405                         phba->targetport->private;
1406                 tgtp->phba = phba;
1407
1408                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1409                                 "6026 Registered NVME "
1410                                 "targetport: %p, private %p "
1411                                 "portnm %llx nodenm %llx segs %d qs %d\n",
1412                                 phba->targetport, tgtp,
1413                                 pinfo.port_name, pinfo.node_name,
1414                                 lpfc_tgttemplate.max_sgl_segments,
1415                                 lpfc_tgttemplate.max_hw_queues);
1416
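                /* Zero all targetport statistic counters so this new
                 * registration starts from a known-clean state.
                 */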
1417                 atomic_set(&tgtp->rcv_ls_req_in, 0);
1418                 atomic_set(&tgtp->rcv_ls_req_out, 0);
1419                 atomic_set(&tgtp->rcv_ls_req_drop, 0);
1420                 atomic_set(&tgtp->xmt_ls_abort, 0);
1421                 atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
1422                 atomic_set(&tgtp->xmt_ls_rsp, 0);
1423                 atomic_set(&tgtp->xmt_ls_drop, 0);
1424                 atomic_set(&tgtp->xmt_ls_rsp_error, 0);
1425                 atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
1426                 atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
1427                 atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
1428                 atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
1429                 atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
1430                 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
1431                 atomic_set(&tgtp->xmt_fcp_drop, 0);
1432                 atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
1433                 atomic_set(&tgtp->xmt_fcp_read, 0);
1434                 atomic_set(&tgtp->xmt_fcp_write, 0);
1435                 atomic_set(&tgtp->xmt_fcp_rsp, 0);
1436                 atomic_set(&tgtp->xmt_fcp_release, 0);
1437                 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
1438                 atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
1439                 atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
1440                 atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
1441                 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1442                 atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
1443                 atomic_set(&tgtp->xmt_fcp_abort, 0);
1444                 atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1445                 atomic_set(&tgtp->xmt_abort_unsol, 0);
1446                 atomic_set(&tgtp->xmt_abort_sol, 0);
1447                 atomic_set(&tgtp->xmt_abort_rsp, 0);
1448                 atomic_set(&tgtp->xmt_abort_rsp_error, 0);
1449                 atomic_set(&tgtp->defer_ctx, 0);
1450                 atomic_set(&tgtp->defer_fod, 0);
1451                 atomic_set(&tgtp->defer_wqfull, 0);
1452         }
1453         return error;
1454 }
1455
1456 int
1457 lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
1458 {
1459         struct lpfc_vport  *vport = phba->pport;
1460
1461         if (!phba->targetport)
1462                 return 0;
1463
1464         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
1465                          "6007 Update NVMET port %p did x%x\n",
1466                          phba->targetport, vport->fc_myDID);
1467
1468         phba->targetport->port_id = vport->fc_myDID;
1469         return 0;
1470 }
1471
1472 /**
1473  * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
1474  * @phba: pointer to lpfc hba data structure.
1475  * @axri: pointer to the nvmet xri abort wcqe structure.
1476  *
1477  * This routine is invoked by the worker thread to process a SLI4 fast-path
1478  * NVMET aborted xri.
1479  **/
1480 void
1481 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
1482                             struct sli4_wcqe_xri_aborted *axri)
1483 {
1484         uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
1485         uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
1486         struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1487         struct lpfc_nvmet_tgtport *tgtp;
1488         struct lpfc_nodelist *ndlp;
1489         unsigned long iflag = 0;
1490         int rrq_empty = 0;
1491         bool released = false;
1492
1493         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1494                         "6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
1495
1496         if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
1497                 return;
1498
1499         if (phba->targetport) {
1500                 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1501                 atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
1502         }
1503
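        /* Search the contexts waiting on an ABTS/XRI-aborted completion
         * for the one that owns the aborted XRI.
         */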
1504         spin_lock_irqsave(&phba->hbalock, iflag);
1505         spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1506         list_for_each_entry_safe(ctxp, next_ctxp,
1507                                  &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1508                                  list) {
1509                 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1510                         continue;
1511
1512                 /* Check if we already received a free context call
1513                  * and we have completed processing an abort situation.
1514                  */
1515                 if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
1516                     !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
1517                         list_del(&ctxp->list);
1518                         released = true;
1519                 }
1520                 ctxp->flag &= ~LPFC_NVMET_XBUSY;
1521                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1522
1523                 rrq_empty = list_empty(&phba->active_rrq_list);
1524                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1525                 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1526                 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1527                     (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
1528                      ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
1529                         lpfc_set_rrq_active(phba, ndlp,
1530                                 ctxp->ctxbuf->sglq->sli4_lxritag,
1531                                 rxid, 1);
1532                         lpfc_sli4_abts_err_handler(phba, ndlp, axri);
1533                 }
1534
1535                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1536                                 "6318 XB aborted oxid %x flg x%x (%x)\n",
1537                                 ctxp->oxid, ctxp->flag, released);
1538                 if (released)
1539                         lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1540
1541                 if (rrq_empty)
1542                         lpfc_worker_wake_up(phba);
1543                 return;
1544         }
1545         spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1546         spin_unlock_irqrestore(&phba->hbalock, iflag);
1547 }
1548
1549 int
1550 lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
                           struct fc_frame_header *fc_hdr)
{
1554 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1555         struct lpfc_hba *phba = vport->phba;
1556         struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1557         struct nvmefc_tgt_fcp_req *rsp;
1558         uint16_t xri;
1559         unsigned long iflag = 0;
1560
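        /* The OX_ID carried in the ABTS frame header identifies the
         * exchange the initiator is aborting.
         */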
1561         xri = be16_to_cpu(fc_hdr->fh_ox_id);
1562
1563         spin_lock_irqsave(&phba->hbalock, iflag);
1564         spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1565         list_for_each_entry_safe(ctxp, next_ctxp,
1566                                  &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1567                                  list) {
1568                 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1569                         continue;
1570
1571                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1572                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1573
1574                 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1575                 ctxp->flag |= LPFC_NVMET_ABTS_RCV;
1576                 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1577
1578                 lpfc_nvmeio_data(phba,
1579                         "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1580                         xri, smp_processor_id(), 0);
1581
1582                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1583                                 "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
1584
1585                 rsp = &ctxp->ctx.fcp_req;
1586                 nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
1587
1588                 /* Respond with BA_ACC accordingly */
1589                 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1590                 return 0;
1591         }
1592         spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1593         spin_unlock_irqrestore(&phba->hbalock, iflag);
1594
1595         lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1596                          xri, smp_processor_id(), 1);
1597
1598         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1599                         "6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
1600
1601         /* Respond with BA_RJT accordingly */
1602         lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
1603 #endif
1604         return 0;
1605 }
1606
1607 static void
1608 lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
1609                         struct lpfc_nvmet_rcv_ctx *ctxp)
1610 {
1611         struct lpfc_sli_ring *pring;
1612         struct lpfc_iocbq *nvmewqeq;
1613         struct lpfc_iocbq *next_nvmewqeq;
1614         unsigned long iflags;
1615         struct lpfc_wcqe_complete wcqe;
1616         struct lpfc_wcqe_complete *wcqep;
1617
1618         pring = wq->pring;
1619         wcqep = &wcqe;
1620
1621         /* Fake an ABORT error code back to cmpl routine */
1622         memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
1623         bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
1624         wcqep->parameter = IOERR_ABORT_REQUESTED;
1625
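        /* If ctxp is set, flush only the WQE queued for that IO;
         * otherwise drain the entire wqfull list. Either way each WQE
         * is completed with the faked ABORT status built above.
         */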
1626         spin_lock_irqsave(&pring->ring_lock, iflags);
1627         list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
1628                                  &wq->wqfull_list, list) {
1629                 if (ctxp) {
1630                         /* Checking for a specific IO to flush */
1631                         if (nvmewqeq->context2 == ctxp) {
1632                                 list_del(&nvmewqeq->list);
1633                                 spin_unlock_irqrestore(&pring->ring_lock,
1634                                                        iflags);
1635                                 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
1636                                                           wcqep);
1637                                 return;
1638                         }
1639                         continue;
1640                 } else {
1641                         /* Flush all IOs */
1642                         list_del(&nvmewqeq->list);
1643                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1644                         lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
1645                         spin_lock_irqsave(&pring->ring_lock, iflags);
1646                 }
1647         }
1648         if (!ctxp)
1649                 wq->q_flag &= ~HBA_NVMET_WQFULL;
1650         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1651 }
1652
1653 void
1654 lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
1655                           struct lpfc_queue *wq)
1656 {
1657 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1658         struct lpfc_sli_ring *pring;
1659         struct lpfc_iocbq *nvmewqeq;
1660         unsigned long iflags;
1661         int rc;
1662
1663         /*
1664          * Some WQE slots are available, so try to re-issue anything
1665          * on the WQ wqfull_list.
1666          */
1667         pring = wq->pring;
1668         spin_lock_irqsave(&pring->ring_lock, iflags);
1669         while (!list_empty(&wq->wqfull_list)) {
1670                 list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
1671                                  list);
1672                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1673                 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
1674                 spin_lock_irqsave(&pring->ring_lock, iflags);
1675                 if (rc == -EBUSY) {
1676                         /* WQ was full again, so put it back on the list */
1677                         list_add(&nvmewqeq->list, &wq->wqfull_list);
1678                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1679                         return;
1680                 }
1681         }
1682         wq->q_flag &= ~HBA_NVMET_WQFULL;
1683         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1684
1685 #endif
1686 }
1687
1688 void
1689 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
1690 {
1691 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1692         struct lpfc_nvmet_tgtport *tgtp;
1693         struct lpfc_queue *wq;
1694         uint32_t qidx;
1695
1696         if (phba->nvmet_support == 0)
1697                 return;
1698         if (phba->targetport) {
1699                 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
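                /* Fail back any WQEs still parked on each hardware
                 * queue's wqfull list before unregistering the port.
                 */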
1700                 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
1701                         wq = phba->sli4_hba.hdwq[qidx].nvme_wq;
1702                         lpfc_nvmet_wqfull_flush(phba, wq, NULL);
1703                 }
1704                 init_completion(&tgtp->tport_unreg_done);
1705                 nvmet_fc_unregister_targetport(phba->targetport);
                /* Timeout is in jiffies; wait up to 5 seconds */
                wait_for_completion_timeout(&tgtp->tport_unreg_done,
                                            msecs_to_jiffies(5000));
1707                 lpfc_nvmet_cleanup_io_context(phba);
1708         }
1709         phba->targetport = NULL;
1710 #endif
1711 }
1712
1713 /**
1714  * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
1715  * @phba: pointer to lpfc hba data structure.
1716  * @pring: pointer to a SLI ring.
1717  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1718  *
 * This routine processes an unsolicited NVME LS request received on the
 * LS receive queue. It allocates a receive context for the exchange and
 * passes the LS payload to the nvmet transport via nvmet_fc_rcv_ls_req().
 * If no targetport is registered or a context cannot be allocated, the
 * LS request is dropped; if the transport rejects the request, the
 * exchange is aborted as well.
1725  **/
1726 static void
1727 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1728                            struct hbq_dmabuf *nvmebuf)
1729 {
1730 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1731         struct lpfc_nvmet_tgtport *tgtp;
1732         struct fc_frame_header *fc_hdr;
1733         struct lpfc_nvmet_rcv_ctx *ctxp;
1734         uint32_t *payload;
1735         uint32_t size, oxid, sid, rc;
1736
1737         fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1738         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1739
1740         if (!phba->targetport) {
1741                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1742                                 "6154 LS Drop IO x%x\n", oxid);
1743                 oxid = 0;
1744                 size = 0;
1745                 sid = 0;
1746                 ctxp = NULL;
1747                 goto dropit;
1748         }
1749
1750         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1751         payload = (uint32_t *)(nvmebuf->dbuf.virt);
1752         size = bf_get(lpfc_rcqe_length,  &nvmebuf->cq_event.cqe.rcqe_cmpl);
1753         sid = sli4_sid_from_fc_hdr(fc_hdr);
1754
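        /* LS contexts are allocated on demand (GFP_ATOMIC, as this may
         * be called in atomic context) rather than taken from the
         * pre-allocated FCP context pool.
         */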
1755         ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
1756         if (ctxp == NULL) {
1757                 atomic_inc(&tgtp->rcv_ls_req_drop);
1758                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1759                                 "6155 LS Drop IO x%x: Alloc\n",
1760                                 oxid);
1761 dropit:
1762                 lpfc_nvmeio_data(phba, "NVMET LS  DROP: "
1763                                  "xri x%x sz %d from %06x\n",
1764                                  oxid, size, sid);
1765                 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1766                 return;
1767         }
1768         ctxp->phba = phba;
1769         ctxp->size = size;
1770         ctxp->oxid = oxid;
1771         ctxp->sid = sid;
1772         ctxp->wqeq = NULL;
1773         ctxp->state = LPFC_NVMET_STE_LS_RCV;
1774         ctxp->entry_cnt = 1;
1775         ctxp->rqb_buffer = (void *)nvmebuf;
1776
1777         lpfc_nvmeio_data(phba, "NVMET LS   RCV: xri x%x sz %d from %06x\n",
1778                          oxid, size, sid);
1779         /*
1780          * The calling sequence should be:
         * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done
1782          * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
1783          */
1784         atomic_inc(&tgtp->rcv_ls_req_in);
1785         rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
1786                                  payload, size);
1787
1788         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1789                         "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
1790                         "%08x %08x %08x\n", size, rc,
1791                         *payload, *(payload+1), *(payload+2),
1792                         *(payload+3), *(payload+4), *(payload+5));
1793
1794         if (rc == 0) {
1795                 atomic_inc(&tgtp->rcv_ls_req_out);
1796                 return;
1797         }
1798
1799         lpfc_nvmeio_data(phba, "NVMET LS  DROP: xri x%x sz %d from %06x\n",
1800                          oxid, size, sid);
1801
1802         atomic_inc(&tgtp->rcv_ls_req_drop);
1803         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1804                         "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
1805                         ctxp->oxid, rc);
1806
        /* We assume a rcv'ed cmd ALWAYS fits into 1 buffer */
1808         lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1809
1810         atomic_inc(&tgtp->xmt_ls_abort);
1811         lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
1812 #endif
1813 }
1814
1815 static struct lpfc_nvmet_ctxbuf *
1816 lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
1817                              struct lpfc_nvmet_ctx_info *current_infop)
1818 {
1819 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1820         struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
1821         struct lpfc_nvmet_ctx_info *get_infop;
1822         int i;
1823
        /*
         * The current_infop for the MRQ an NVME command IU was received
         * on is empty. Our goal is to replenish this MRQ's context list
         * from another CPU's list for the same MRQ.
         *
         * First we need to pick a context list to start looking on.
         * nvmet_ctx_start_cpu is the CPU that had contexts available the
         * last time we needed to replenish; nvmet_ctx_next_cpu is just
         * the next sequential CPU for this MRQ.
         */
1834         if (current_infop->nvmet_ctx_start_cpu)
1835                 get_infop = current_infop->nvmet_ctx_start_cpu;
1836         else
1837                 get_infop = current_infop->nvmet_ctx_next_cpu;
1838
1839         for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
1840                 if (get_infop == current_infop) {
1841                         get_infop = get_infop->nvmet_ctx_next_cpu;
1842                         continue;
1843                 }
1844                 spin_lock(&get_infop->nvmet_ctx_list_lock);
1845
1846                 /* Just take the entire context list, if there are any */
1847                 if (get_infop->nvmet_ctx_list_cnt) {
1848                         list_splice_init(&get_infop->nvmet_ctx_list,
1849                                     &current_infop->nvmet_ctx_list);
1850                         current_infop->nvmet_ctx_list_cnt =
1851                                 get_infop->nvmet_ctx_list_cnt - 1;
1852                         get_infop->nvmet_ctx_list_cnt = 0;
1853                         spin_unlock(&get_infop->nvmet_ctx_list_lock);
1854
1855                         current_infop->nvmet_ctx_start_cpu = get_infop;
1856                         list_remove_head(&current_infop->nvmet_ctx_list,
1857                                          ctx_buf, struct lpfc_nvmet_ctxbuf,
1858                                          list);
1859                         return ctx_buf;
1860                 }
1861
1862                 /* Otherwise, move on to the next CPU for this MRQ */
1863                 spin_unlock(&get_infop->nvmet_ctx_list_lock);
1864                 get_infop = get_infop->nvmet_ctx_next_cpu;
1865         }
1866
1867 #endif
1868         /* Nothing found, all contexts for the MRQ are in-flight */
1869         return NULL;
1870 }
1871
1872 /**
1873  * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
1874  * @phba: pointer to lpfc hba data structure.
1875  * @idx: relative index of MRQ vector
1876  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1877  *
 * This routine processes an unsolicited NVME FCP command IU received on
 * an MRQ. It claims a free receive context for the exchange, replenishing
 * this CPU/MRQ context list from another CPU if necessary, and passes the
 * command to the nvmet transport via nvmet_fc_rcv_fcp_req(). If no context
 * is available, the IO is queued on the io_wait list for deferred
 * processing; on a transport error the exchange is aborted.
1884  **/
1885 static void
1886 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
1887                             uint32_t idx,
1888                             struct rqb_dmabuf *nvmebuf,
1889                             uint64_t isr_timestamp)
1890 {
1891         struct lpfc_nvmet_rcv_ctx *ctxp;
1892         struct lpfc_nvmet_tgtport *tgtp;
1893         struct fc_frame_header *fc_hdr;
1894         struct lpfc_nvmet_ctxbuf *ctx_buf;
1895         struct lpfc_nvmet_ctx_info *current_infop;
1896         uint32_t *payload;
1897         uint32_t size, oxid, sid, rc, qno;
1898         unsigned long iflag;
1899         int current_cpu;
1900 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1901         uint32_t id;
1902 #endif
1903
1904         if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
1905                 return;
1906
1907         ctx_buf = NULL;
1908         if (!nvmebuf || !phba->targetport) {
1909                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1910                                 "6157 NVMET FCP Drop IO\n");
1911                 oxid = 0;
1912                 size = 0;
1913                 sid = 0;
1914                 ctxp = NULL;
1915                 goto dropit;
1916         }
1917
1918         /*
1919          * Get a pointer to the context list for this MRQ based on
1920          * the CPU this MRQ IRQ is associated with. If the CPU association
1921          * changes from our initial assumption, the context list could
1922          * be empty, thus it would need to be replenished with the
1923          * context list from another CPU for this MRQ.
1924          */
1925         current_cpu = smp_processor_id();
1926         current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
1927         spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
1928         if (current_infop->nvmet_ctx_list_cnt) {
1929                 list_remove_head(&current_infop->nvmet_ctx_list,
1930                                  ctx_buf, struct lpfc_nvmet_ctxbuf, list);
1931                 current_infop->nvmet_ctx_list_cnt--;
1932         } else {
1933                 ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
1934         }
1935         spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
1936
1937         fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1938         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1939         size = nvmebuf->bytes_recv;
1940
1941 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1942         if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
1943                 id = smp_processor_id();
1944                 if (id < LPFC_CHECK_CPU_CNT)
1945                         phba->cpucheck_rcv_io[id]++;
1946         }
1947 #endif
1948
1949         lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d CPU %02x\n",
1950                          oxid, size, smp_processor_id());
1951
1952         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1953
1954         if (!ctx_buf) {
1955                 /* Queue this NVME IO to process later */
1956                 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
1957                 list_add_tail(&nvmebuf->hbuf.list,
1958                               &phba->sli4_hba.lpfc_nvmet_io_wait_list);
1959                 phba->sli4_hba.nvmet_io_wait_cnt++;
1960                 phba->sli4_hba.nvmet_io_wait_total++;
1961                 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
1962                                        iflag);
1963
1964                 /* Post a brand new DMA buffer to RQ */
1965                 qno = nvmebuf->idx;
1966                 lpfc_post_rq_buffer(
1967                         phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
1968                         phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
1969
1970                 atomic_inc(&tgtp->defer_ctx);
1971                 return;
1972         }
1973
1974         payload = (uint32_t *)(nvmebuf->dbuf.virt);
1975         sid = sli4_sid_from_fc_hdr(fc_hdr);
1976
1977         ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
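        /* A context pulled from the free list should be in STE_FREE;
         * anything else indicates a state machine violation, which is
         * logged but not fatal here.
         */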
1978         if (ctxp->state != LPFC_NVMET_STE_FREE) {
1979                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1980                                 "6414 NVMET Context corrupt %d %d oxid x%x\n",
1981                                 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
1982         }
1983         ctxp->wqeq = NULL;
1984         ctxp->txrdy = NULL;
1985         ctxp->offset = 0;
1986         ctxp->phba = phba;
1987         ctxp->size = size;
1988         ctxp->oxid = oxid;
1989         ctxp->sid = sid;
1990         ctxp->idx = idx;
1991         ctxp->state = LPFC_NVMET_STE_RCV;
1992         ctxp->entry_cnt = 1;
1993         ctxp->flag = 0;
1994         ctxp->ctxbuf = ctx_buf;
1995         ctxp->rqb_buffer = (void *)nvmebuf;
1996         spin_lock_init(&ctxp->ctxlock);
1997
1998 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1999         if (isr_timestamp) {
2000                 ctxp->ts_isr_cmd = isr_timestamp;
2001                 ctxp->ts_cmd_nvme = ktime_get_ns();
2002                 ctxp->ts_nvme_data = 0;
2003                 ctxp->ts_data_wqput = 0;
2004                 ctxp->ts_isr_data = 0;
2005                 ctxp->ts_data_nvme = 0;
2006                 ctxp->ts_nvme_status = 0;
2007                 ctxp->ts_status_wqput = 0;
2008                 ctxp->ts_isr_status = 0;
2009                 ctxp->ts_status_nvme = 0;
2010         } else {
2011                 ctxp->ts_cmd_nvme = 0;
2012         }
2013 #endif
2014
2015         atomic_inc(&tgtp->rcv_fcp_cmd_in);
2016         /*
2017          * The calling sequence should be:
2018          * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
2019          * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
2020          * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
2021          * the NVME command / FC header is stored, so we are free to repost
2022          * the buffer.
2023          */
2024         rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
2025                                   payload, size);
2026
2027         /* Process FCP command */
2028         if (rc == 0) {
2029                 ctxp->rqb_buffer = NULL;
2030                 atomic_inc(&tgtp->rcv_fcp_cmd_out);
2031                 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2032                 return;
2033         }
2034
2035         /* Processing of FCP command is deferred */
2036         if (rc == -EOVERFLOW) {
2037                 /*
2038                  * Post a brand new DMA buffer to RQ and defer
2039                  * freeing rcv buffer till .defer_rcv callback
2040                  */
2041                 qno = nvmebuf->idx;
2042                 lpfc_post_rq_buffer(
2043                         phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2044                         phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2045
2046                 lpfc_nvmeio_data(phba,
2047                                  "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
2048                                  oxid, size, sid);
2049                 atomic_inc(&tgtp->rcv_fcp_cmd_out);
2050                 atomic_inc(&tgtp->defer_fod);
2051                 return;
2052         }
2053         ctxp->rqb_buffer = nvmebuf;
2054
2055         atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2056         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2057                         "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
2058                         ctxp->oxid, rc,
2059                         atomic_read(&tgtp->rcv_fcp_cmd_in),
2060                         atomic_read(&tgtp->rcv_fcp_cmd_out),
2061                         atomic_read(&tgtp->xmt_fcp_release));
2062 dropit:
2063         lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
2064                          oxid, size, sid);
2065         if (oxid) {
2066                 lpfc_nvmet_defer_release(phba, ctxp);
2067                 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
2068                 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2069                 return;
2070         }
2071
2072         if (ctx_buf)
2073                 lpfc_nvmet_ctxbuf_post(phba, ctx_buf);
2074
2075         if (nvmebuf)
2076                 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2077 }
2078
2079 /**
2080  * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
2081  * @phba: pointer to lpfc hba data structure.
2082  * @pring: pointer to a SLI ring.
2083  * @nvmebuf: pointer to received nvme data structure.
2084  *
2085  * This routine is used to process an unsolicited event received from a SLI
2086  * (Service Level Interface) ring. The actual processing of the data buffer
2087  * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
2089  * SLI RQ on which the unsolicited event was received.
2090  **/
2091 void
2092 lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2093                           struct lpfc_iocbq *piocb)
2094 {
2095         struct lpfc_dmabuf *d_buf;
2096         struct hbq_dmabuf *nvmebuf;
2097
2098         d_buf = piocb->context2;
2099         nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2100
2101         if (phba->nvmet_support == 0) {
2102                 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2103                 return;
2104         }
2105         lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
2106 }
2107
2108 /**
2109  * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
2110  * @phba: pointer to lpfc hba data structure.
2111  * @idx: relative index of MRQ vector
2112  * @nvmebuf: pointer to received nvme data structure.
2113  *
2114  * This routine is used to process an unsolicited event received from a SLI
2115  * (Service Level Interface) ring. The actual processing of the data buffer
2116  * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
2118  * SLI RQ on which the unsolicited event was received.
2119  **/
2120 void
2121 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
2122                            uint32_t idx,
2123                            struct rqb_dmabuf *nvmebuf,
2124                            uint64_t isr_timestamp)
2125 {
2126         if (phba->nvmet_support == 0) {
2127                 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2128                 return;
2129         }
2130         lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf,
2131                                     isr_timestamp);
2132 }
2133
2134 /**
2135  * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
2136  * @phba: pointer to a host N_Port data structure.
2137  * @ctxp: Context info for NVME LS Request
2138  * @rspbuf: DMA buffer of NVME command.
2139  * @rspsize: size of the NVME command.
2140  *
 * This routine allocates a lpfc-WQE data structure from the driver
 * lpfc-WQE free-list and prepares an XMIT_SEQUENCE64 WQE that transmits
 * the NVME LS response payload at @rspbuf (@rspsize bytes) back to the
 * remote NPORT on the exchange described by @ctxp, filling in the
 * Buffer Descriptor Entry (BDE) for the response payload. The reference
 * count on the ndlp is incremented by 1 and the reference to the ndlp
 * is put into context1 of the WQE data structure for this WQE to hold
 * the ndlp reference for the command's callback function to access later.
2154  *
2155  * Return code
2156  *   Pointer to the newly allocated/prepared nvme wqe data structure
2157  *   NULL - when nvme wqe data structure allocation/preparation failed
2158  **/
2159 static struct lpfc_iocbq *
2160 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
2161                        struct lpfc_nvmet_rcv_ctx *ctxp,
2162                        dma_addr_t rspbuf, uint16_t rspsize)
2163 {
2164         struct lpfc_nodelist *ndlp;
2165         struct lpfc_iocbq *nvmewqe;
2166         union lpfc_wqe128 *wqe;
2167
2168         if (!lpfc_is_link_up(phba)) {
2169                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2170                                 "6104 NVMET prep LS wqe: link err: "
2171                                 "NPORT x%x oxid:x%x ste %d\n",
2172                                 ctxp->sid, ctxp->oxid, ctxp->state);
2173                 return NULL;
2174         }
2175
        /* Allocate buffer for command wqe */
2177         nvmewqe = lpfc_sli_get_iocbq(phba);
2178         if (nvmewqe == NULL) {
2179                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2180                                 "6105 NVMET prep LS wqe: No WQE: "
2181                                 "NPORT x%x oxid x%x ste %d\n",
2182                                 ctxp->sid, ctxp->oxid, ctxp->state);
2183                 return NULL;
2184         }
2185
2186         ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2187         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2188             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2189             (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2190                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2191                                 "6106 NVMET prep LS wqe: No ndlp: "
2192                                 "NPORT x%x oxid x%x ste %d\n",
2193                                 ctxp->sid, ctxp->oxid, ctxp->state);
2194                 goto nvme_wqe_free_wqeq_exit;
2195         }
2196         ctxp->wqeq = nvmewqe;
2197
2198         /* prevent preparing wqe with NULL ndlp reference */
2199         nvmewqe->context1 = lpfc_nlp_get(ndlp);
2200         if (nvmewqe->context1 == NULL)
2201                 goto nvme_wqe_free_wqeq_exit;
2202         nvmewqe->context2 = ctxp;
2203
2204         wqe = &nvmewqe->wqe;
2205         memset(wqe, 0, sizeof(union lpfc_wqe));
2206
2207         /* Words 0 - 2 */
2208         wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2209         wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
2210         wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
2211         wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
2212
2213         /* Word 3 */
2214
2215         /* Word 4 */
2216
2217         /* Word 5 */
2218         bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
2219         bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
2220         bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
2221         bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
2222         bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
2223
2224         /* Word 6 */
2225         bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
2226                phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2227         bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
2228
2229         /* Word 7 */
2230         bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
2231                CMD_XMIT_SEQUENCE64_WQE);
2232         bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
2233         bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
2234         bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
2235
2236         /* Word 8 */
2237         wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
2238
2239         /* Word 9 */
2240         bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
2241         /* Needs to be set by caller */
2242         bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
2243
2244         /* Word 10 */
2245         bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
2246         bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2247         bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
2248                LPFC_WQE_LENLOC_WORD12);
2249         bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
2250
2251         /* Word 11 */
2252         bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
2253                LPFC_WQE_CQ_ID_DEFAULT);
2254         bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
2255                OTHER_COMMAND);
2256
2257         /* Word 12 */
2258         wqe->xmit_sequence.xmit_len = rspsize;
2259
2260         nvmewqe->retry = 1;
2261         nvmewqe->vport = phba->pport;
2262         nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2263         nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
2264
2265         /* Xmit NVMET response to remote NPORT <did> */
2266         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2267                         "6039 Xmit NVMET LS response to remote "
2268                         "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
2269                         ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
2270                         rspsize);
2271         return nvmewqe;
2272
2273 nvme_wqe_free_wqeq_exit:
2274         nvmewqe->context2 = NULL;
2275         nvmewqe->context3 = NULL;
2276         lpfc_sli_release_iocbq(phba, nvmewqe);
2277         return NULL;
2278 }
2281 static struct lpfc_iocbq *
2282 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
2283                         struct lpfc_nvmet_rcv_ctx *ctxp)
2284 {
2285         struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
2286         struct lpfc_nvmet_tgtport *tgtp;
2287         struct sli4_sge *sgl;
2288         struct lpfc_nodelist *ndlp;
2289         struct lpfc_iocbq *nvmewqe;
2290         struct scatterlist *sgel;
2291         union lpfc_wqe128 *wqe;
2292         struct ulp_bde64 *bde;
2293         uint32_t *txrdy;
2294         dma_addr_t physaddr;
2295         int i, cnt;
2296         int do_pbde;
2297         int xc = 1;
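        /* Assume the exchange already has an XRI (XC=1); cleared below
         * when a new XRI is created for this exchange.
         */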
2298
2299         if (!lpfc_is_link_up(phba)) {
2300                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6107 NVMET prep FCP wqe: link err: "
2302                                 "NPORT x%x oxid x%x ste %d\n",
2303                                 ctxp->sid, ctxp->oxid, ctxp->state);
2304                 return NULL;
2305         }
2306
2307         ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2308         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2309             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2310              (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2311                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2312                                 "6108 NVMET prep FCP wqe: no ndlp: "
2313                                 "NPORT x%x oxid x%x ste %d\n",
2314                                 ctxp->sid, ctxp->oxid, ctxp->state);
2315                 return NULL;
2316         }
2317
2318         if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
2319                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2320                                 "6109 NVMET prep FCP wqe: seg cnt err: "
2321                                 "NPORT x%x oxid x%x ste %d cnt %d\n",
2322                                 ctxp->sid, ctxp->oxid, ctxp->state,
2323                                 phba->cfg_nvme_seg_cnt);
2324                 return NULL;
2325         }
2326
2327         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2328         nvmewqe = ctxp->wqeq;
2329         if (nvmewqe == NULL) {
                /* Allocate buffer for command wqe */
2331                 nvmewqe = ctxp->ctxbuf->iocbq;
2332                 if (nvmewqe == NULL) {
2333                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2334                                         "6110 NVMET prep FCP wqe: No "
2335                                         "WQE: NPORT x%x oxid x%x ste %d\n",
2336                                         ctxp->sid, ctxp->oxid, ctxp->state);
2337                         return NULL;
2338                 }
2339                 ctxp->wqeq = nvmewqe;
2340                 xc = 0; /* create new XRI */
2341                 nvmewqe->sli4_lxritag = NO_XRI;
2342                 nvmewqe->sli4_xritag = NO_XRI;
2343         }
2344
2345         /* Sanity check */
2346         if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
2347             (ctxp->entry_cnt == 1)) ||
2348             (ctxp->state == LPFC_NVMET_STE_DATA)) {
2349                 wqe = &nvmewqe->wqe;
2350         } else {
2351                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2352                                 "6111 Wrong state NVMET FCP: %d  cnt %d\n",
2353                                 ctxp->state, ctxp->entry_cnt);
2354                 return NULL;
2355         }
2356
2357         sgl  = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
2358         switch (rsp->op) {
2359         case NVMET_FCOP_READDATA:
2360         case NVMET_FCOP_READDATA_RSP:
2361                 /* From the tsend template, initialize words 7 - 11 */
2362                 memcpy(&wqe->words[7],
2363                        &lpfc_tsend_cmd_template.words[7],
2364                        sizeof(uint32_t) * 5);
2365
2366                 /* Words 0 - 2 : The first sg segment */
2367                 sgel = &rsp->sg[0];
2368                 physaddr = sg_dma_address(sgel);
2369                 wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2370                 wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
2371                 wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
2372                 wqe->fcp_tsend.bde.addrHigh =
2373                         cpu_to_le32(putPaddrHigh(physaddr));
2374
2375                 /* Word 3 */
2376                 wqe->fcp_tsend.payload_offset_len = 0;
2377
2378                 /* Word 4 */
2379                 wqe->fcp_tsend.relative_offset = ctxp->offset;
2380
2381                 /* Word 5 */
2382                 wqe->fcp_tsend.reserved = 0;
2383
2384                 /* Word 6 */
2385                 bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
2386                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2387                 bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
2388                        nvmewqe->sli4_xritag);
2389
2390                 /* Word 7 - set ar later */
2391
2392                 /* Word 8 */
2393                 wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
2394
2395                 /* Word 9 */
2396                 bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
2397                 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
2398
2399                 /* Word 10 - set wqes later, in template xc=1 */
2400                 if (!xc)
2401                         bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
2402
2403                 /* Word 11 - set sup, irsp, irsplen later */
2404                 do_pbde = 0;
2405
2406                 /* Word 12 */
2407                 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2408
2409                 /* Setup 2 SKIP SGEs */
2410                 sgl->addr_hi = 0;
2411                 sgl->addr_lo = 0;
2412                 sgl->word2 = 0;
2413                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2414                 sgl->word2 = cpu_to_le32(sgl->word2);
2415                 sgl->sge_len = 0;
2416                 sgl++;
2417                 sgl->addr_hi = 0;
2418                 sgl->addr_lo = 0;
2419                 sgl->word2 = 0;
2420                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2421                 sgl->word2 = cpu_to_le32(sgl->word2);
2422                 sgl->sge_len = 0;
2423                 sgl++;
2424                 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
2425                         atomic_inc(&tgtp->xmt_fcp_read_rsp);
2426
2427                         /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2428
2429                         if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
2430                                 if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
2431                                         bf_set(wqe_sup,
2432                                                &wqe->fcp_tsend.wqe_com, 1);
2433                         } else {
2434                                 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
2435                                 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
2436                                 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
2437                                        ((rsp->rsplen >> 2) - 1));
2438                                 memcpy(&wqe->words[16], rsp->rspaddr,
2439                                        rsp->rsplen);
2440                         }
2441                 } else {
2442                         atomic_inc(&tgtp->xmt_fcp_read);
2443
2444                         /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2445                         bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
2446                 }
2447                 break;
2448
2449         case NVMET_FCOP_WRITEDATA:
2450                 /* From the treceive template, initialize words 3 - 11 */
2451                 memcpy(&wqe->words[3],
2452                        &lpfc_treceive_cmd_template.words[3],
2453                        sizeof(uint32_t) * 9);
2454
2455                 /* Words 0 - 2 : The first sg segment */
2456                 txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
2457                                        GFP_KERNEL, &physaddr);
2458                 if (!txrdy) {
2459                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2460                                         "6041 Bad txrdy buffer: oxid x%x\n",
2461                                         ctxp->oxid);
2462                         return NULL;
2463                 }
2464                 ctxp->txrdy = txrdy;
2465                 ctxp->txrdy_phys = physaddr;
2466                 wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2467                 wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
2468                 wqe->fcp_treceive.bde.addrLow =
2469                         cpu_to_le32(putPaddrLow(physaddr));
2470                 wqe->fcp_treceive.bde.addrHigh =
2471                         cpu_to_le32(putPaddrHigh(physaddr));
2472
2473                 /* Word 4 */
2474                 wqe->fcp_treceive.relative_offset = ctxp->offset;
2475
2476                 /* Word 6 */
2477                 bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
2478                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2479                 bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
2480                        nvmewqe->sli4_xritag);
2481
2482                 /* Word 7 */
2483
2484                 /* Word 8 */
2485                 wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
2486
2487                 /* Word 9 */
2488                 bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
2489                 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
2490
2491                 /* Word 10 - in template xc=1 */
2492                 if (!xc)
2493                         bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
2494
2495                 /* Word 11 - set pbde later */
2496                 if (phba->cfg_enable_pbde) {
2497                         do_pbde = 1;
2498                 } else {
2499                         bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
2500                         do_pbde = 0;
2501                 }
2502
                /* Word 12 - same union word as fcp_tsend.fcp_data_len */
                wqe->fcp_treceive.fcp_data_len = rsp->transfer_length;
2505
2506                 /* Setup 1 TXRDY and 1 SKIP SGE */
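                /* FCP XFER_RDY IU layout: word 0 DATA_RO, word 1
                 * BURST_LEN (big-endian), word 2 reserved.
                 */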
2507                 txrdy[0] = 0;
2508                 txrdy[1] = cpu_to_be32(rsp->transfer_length);
2509                 txrdy[2] = 0;
2510
2511                 sgl->addr_hi = putPaddrHigh(physaddr);
2512                 sgl->addr_lo = putPaddrLow(physaddr);
2513                 sgl->word2 = 0;
2514                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2515                 sgl->word2 = cpu_to_le32(sgl->word2);
2516                 sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
2517                 sgl++;
2518                 sgl->addr_hi = 0;
2519                 sgl->addr_lo = 0;
2520                 sgl->word2 = 0;
2521                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2522                 sgl->word2 = cpu_to_le32(sgl->word2);
2523                 sgl->sge_len = 0;
2524                 sgl++;
2525                 atomic_inc(&tgtp->xmt_fcp_write);
2526                 break;
2527
2528         case NVMET_FCOP_RSP:
                /* From the trsp template, initialize words 4 - 11 */
2530                 memcpy(&wqe->words[4],
2531                        &lpfc_trsp_cmd_template.words[4],
2532                        sizeof(uint32_t) * 8);
2533
2534                 /* Words 0 - 2 */
2535                 physaddr = rsp->rspdma;
2536                 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2537                 wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
2538                 wqe->fcp_trsp.bde.addrLow =
2539                         cpu_to_le32(putPaddrLow(physaddr));
2540                 wqe->fcp_trsp.bde.addrHigh =
2541                         cpu_to_le32(putPaddrHigh(physaddr));
2542
2543                 /* Word 3 */
2544                 wqe->fcp_trsp.response_len = rsp->rsplen;
2545
2546                 /* Word 6 */
2547                 bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
2548                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2549                 bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
2550                        nvmewqe->sli4_xritag);
2551
2552                 /* Word 7 */
2553
2554                 /* Word 8 */
2555                 wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
2556
2557                 /* Word 9 */
2558                 bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
2559                 bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
2560
2561                 /* Word 10 */
2562                 if (xc)
2563                         bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);
2564
2565                 /* Word 11 */
2566                 /* In template wqes=0 irsp=0 irsplen=0 - good response */
2567                 if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
2568                         /* Bad response - embed it */
2569                         bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
2570                         bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
2571                         bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
2572                                ((rsp->rsplen >> 2) - 1));
2573                         memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
2574                 }
2575                 do_pbde = 0;
2576
2577                 /* Word 12 */
2578                 wqe->fcp_trsp.rsvd_12_15[0] = 0;
2579
2580                 /* Use rspbuf, NOT sg list */
2581                 rsp->sg_cnt = 0;
2582                 sgl->word2 = 0;
2583                 atomic_inc(&tgtp->xmt_fcp_rsp);
2584                 break;
2585
2586         default:
2587                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2588                                 "6064 Unknown Rsp Op %d\n",
2589                                 rsp->op);
2590                 return NULL;
2591         }
2592
2593         nvmewqe->retry = 1;
2594         nvmewqe->vport = phba->pport;
2595         nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2596         nvmewqe->context1 = ndlp;
2597
2598         for (i = 0; i < rsp->sg_cnt; i++) {
2599                 sgel = &rsp->sg[i];
2600                 physaddr = sg_dma_address(sgel);
2601                 cnt = sg_dma_len(sgel);
2602                 sgl->addr_hi = putPaddrHigh(physaddr);
2603                 sgl->addr_lo = putPaddrLow(physaddr);
2604                 sgl->word2 = 0;
2605                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2606                 bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
2607                 if ((i + 1) == rsp->sg_cnt)
2608                         bf_set(lpfc_sli4_sge_last, sgl, 1);
2609                 sgl->word2 = cpu_to_le32(sgl->word2);
2610                 sgl->sge_len = cpu_to_le32(cnt);
2611                 if (i == 0) {
2612                         bde = (struct ulp_bde64 *)&wqe->words[13];
2613                         if (do_pbde) {
2614                                 /* Words 13-15  (PBDE) */
2615                                 bde->addrLow = sgl->addr_lo;
2616                                 bde->addrHigh = sgl->addr_hi;
2617                                 bde->tus.f.bdeSize =
2618                                         le32_to_cpu(sgl->sge_len);
2619                                 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2620                                 bde->tus.w = cpu_to_le32(bde->tus.w);
2621                         } else {
2622                                 memset(bde, 0, sizeof(struct ulp_bde64));
2623                         }
2624                 }
2625                 sgl++;
2626                 ctxp->offset += cnt;
2627         }
2628         ctxp->state = LPFC_NVMET_STE_DATA;
2629         ctxp->entry_cnt++;
2630         return nvmewqe;
2631 }
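
/*
 * A minimal illustrative sketch, kept out of the build with #if 0: each
 * data SGE in the sg loop above is populated with the same sequence of
 * steps.  A helper along these lines could factor that out; the helper
 * name and signature are assumptions for illustration, not driver API.
 */
#if 0
static void
lpfc_nvmet_fill_data_sge(struct sli4_sge *sgl, dma_addr_t physaddr,
			 uint32_t len, uint32_t offset, bool last)
{
	sgl->addr_hi = putPaddrHigh(physaddr);
	sgl->addr_lo = putPaddrLow(physaddr);
	sgl->word2 = 0;
	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
	bf_set(lpfc_sli4_sge_offset, sgl, offset);
	if (last)
		bf_set(lpfc_sli4_sge_last, sgl, 1);
	/* Commit the word2 bitfields, then the byte count, little endian */
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(len);
}
#endif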
2632
2633 /**
2634  * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
2635  * @phba: Pointer to HBA context object.
2636  * @cmdwqe: Pointer to driver command WQE object.
2637  * @wcqe: Pointer to driver response CQE object.
2638  *
2639  * This function is called from the SLI ring event handler with no lock
2640  * held. It is the completion handler for an NVME ABTS for FCP cmds and
2641  * frees the memory resources used for the NVME commands.
2642  **/
2643 static void
2644 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2645                              struct lpfc_wcqe_complete *wcqe)
2646 {
2647         struct lpfc_nvmet_rcv_ctx *ctxp;
2648         struct lpfc_nvmet_tgtport *tgtp;
2649         uint32_t status, result;
2650         unsigned long flags;
2651         bool released = false;
2652
2653         ctxp = cmdwqe->context2;
2654         status = bf_get(lpfc_wcqe_c_status, wcqe);
2655         result = wcqe->parameter;
2656
2657         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2658         if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2659                 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2660
2661         ctxp->state = LPFC_NVMET_STE_DONE;
2662
2663         /* Check whether we already received a free-context call and
2664          * have now finished processing the abort.
2665          */
2666         spin_lock_irqsave(&ctxp->ctxlock, flags);
2667         if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2668             !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2669                 list_del(&ctxp->list);
2670                 released = true;
2671         }
2672         ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2673         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2674         atomic_inc(&tgtp->xmt_abort_rsp);
2675
2676         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2677                         "6165 ABORT cmpl: xri x%x flg x%x (%d) "
2678                         "WCQE: %08x %08x %08x %08x\n",
2679                         ctxp->oxid, ctxp->flag, released,
2680                         wcqe->word0, wcqe->total_data_placed,
2681                         result, wcqe->word3);
2682
2683         cmdwqe->context2 = NULL;
2684         cmdwqe->context3 = NULL;
2685         /*
2686          * If the transport has released the ctx, we can reuse it now.
2687          * Otherwise it will be recycled by the transport release call.
2688          */
2689         if (released)
2690                 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
2691
2692         /* This is the iocbq for the abort, not the command */
2693         lpfc_sli_release_iocbq(phba, cmdwqe);
2694
2695         /* Since iaab/iaar are NOT set, there is no work left.
2696          * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
2697          * should have been called already.
2698          */
2699 }
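
/*
 * Illustrative only, compiled out with #if 0: both FCP abort completion
 * handlers make the same release decision under ctxlock.  A predicate
 * capturing it might read as below; the helper is an assumption for
 * readability, not existing driver code.
 */
#if 0
static bool
lpfc_nvmet_ctx_can_release(struct lpfc_nvmet_rcv_ctx *ctxp)
{
	/* Release only when the transport has already freed the context
	 * (CTX_RLS) and the exchange is no longer busy on the wire (XBUSY).
	 */
	return (ctxp->flag & LPFC_NVMET_CTX_RLS) &&
	       !(ctxp->flag & LPFC_NVMET_XBUSY);
}
#endif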
2700
2701 /**
2702  * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
2703  * @phba: Pointer to HBA context object.
2704  * @cmdwqe: Pointer to driver command WQE object.
2705  * @wcqe: Pointer to driver response CQE object.
2706  *
2707  * This function is called from the SLI ring event handler with no lock
2708  * held. It is the completion handler for an NVME ABTS for FCP cmds and
2709  * frees the memory resources used for the NVME commands.
2710  **/
2711 static void
2712 lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2713                                struct lpfc_wcqe_complete *wcqe)
2714 {
2715         struct lpfc_nvmet_rcv_ctx *ctxp;
2716         struct lpfc_nvmet_tgtport *tgtp;
2717         unsigned long flags;
2718         uint32_t status, result;
2719         bool released = false;
2720
2721         ctxp = cmdwqe->context2;
2722         status = bf_get(lpfc_wcqe_c_status, wcqe);
2723         result = wcqe->parameter;
2724
2725         if (!ctxp) {
2726                 /* if context is clear, related I/O already completed */
2727                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2728                                 "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
2729                                 wcqe->word0, wcqe->total_data_placed,
2730                                 result, wcqe->word3);
2731                 return;
2732         }
2733
2734         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2735         if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2736                 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2737
2738         /* Sanity check */
2739         if (ctxp->state != LPFC_NVMET_STE_ABORT) {
2740                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2741                                 "6112 ABTS Wrong state:%d oxid x%x\n",
2742                                 ctxp->state, ctxp->oxid);
2743         }
2744
2745         /* Check whether we already received a free-context call and
2746          * have now finished processing the abort.
2747          */
2748         ctxp->state = LPFC_NVMET_STE_DONE;
2749         spin_lock_irqsave(&ctxp->ctxlock, flags);
2750         if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2751             !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2752                 list_del(&ctxp->list);
2753                 released = true;
2754         }
2755         ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2756         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2757         atomic_inc(&tgtp->xmt_abort_rsp);
2758
2759         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2760                         "6316 ABTS cmpl xri x%x flg x%x (%d) "
2761                         "WCQE: %08x %08x %08x %08x\n",
2762                         ctxp->oxid, ctxp->flag, released,
2763                         wcqe->word0, wcqe->total_data_placed,
2764                         result, wcqe->word3);
2765
2766         cmdwqe->context2 = NULL;
2767         cmdwqe->context3 = NULL;
2768         /*
2769          * If the transport has released the ctx, we can reuse it now.
2770          * Otherwise it will be recycled by the transport release call.
2771          */
2772         if (released)
2773                 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
2774
2775         /* Since iaab/iaar are NOT set, there is no work left.
2776          * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
2777          * should have been called already.
2778          */
2779 }
2780
2781 /**
2782  * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
2783  * @phba: Pointer to HBA context object.
2784  * @cmdwqe: Pointer to driver command WQE object.
2785  * @wcqe: Pointer to driver response CQE object.
2786  *
2787  * This function is called from the SLI ring event handler with no lock
2788  * held. It is the completion handler for an NVME ABTS for LS cmds and
2789  * frees the memory resources used for the NVME commands.
2790  **/
2791 static void
2792 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2793                             struct lpfc_wcqe_complete *wcqe)
2794 {
2795         struct lpfc_nvmet_rcv_ctx *ctxp;
2796         struct lpfc_nvmet_tgtport *tgtp;
2797         uint32_t status, result;
2798
2799         ctxp = cmdwqe->context2;
2800         status = bf_get(lpfc_wcqe_c_status, wcqe);
2801         result = wcqe->parameter;
2802
2803         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2804         atomic_inc(&tgtp->xmt_ls_abort_cmpl);
2805
2806         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2807                         "6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
2808                         ctxp, wcqe->word0, wcqe->total_data_placed,
2809                         result, wcqe->word3);
2810
2811         if (!ctxp) {
2812                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2813                                 "6415 NVMET LS Abort No ctx: WCQE: "
2814                                 "%08x %08x %08x %08x\n",
2815                                 wcqe->word0, wcqe->total_data_placed,
2816                                 result, wcqe->word3);
2817
2818                 lpfc_sli_release_iocbq(phba, cmdwqe);
2819                 return;
2820         }
2821
2822         if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
2823                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2824                                 "6416 NVMET LS abort cmpl state mismatch: "
2825                                 "oxid x%x: %d %d\n",
2826                                 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
2827         }
2828
2829         cmdwqe->context2 = NULL;
2830         cmdwqe->context3 = NULL;
2831         lpfc_sli_release_iocbq(phba, cmdwqe);
2832         kfree(ctxp);
2833 }
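
/*
 * Lifecycle note: LS abort contexts are standalone allocations and are
 * freed above with kfree(), whereas FCP abort contexts come from the
 * ctxbuf pool and are recycled through lpfc_nvmet_ctxbuf_post() in the
 * FCP completion handlers.
 */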
2834
2835 static int
2836 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
2837                              struct lpfc_nvmet_rcv_ctx *ctxp,
2838                              uint32_t sid, uint16_t xri)
2839 {
2840         struct lpfc_nvmet_tgtport *tgtp;
2841         struct lpfc_iocbq *abts_wqeq;
2842         union lpfc_wqe128 *wqe_abts;
2843         struct lpfc_nodelist *ndlp;
2844
2845         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2846                         "6067 ABTS: sid %x xri x%x/x%x\n",
2847                         sid, xri, ctxp->wqeq->sli4_xritag);
2848
2849         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2850
2851         ndlp = lpfc_findnode_did(phba->pport, sid);
2852         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2853             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2854             (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2855                 atomic_inc(&tgtp->xmt_abort_rsp_error);
2856                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2857                                 "6134 Drop ABTS - wrong NDLP state x%x.\n",
2858                                 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
2859
2860                 /* No failure to an ABTS request. */
2861                 return 0;
2862         }
2863
2864         abts_wqeq = ctxp->wqeq;
2865         wqe_abts = &abts_wqeq->wqe;
2866
2867         /*
2868          * Since we zero the whole WQE, we need to ensure we set the WQE fields
2869          * that were initialized in lpfc_sli4_nvmet_alloc.
2870          */
2871         memset(wqe_abts, 0, sizeof(*wqe_abts));
2872
2873         /* Word 5 */
2874         bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
2875         bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
2876         bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
2877         bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
2878         bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
2879
2880         /* Word 6 */
2881         bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
2882                phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2883         bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
2884                abts_wqeq->sli4_xritag);
2885
2886         /* Word 7 */
2887         bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
2888                CMD_XMIT_SEQUENCE64_WQE);
2889         bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
2890         bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
2891         bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
2892
2893         /* Word 8 */
2894         wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
2895
2896         /* Word 9 */
2897         bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
2898         /* Needs to be set by caller */
2899         bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
2900
2901         /* Word 10 */
2902         bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
2903         bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2904         bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
2905                LPFC_WQE_LENLOC_WORD12);
2906         bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
2907         bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
2908
2909         /* Word 11 */
2910         bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
2911                LPFC_WQE_CQ_ID_DEFAULT);
2912         bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
2913                OTHER_COMMAND);
2914
2915         abts_wqeq->vport = phba->pport;
2916         abts_wqeq->context1 = ndlp;
2917         abts_wqeq->context2 = ctxp;
2918         abts_wqeq->context3 = NULL;
2919         abts_wqeq->rsvd2 = 0;
2920         /* hba_wqidx should already be setup from command we are aborting */
2921         abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
2922         abts_wqeq->iocb.ulpLe = 1;
2923
2924         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2925                         "6069 Issue ABTS to xri x%x reqtag x%x\n",
2926                         xri, abts_wqeq->iotag);
2927         return 1;
2928 }
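
/*
 * Illustrative caller sketch, compiled out with #if 0:
 * lpfc_nvmet_unsol_issue_abort() only formats the BLS ABTS as an
 * XMIT_SEQUENCE WQE; the callers below attach a completion handler and
 * post it.  "ring" and "abort_cmpl" stand in for the caller's choices
 * and are assumptions, not driver names.
 */
#if 0
	abts_wqeq = ctxp->wqeq;
	if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri)) {
		abts_wqeq->wqe_cmpl = abort_cmpl;	/* hypothetical */
		abts_wqeq->iocb_cmpl = NULL;
		rc = lpfc_sli4_issue_wqe(phba, ring, abts_wqeq);
	}
#endif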
2929
2930 static int
2931 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
2932                                struct lpfc_nvmet_rcv_ctx *ctxp,
2933                                uint32_t sid, uint16_t xri)
2934 {
2935         struct lpfc_nvmet_tgtport *tgtp;
2936         struct lpfc_iocbq *abts_wqeq;
2937         union lpfc_wqe128 *abts_wqe;
2938         struct lpfc_nodelist *ndlp;
2939         unsigned long flags;
2940         int rc;
2941
2942         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2943         if (!ctxp->wqeq) {
2944                 ctxp->wqeq = ctxp->ctxbuf->iocbq;
2945                 ctxp->wqeq->hba_wqidx = 0;
2946         }
2947
2948         ndlp = lpfc_findnode_did(phba->pport, sid);
2949         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2950             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2951             (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2952                 atomic_inc(&tgtp->xmt_abort_rsp_error);
2953                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2954                                 "6160 Drop ABORT - wrong NDLP state x%x.\n",
2955                                 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
2956
2957                 /* No failure to an ABTS request. */
2958                 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2959                 return 0;
2960         }
2961
2962         /* Issue ABTS for this WQE based on iotag */
2963         ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
2964         if (!ctxp->abort_wqeq) {
2965                 atomic_inc(&tgtp->xmt_abort_rsp_error);
2966                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2967                                 "6161 ABORT failed: No wqeqs: "
2968                                 "xri: x%x\n", ctxp->oxid);
2969                 /* No failure to an ABTS request. */
2970                 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2971                 return 0;
2972         }
2973         abts_wqeq = ctxp->abort_wqeq;
2974         abts_wqe = &abts_wqeq->wqe;
2975         ctxp->state = LPFC_NVMET_STE_ABORT;
2976
2977         /* Log the ABORT request before issuing it. */
2978         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2979                         "6162 ABORT Request to rport DID x%06x "
2980                         "for xri x%x x%x\n",
2981                         ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
2982
2983         /* If the hba is getting reset, this flag is set.  It is
2984          * cleared when the reset is complete and rings reestablished.
2985          */
2986         spin_lock_irqsave(&phba->hbalock, flags);
2987         /* driver queued commands are in process of being flushed */
2988         if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
2989                 spin_unlock_irqrestore(&phba->hbalock, flags);
2990                 atomic_inc(&tgtp->xmt_abort_rsp_error);
2991                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
2992                                 "6163 Driver in reset cleanup - flushing "
2993                                 "NVME Req now. hba_flag x%x oxid x%x\n",
2994                                 phba->hba_flag, ctxp->oxid);
2995                 lpfc_sli_release_iocbq(phba, abts_wqeq);
2996                 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2997                 return 0;
2998         }
2999
3000         /* Outstanding abort is in progress */
3001         if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
3002                 spin_unlock_irqrestore(&phba->hbalock, flags);
3003                 atomic_inc(&tgtp->xmt_abort_rsp_error);
3004                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
3005                                 "6164 Outstanding NVME I/O Abort Request "
3006                                 "still pending on oxid x%x\n",
3007                                 ctxp->oxid);
3008                 lpfc_sli_release_iocbq(phba, abts_wqeq);
3009                 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3010                 return 0;
3011         }
3012
3013         /* Ready - mark outstanding as aborted by driver. */
3014         abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
3015
3016         /* WQEs are reused.  Clear stale data and set key fields to
3017          * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
3018          */
3019         memset(abts_wqe, 0, sizeof(*abts_wqe));
3020
3021         /* word 3 */
3022         bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
3023
3024         /* word 7 */
3025         bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
3026         bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
3027
3028         /* word 8 - tell the FW to abort the IO associated with this
3029          * outstanding exchange ID.
3030          */
3031         abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;
3032
3033         /* word 9 - this is the iotag for the abts_wqe completion. */
3034         bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
3035                abts_wqeq->iotag);
3036
3037         /* word 10 */
3038         bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
3039         bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
3040
3041         /* word 11 */
3042         bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
3043         bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
3044         bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
3045
3046         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
3047         abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
3048         abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
3049         abts_wqeq->iocb_cmpl = NULL;
3050         abts_wqeq->iocb_flag |= LPFC_IO_NVME;
3051         abts_wqeq->context2 = ctxp;
3052         abts_wqeq->vport = phba->pport;
3053         rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
3054         spin_unlock_irqrestore(&phba->hbalock, flags);
3055         if (rc == WQE_SUCCESS) {
3056                 atomic_inc(&tgtp->xmt_abort_sol);
3057                 return 0;
3058         }
3059
3060         atomic_inc(&tgtp->xmt_abort_rsp_error);
3061         ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3062         lpfc_sli_release_iocbq(phba, abts_wqeq);
3063         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3064                         "6166 Failed ABORT issue_wqe with status x%x "
3065                         "for oxid x%x.\n",
3066                         rc, ctxp->oxid);
3067         return 1;
3068 }
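
/*
 * Illustrative condensation, compiled out with #if 0: the hbalock-guarded
 * early exits above reduce to a single predicate.  The helper is an
 * assumption for readability, not driver code.
 */
#if 0
static bool
lpfc_nvmet_abort_may_issue(struct lpfc_hba *phba, struct lpfc_iocbq *abts_wqeq)
{
	lockdep_assert_held(&phba->hbalock);
	if (phba->hba_flag & HBA_NVME_IOQ_FLUSH)
		return false;	/* reset cleanup is flushing NVME I/O */
	if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED)
		return false;	/* an abort is already outstanding */
	return true;
}
#endif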
3069
3070
3071 static int
3072 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
3073                                  struct lpfc_nvmet_rcv_ctx *ctxp,
3074                                  uint32_t sid, uint16_t xri)
3075 {
3076         struct lpfc_nvmet_tgtport *tgtp;
3077         struct lpfc_iocbq *abts_wqeq;
3078         unsigned long flags;
3079         int rc;
3080
3081         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3082         if (!ctxp->wqeq) {
3083                 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3084                 ctxp->wqeq->hba_wqidx = 0;
3085         }
3086
3087         if (ctxp->state == LPFC_NVMET_STE_FREE) {
3088                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3089                                 "6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
3090                                 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
3091                 rc = WQE_BUSY;
3092                 goto aerr;
3093         }
3094         ctxp->state = LPFC_NVMET_STE_ABORT;
3095         ctxp->entry_cnt++;
3096         rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
3097         if (rc == 0)
3098                 goto aerr;
3099
3100         spin_lock_irqsave(&phba->hbalock, flags);
3101         abts_wqeq = ctxp->wqeq;
3102         abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
3103         abts_wqeq->iocb_cmpl = NULL;
3104         abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
3105         rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
3106         spin_unlock_irqrestore(&phba->hbalock, flags);
3107         if (rc == WQE_SUCCESS)
3108                 return 0;
3110
3111 aerr:
3112         spin_lock_irqsave(&ctxp->ctxlock, flags);
3113         if (ctxp->flag & LPFC_NVMET_CTX_RLS)
3114                 list_del(&ctxp->list);
3115         ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
3116         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3117
3118         atomic_inc(&tgtp->xmt_abort_rsp_error);
3119         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3120                         "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
3121                         ctxp->oxid, rc);
3122         lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3123         return 1;
3124 }
3125
3126 static int
3127 lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
3128                                 struct lpfc_nvmet_rcv_ctx *ctxp,
3129                                 uint32_t sid, uint16_t xri)
3130 {
3131         struct lpfc_nvmet_tgtport *tgtp;
3132         struct lpfc_iocbq *abts_wqeq;
3133         union lpfc_wqe128 *wqe_abts;
3134         unsigned long flags;
3135         int rc;
3136
3137         if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
3138             (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
3139                 ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3140                 ctxp->entry_cnt++;
3141         } else {
3142                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3143                                 "6418 NVMET LS abort state mismatch "
3144                                 "IO x%x: %d %d\n",
3145                                 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3146                 ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3147         }
3148
3149         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3150         if (!ctxp->wqeq) {
3151                 /* Issue ABTS for this WQE based on iotag */
3152                 ctxp->wqeq = lpfc_sli_get_iocbq(phba);
3153                 if (!ctxp->wqeq) {
3154                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3155                                         "6068 Abort failed: No wqeqs: "
3156                                         "xri: x%x\n", xri);
3157                         /* No failure to an ABTS request. */
3158                         kfree(ctxp);
3159                         return 0;
3160                 }
3161         }
3162         abts_wqeq = ctxp->wqeq;
3163         wqe_abts = &abts_wqeq->wqe;
3164
3165         if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
3166                 rc = WQE_BUSY;
3167                 goto out;
3168         }
3169
3170         spin_lock_irqsave(&phba->hbalock, flags);
3171         abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
3172         abts_wqeq->iocb_cmpl = NULL;
3173         abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
3174         rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
3175         spin_unlock_irqrestore(&phba->hbalock, flags);
3176         if (rc == WQE_SUCCESS) {
3177                 atomic_inc(&tgtp->xmt_abort_unsol);
3178                 return 0;
3179         }
3180 out:
3181         atomic_inc(&tgtp->xmt_abort_rsp_error);
3182         abts_wqeq->context2 = NULL;
3183         abts_wqeq->context3 = NULL;
3184         lpfc_sli_release_iocbq(phba, abts_wqeq);
3185         kfree(ctxp);
3186         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3187                         "6056 Failed to Issue ABTS. Status x%x\n", rc);
3188         return 0;
3189 }