/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_nvme.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* NVME initiator-based functions */

static struct lpfc_io_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                  int idx, int expedite);

static void
lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *);

static struct nvme_fc_port_template lpfc_nvme_template;

static union lpfc_wqe128 lpfc_iread_cmd_template;
static union lpfc_wqe128 lpfc_iwrite_cmd_template;
static union lpfc_wqe128 lpfc_icmnd_cmd_template;

/* Setup WQE templates for NVME IOs */
void
lpfc_nvme_cmd_template(void)
{
        union lpfc_wqe128 *wqe;

        /* IREAD template */
        wqe = &lpfc_iread_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - cmd_buff_len, payload_offset_len is zero */

        /* Word 4 - total_xfer_len is variable */

        /* Word 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
        bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
        bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

        /* Word 8 - abort_tag is variable */

        /* Word 9  - reqtag is variable */

        /* Word 10 - dbde, wqes is variable */
        bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
        bf_set(wqe_nvme, &wqe->fcp_iread.wqe_com, 1);
        bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
        bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
        bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
        bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

        /* Word 11 - pbde is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, NVME_READ_CMD);
        bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);

        /* Word 12 - is zero */

        /* Word 13, 14, 15 - PBDE is variable */

        /* IWRITE template */
        wqe = &lpfc_iwrite_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - cmd_buff_len, payload_offset_len is zero */

        /* Word 4 - total_xfer_len is variable */

        /* Word 5 - initial_xfer_len is variable */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
        bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
        bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);

        /* Word 8 - abort_tag is variable */

        /* Word 9  - reqtag is variable */

        /* Word 10 - dbde, wqes is variable */
        bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
        bf_set(wqe_nvme, &wqe->fcp_iwrite.wqe_com, 1);
        bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
        bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
        bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
        bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

        /* Word 11 - pbde is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, NVME_WRITE_CMD);
        bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);

        /* Word 12 - is zero */

        /* Word 13, 14, 15 - PBDE is variable */

        /* ICMND template */
        wqe = &lpfc_icmnd_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - payload_offset_len is variable */

        /* Word 4, 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
        bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
        bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

        /* Word 8 - abort_tag is variable */

        /* Word 9  - reqtag is variable */

        /* Word 10 - dbde, wqes is variable */
        bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
        bf_set(wqe_nvme, &wqe->fcp_icmd.wqe_com, 1);
        bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
        bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
        bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
        bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

        /* Word 11 */
        bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, FCP_COMMAND);
        bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);

        /* Word 12, 13, 14, 15 - is zero */
}

/**
 * lpfc_nvme_prep_abort_wqe - set up 'abort' work queue entry.
 * @pwqeq: Pointer to command iocb.
 * @xritag: Tag that uniquely identifies the local exchange resource.
 * @opt: Option bits -
 *              bit 0 = inhibit sending abts on the link
 *
 * This function is called with hbalock held.
 **/
void
lpfc_nvme_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt)
{
        union lpfc_wqe128 *wqe = &pwqeq->wqe;

        /* WQEs are reused.  Clear stale data and set key fields to
         * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
         */
        memset(wqe, 0, sizeof(*wqe));

        if (opt & INHIBIT_ABORT)
                bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
        /* Abort specified xri tag, with the mask deliberately zeroed */
        bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);

        bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);

        /* Abort the IO associated with this outstanding exchange ID. */
        wqe->abort_cmd.wqe_com.abort_tag = xritag;

        /* iotag for the wqe completion. */
        bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);

        bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
        bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

        bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
        bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
        bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
}

/**
 * lpfc_nvme_create_queue - Create a queue handle for an NVME IO queue
 * @pnvme_lport: Transport localport that LS is to be issued from
 * @qidx: A CPU index used to affinitize IO queues and MSI-X vectors.
 * @qsize: Size of the queue in bytes
 * @handle: An opaque driver handle used in follow-up calls.
 *
 * Driver registers this routine to preallocate and initialize any
 * internal data structures to bind the @qidx to its internal IO queues.
 * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ.
 *
 * Return value :
 *   0 - Success
 *   -EINVAL - Unsupported input value.
 *   -ENOMEM - Could not alloc necessary memory
 **/
static int
lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
                       unsigned int qidx, u16 qsize,
                       void **handle)
{
        struct lpfc_nvme_lport *lport;
        struct lpfc_vport *vport;
        struct lpfc_nvme_qhandle *qhandle;
        char *str;

        if (!pnvme_lport->private)
                return -ENOMEM;

        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        vport = lport->vport;
        qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
        if (qhandle == NULL)
                return -ENOMEM;

        qhandle->cpu_id = raw_smp_processor_id();
        qhandle->qidx = qidx;
        /*
         * NVME qidx == 0 is the admin queue, so both admin queue
         * and first IO queue will use MSI-X vector and associated
         * EQ/CQ/WQ at index 0. After that they are sequentially assigned.
         */
        if (qidx) {
                str = "IO ";  /* IO queue */
                qhandle->index = ((qidx - 1) %
                        lpfc_nvme_template.max_hw_queues);
        } else {
                str = "ADM";  /* Admin queue */
                qhandle->index = qidx;
        }

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
                         "6073 Binding %s HdwQueue %d  (cpu %d) to "
                         "hdw_queue %d qhandle x%px\n", str,
                         qidx, qhandle->cpu_id, qhandle->index, qhandle);
        *handle = (void *)qhandle;
        return 0;
}

/**
 * lpfc_nvme_delete_queue - Delete and free a queue handle for an NVME IO queue
 * @pnvme_lport: Transport localport that LS is to be issued from
 * @qidx: A CPU index used to affinitize IO queues and MSI-X vectors.
 * @handle: An opaque driver handle from lpfc_nvme_create_queue
 *
 * Driver registers this routine to free
 * any internal data structures to bind the @qidx to its internal
 * IO queues.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
                       unsigned int qidx,
                       void *handle)
{
        struct lpfc_nvme_lport *lport;
        struct lpfc_vport *vport;

        if (!pnvme_lport->private)
                return;

        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        vport = lport->vport;

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
                        "6001 ENTER.  lpfc_pnvme x%px, qidx x%x qhandle x%px\n",
                        lport, qidx, handle);
        kfree(handle);
}

static void
lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
{
        struct lpfc_nvme_lport *lport = localport->private;

        lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
                         "6173 localport x%px delete complete\n",
                         lport);

        /* release any threads waiting for the unreg to complete */
        if (lport->vport->localport)
                complete(lport->lport_unreg_cmp);
}

/* lpfc_nvme_remoteport_delete
 *
 * @remoteport: Pointer to an nvme transport remoteport instance.
 *
 * This is a template downcall.  NVME transport calls this function
 * when it has completed the unregistration of a previously
 * registered remoteport.
 *
 * Return value :
 * None
 */
static void
lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
        struct lpfc_nvme_rport *rport = remoteport->private;
        struct lpfc_vport *vport;
        struct lpfc_nodelist *ndlp;

        ndlp = rport->ndlp;
        if (!ndlp)
                goto rport_err;

        vport = ndlp->vport;
        if (!vport)
                goto rport_err;

        /* Remove this rport from the lport's list - memory is owned by the
         * transport. Remove the ndlp reference for the NVME transport before
         * calling state machine to remove the node.
         */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
                        "6146 remoteport delete of remoteport x%px\n",
                        remoteport);
        spin_lock_irq(&vport->phba->hbalock);

        /* The register rebind might have occurred before the delete
         * downcall.  Guard against this race.
         */
        if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) {
                ndlp->nrport = NULL;
                ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
                spin_unlock_irq(&vport->phba->hbalock);

                /* Remove original register reference. The host transport
                 * won't reference this rport/remoteport any further.
                 */
                lpfc_nlp_put(ndlp);
        } else {
                spin_unlock_irq(&vport->phba->hbalock);
        }

 rport_err:
        return;
}

/**
 * lpfc_nvme_handle_lsreq - Process an unsolicited NVME LS request
 * @phba: pointer to lpfc hba data structure.
 * @axchg: pointer to exchange context for the NVME LS request
 *
 * This routine is used for processing an asynchronously received NVME LS
 * request. Any remaining validation is done and the LS is then forwarded
 * to the nvme-fc transport via nvme_fc_rcv_ls_req().
 *
 * The calling sequence should be: nvme_fc_rcv_ls_req() -> (processing)
 * -> lpfc_nvme_xmt_ls_rsp/cmp -> req->done.
 * __lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg.
 *
 * Returns 0 if LS was handled and delivered to the transport
 * Returns nonzero if LS failed to be handled and should be dropped
 */
int
lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
                        struct lpfc_async_xchg_ctx *axchg)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
        struct lpfc_vport *vport;
        struct lpfc_nvme_rport *lpfc_rport;
        struct nvme_fc_remote_port *remoteport;
        struct lpfc_nvme_lport *lport;
        uint32_t *payload = axchg->payload;
        int rc;

        vport = axchg->ndlp->vport;
        lpfc_rport = axchg->ndlp->nrport;
        if (!lpfc_rport)
                return -EINVAL;

        remoteport = lpfc_rport->remoteport;
        if (!vport->localport)
                return -EINVAL;

        lport = vport->localport->private;
        if (!lport)
                return -EINVAL;

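        /* Hand the LS to the nvme-fc transport; a return of 0 means the
         * transport accepted the LS and now owns the exchange, and will
         * generate the response.
         */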
        rc = nvme_fc_rcv_ls_req(remoteport, &axchg->ls_rsp, axchg->payload,
                                axchg->size);

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                        "6205 NVME Unsol rcv: sz %d rc %d: %08x %08x %08x "
                        "%08x %08x %08x\n",
                        axchg->size, rc,
                        *payload, *(payload+1), *(payload+2),
                        *(payload+3), *(payload+4), *(payload+5));

        if (!rc)
                return 0;
#endif
        return 1;
}

/**
 * __lpfc_nvme_ls_req_cmp - Generic completion handler for an NVME
 *        LS request.
 * @phba: Pointer to HBA context object
 * @vport: The local port that issued the LS
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * This function is the generic completion handler for NVME LS requests.
 * The function updates any states and statistics, calls the transport
 * ls_req done() routine, then tears down the command and buffers used
 * for the LS request.
 **/
void
__lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba,  struct lpfc_vport *vport,
                        struct lpfc_iocbq *cmdwqe,
                        struct lpfc_wcqe_complete *wcqe)
{
        struct nvmefc_ls_req *pnvme_lsreq;
        struct lpfc_dmabuf *buf_ptr;
        struct lpfc_nodelist *ndlp;
        uint32_t status;

        pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
        ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
        status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
                         "6047 NVMEx LS REQ %px cmpl DID %x Xri: %x "
                         "status %x reason x%x cmd:x%px lsreq:x%px bmp:x%px "
                         "ndlp:x%px\n",
                         pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
                         cmdwqe->sli4_xritag, status,
                         (wcqe->parameter & 0xffff),
                         cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);

        lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x parm x%x\n",
                         cmdwqe->sli4_xritag, status, wcqe->parameter);

        if (cmdwqe->context3) {
                buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
                lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
                kfree(buf_ptr);
                cmdwqe->context3 = NULL;
        }
        if (pnvme_lsreq->done)
                pnvme_lsreq->done(pnvme_lsreq, status);
        else
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6046 NVMEx cmpl without done call back? "
                                 "Data %px DID %x Xri: %x status %x\n",
                                pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
                                cmdwqe->sli4_xritag, status);
        if (ndlp) {
                lpfc_nlp_put(ndlp);
                cmdwqe->context1 = NULL;
        }
        lpfc_sli_release_iocbq(phba, cmdwqe);
}

static void
lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                       struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_vport *vport = cmdwqe->vport;
        struct lpfc_nvme_lport *lport;
        uint32_t status;

        status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;

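        /* Update LS completion statistics on the lport, if it is
         * still registered with the transport.
         */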
        if (vport->localport) {
                lport = (struct lpfc_nvme_lport *)vport->localport->private;
                if (lport) {
                        atomic_inc(&lport->fc4NvmeLsCmpls);
                        if (status) {
                                if (bf_get(lpfc_wcqe_c_xb, wcqe))
                                        atomic_inc(&lport->cmpl_ls_xb);
                                atomic_inc(&lport->cmpl_ls_err);
                        }
                }
        }

        __lpfc_nvme_ls_req_cmp(phba, vport, cmdwqe, wcqe);
}

static int
lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
                  struct lpfc_dmabuf *inp,
                  struct nvmefc_ls_req *pnvme_lsreq,
                  void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
                               struct lpfc_wcqe_complete *),
                  struct lpfc_nodelist *ndlp, uint32_t num_entry,
                  uint32_t tmo, uint8_t retry)
{
        struct lpfc_hba *phba = vport->phba;
        union lpfc_wqe128 *wqe;
        struct lpfc_iocbq *genwqe;
        struct ulp_bde64 *bpl;
        struct ulp_bde64 bde;
        int i, rc, xmit_len, first_len;

        /* Allocate buffer for command WQE */
        genwqe = lpfc_sli_get_iocbq(phba);
        if (genwqe == NULL)
                return 1;

        wqe = &genwqe->wqe;
        /* Initialize only 64 bytes */
        memset(wqe, 0, sizeof(union lpfc_wqe));

        genwqe->context3 = (uint8_t *)bmp;
        genwqe->iocb_flag |= LPFC_IO_NVME_LS;

        /* Save for completion so we can release these resources */
        genwqe->context1 = lpfc_nlp_get(ndlp);
        genwqe->context2 = (uint8_t *)pnvme_lsreq;
        /* Fill in payload, bp points to frame payload */

        if (!tmo)
                /* FC spec states we need 3 * ratov for CT requests */
                tmo = (3 * phba->fc_ratov);

        /* For this command calculate the xmit length of the request bde. */
        xmit_len = 0;
        first_len = 0;
        bpl = (struct ulp_bde64 *)bmp->virt;
        for (i = 0; i < num_entry; i++) {
                bde.tus.w = bpl[i].tus.w;
                if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
                        break;
                xmit_len += bde.tus.f.bdeSize;
                if (i == 0)
                        first_len = xmit_len;
        }

        genwqe->rsvd2 = num_entry;
        genwqe->hba_wqidx = 0;

        /* Words 0 - 2 */
        wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
        wqe->generic.bde.tus.f.bdeSize = first_len;
        wqe->generic.bde.addrLow = bpl[0].addrLow;
        wqe->generic.bde.addrHigh = bpl[0].addrHigh;

        /* Word 3 */
        wqe->gen_req.request_payload_len = first_len;

        /* Word 4 */

        /* Word 5 */
        bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
        bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
        bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
        bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
        bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);

        /* Word 6 */
        bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
               phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
        bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);

        /* Word 7 */
        bf_set(wqe_tmo, &wqe->gen_req.wqe_com, (vport->phba->fc_ratov-1));
        bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
        bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
        bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);

        /* Word 8 */
        wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;

        /* Word 9 */
        bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);

        /* Word 10 */
        bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
        bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
        bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
        bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
        bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);

        /* Word 11 */
        bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);


        /* Issue GEN REQ WQE for NPORT <did> */
        genwqe->wqe_cmpl = cmpl;
        genwqe->iocb_cmpl = NULL;
        genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
        genwqe->vport = vport;
        genwqe->retry = retry;

        lpfc_nvmeio_data(phba, "NVME LS  XMIT: xri x%x iotag x%x to x%06x\n",
                         genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);

        rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
        if (rc) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6045 Issue GEN REQ WQE to NPORT x%x "
                                 "Data: x%x x%x  rc x%x\n",
                                 ndlp->nlp_DID, genwqe->iotag,
                                 vport->port_state, rc);
                lpfc_sli_release_iocbq(phba, genwqe);
                return 1;
        }

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_ELS,
                         "6050 Issue GEN REQ WQE to NPORT x%x "
                         "Data: oxid: x%x state: x%x wq:x%px lsreq:x%px "
                         "bmp:x%px xmit:%d 1st:%d\n",
                         ndlp->nlp_DID, genwqe->sli4_xritag,
                         vport->port_state,
                         genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
        return 0;
}


/**
 * __lpfc_nvme_ls_req - Generic service routine to issue an NVME LS request
 * @vport: The local port issuing the LS
 * @ndlp: The remote port to send the LS to
 * @pnvme_lsreq: Pointer to LS request structure from the transport
 * @gen_req_cmp: Completion call-back
 *
 * Routine validates the ndlp, builds buffers and sends a GEN_REQUEST
 * WQE to perform the LS operation.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
int
__lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                      struct nvmefc_ls_req *pnvme_lsreq,
                      void (*gen_req_cmp)(struct lpfc_hba *phba,
                                struct lpfc_iocbq *cmdwqe,
                                struct lpfc_wcqe_complete *wcqe))
{
        struct lpfc_dmabuf *bmp;
        struct ulp_bde64 *bpl;
        int ret;
        uint16_t ntype, nstate;

        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6051 NVMEx LS REQ: Bad NDLP x%px, Failing "
                                 "LS Req\n",
                                 ndlp);
                return -ENODEV;
        }

        ntype = ndlp->nlp_type;
        nstate = ndlp->nlp_state;
        if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
            (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6088 NVMEx LS REQ: Fail DID x%06x not "
                                 "ready for IO. Type x%x, State x%x\n",
                                 ndlp->nlp_DID, ntype, nstate);
                return -ENODEV;
        }

        /*
         * There are two dma bufs in the request; actually there is one and
         * the second one is just the start address + cmd size.
         * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
         * in a lpfc_dmabuf struct. When freeing we just free the wrapper
         * because the nvme layer owns the data bufs.
         * We do not have to break these packets open, we don't care what is
         * in them. And we do not have to look at the response data, we only
         * care that we got a response. All of the caring is going to happen
         * in the nvme-fc layer.
         */

        bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
        if (!bmp) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6044 NVMEx LS REQ: Could not alloc LS buf "
                                 "for DID %x\n",
                                 ndlp->nlp_DID);
                return -ENOMEM;
        }

        bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
        if (!bmp->virt) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6042 NVMEx LS REQ: Could not alloc mbuf "
                                 "for DID %x\n",
                                 ndlp->nlp_DID);
                kfree(bmp);
                return -ENOMEM;
        }

        INIT_LIST_HEAD(&bmp->list);

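        /* BDE 0: the LS request payload */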
        bpl = (struct ulp_bde64 *)bmp->virt;
        bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
        bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
        bpl->tus.f.bdeFlags = 0;
        bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
        bpl->tus.w = le32_to_cpu(bpl->tus.w);
        bpl++;

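        /* BDE 1: the buffer that receives the LS response */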
        bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
        bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
        bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
        bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
        bpl->tus.w = le32_to_cpu(bpl->tus.w);

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
                        "6149 NVMEx LS REQ: Issue to DID 0x%06x lsreq x%px, "
                        "rqstlen:%d rsplen:%d %pad %pad\n",
                        ndlp->nlp_DID, pnvme_lsreq, pnvme_lsreq->rqstlen,
                        pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
                        &pnvme_lsreq->rspdma);

        ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
                                pnvme_lsreq, gen_req_cmp, ndlp, 2,
                                LPFC_NVME_LS_TIMEOUT, 0);
        if (ret != WQE_SUCCESS) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6052 NVMEx REQ: EXIT. issue ls wqe failed "
                                 "lsreq x%px Status %x DID %x\n",
                                 pnvme_lsreq, ret, ndlp->nlp_DID);
                lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
                kfree(bmp);
                return -EIO;
        }

        return 0;
}

/**
 * lpfc_nvme_ls_req - Issue an NVME Link Service request
 * @pnvme_lport: Transport localport that LS is to be issued from.
 * @pnvme_rport: Transport remoteport that LS is to be sent to.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
 *
 * Driver registers this routine to handle any link service request
 * from the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
static int
lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
                 struct nvme_fc_remote_port *pnvme_rport,
                 struct nvmefc_ls_req *pnvme_lsreq)
{
        struct lpfc_nvme_lport *lport;
        struct lpfc_nvme_rport *rport;
        struct lpfc_vport *vport;
        int ret;

        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
        if (unlikely(!lport) || unlikely(!rport))
                return -EINVAL;

        vport = lport->vport;
        if (vport->load_flag & FC_UNLOADING)
                return -ENODEV;

        atomic_inc(&lport->fc4NvmeLsRequests);

        ret = __lpfc_nvme_ls_req(vport, rport->ndlp, pnvme_lsreq,
                                 lpfc_nvme_ls_req_cmp);
        if (ret)
                atomic_inc(&lport->xmt_ls_err);

        return ret;
}

/**
 * __lpfc_nvme_ls_abort - Generic service routine to abort a prior
 *         NVME LS request
 * @vport: The local port that issued the LS
 * @ndlp: The remote port the LS was sent to
 * @pnvme_lsreq: Pointer to LS request structure from the transport
 *
 * The driver validates the ndlp, looks for the LS, and aborts the
 * LS if found.
 *
 * Returns:
 * 0 : if LS found and aborted
 * non-zero: various error conditions in form -Exxx
 **/
int
__lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                        struct nvmefc_ls_req *pnvme_lsreq)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_sli_ring *pring;
        struct lpfc_iocbq *wqe, *next_wqe;
        bool foundit = false;

        if (!ndlp) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                "6049 NVMEx LS REQ Abort: Bad NDLP x%px DID "
                                "x%06x, Failing LS Req\n",
                                ndlp, ndlp ? ndlp->nlp_DID : 0);
                return -EINVAL;
        }

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
                         "6040 NVMEx LS REQ Abort: Issue LS_ABORT for lsreq "
                         "x%p rqstlen:%d rsplen:%d %pad %pad\n",
                         pnvme_lsreq, pnvme_lsreq->rqstlen,
                         pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
                         &pnvme_lsreq->rspdma);

        /*
         * Lock the ELS ring txcmplq and look for the wqe that matches
         * this ELS. If found, issue an abort on the wqe.
         */
        pring = phba->sli4_hba.nvmels_wq->pring;
        spin_lock_irq(&phba->hbalock);
        spin_lock(&pring->ring_lock);
        list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
                if (wqe->context2 == pnvme_lsreq) {
                        wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
                        foundit = true;
                        break;
                }
        }
        spin_unlock(&pring->ring_lock);

        if (foundit)
                lpfc_sli_issue_abort_iotag(phba, pring, wqe);
        spin_unlock_irq(&phba->hbalock);

        if (foundit)
                return 0;

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
                         "6213 NVMEx LS REQ Abort: Unable to locate req x%p\n",
                         pnvme_lsreq);
        return -EINVAL;
}

static int
lpfc_nvme_xmt_ls_rsp(struct nvme_fc_local_port *localport,
                     struct nvme_fc_remote_port *remoteport,
                     struct nvmefc_ls_rsp *ls_rsp)
{
        struct lpfc_async_xchg_ctx *axchg =
                container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
        struct lpfc_nvme_lport *lport;
        int rc;

        if (axchg->phba->pport->load_flag & FC_UNLOADING)
                return -ENODEV;

        lport = (struct lpfc_nvme_lport *)localport->private;

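        /* Send the response on the exchange; on success the completion
         * routine frees the axchg context.
         */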
        rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, __lpfc_nvme_xmt_ls_rsp_cmp);

        if (rc) {
                /*
                 * unless the failure is due to having already sent
                 * the response, an abort will be generated for the
                 * exchange if the rsp can't be sent.
                 */
                if (rc != -EALREADY)
                        atomic_inc(&lport->xmt_ls_abort);
                return rc;
        }

        return 0;
}

/**
 * lpfc_nvme_ls_abort - Abort a prior NVME LS request
 * @pnvme_lport: Transport localport that LS is to be issued from.
 * @pnvme_rport: Transport remoteport that LS is to be sent to.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
 *
 * Driver registers this routine to abort an NVME LS request that is
 * in progress (from the transport's perspective).
 **/
static void
lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
                   struct nvme_fc_remote_port *pnvme_rport,
                   struct nvmefc_ls_req *pnvme_lsreq)
{
        struct lpfc_nvme_lport *lport;
        struct lpfc_vport *vport;
        struct lpfc_hba *phba;
        struct lpfc_nodelist *ndlp;
        int ret;

        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        if (unlikely(!lport))
                return;
        vport = lport->vport;
        phba = vport->phba;

        if (vport->load_flag & FC_UNLOADING)
                return;

        ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);

        ret = __lpfc_nvme_ls_abort(vport, ndlp, pnvme_lsreq);
        if (!ret)
                atomic_inc(&lport->xmt_ls_abort);
}

/* Fix up the existing sgls for NVME IO. */
static inline void
lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
                       struct lpfc_io_buf *lpfc_ncmd,
                       struct nvmefc_fcp_req *nCmd)
{
        struct lpfc_hba  *phba = vport->phba;
        struct sli4_sge *sgl;
        union lpfc_wqe128 *wqe;
        uint32_t *wptr, *dptr;

        /*
         * Get a local pointer to the built-in wqe and correct
         * the cmd size to match NVME's 96 bytes and fix
         * the dma address.
         */

        wqe = &lpfc_ncmd->cur_iocbq.wqe;

        /*
         * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
         * match NVME.  NVME sends 96 bytes. Also, use the
         * nvme command's command and response dma addresses
         * rather than the virtual memory to ease the restore
         * operation.
         */
        sgl = lpfc_ncmd->dma_sgl;
        sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
        if (phba->cfg_nvme_embed_cmd) {
                sgl->addr_hi = 0;
                sgl->addr_lo = 0;

                /* Word 0-2 - NVME CMND IU (embedded payload) */
                wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
                wqe->generic.bde.tus.f.bdeSize = 56;
                wqe->generic.bde.addrHigh = 0;
                wqe->generic.bde.addrLow =  64;  /* Word 16 */

                /* Word 10  - dbde is 0, wqes is 1 in template */

                /*
                 * Embed the payload in the last half of the WQE
                 * WQE words 16-30 get the NVME CMD IU payload
                 *
                 * WQE words 16-19 get payload Words 1-4
                 * WQE words 20-21 get payload Words 6-7
                 * WQE words 22-29 get payload Words 16-23
                 */
                wptr = &wqe->words[16];  /* WQE ptr */
                dptr = (uint32_t *)nCmd->cmdaddr;  /* payload ptr */
                dptr++;                 /* Skip Word 0 in payload */

                *wptr++ = *dptr++;      /* Word 1 */
                *wptr++ = *dptr++;      /* Word 2 */
                *wptr++ = *dptr++;      /* Word 3 */
                *wptr++ = *dptr++;      /* Word 4 */
                dptr++;                 /* Skip Word 5 in payload */
                *wptr++ = *dptr++;      /* Word 6 */
                *wptr++ = *dptr++;      /* Word 7 */
                dptr += 8;              /* Skip Words 8-15 in payload */
                *wptr++ = *dptr++;      /* Word 16 */
                *wptr++ = *dptr++;      /* Word 17 */
                *wptr++ = *dptr++;      /* Word 18 */
                *wptr++ = *dptr++;      /* Word 19 */
                *wptr++ = *dptr++;      /* Word 20 */
                *wptr++ = *dptr++;      /* Word 21 */
                *wptr++ = *dptr++;      /* Word 22 */
                *wptr   = *dptr;        /* Word 23 */
        } else {
                sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma));
                sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma));

                /* Word 0-2 - NVME CMND IU Inline BDE */
                wqe->generic.bde.tus.f.bdeFlags =  BUFF_TYPE_BDE_64;
                wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen;
                wqe->generic.bde.addrHigh = sgl->addr_hi;
                wqe->generic.bde.addrLow =  sgl->addr_lo;

                /* Word 10 */
                bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
                bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
        }

        sgl++;

        /* Setup the physical region for the FCP RSP */
        sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
        sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
        sgl->word2 = le32_to_cpu(sgl->word2);
        if (nCmd->sg_cnt)
                bf_set(lpfc_sli4_sge_last, sgl, 0);
        else
                bf_set(lpfc_sli4_sge_last, sgl, 1);
        sgl->word2 = cpu_to_le32(sgl->word2);
        sgl->sge_len = cpu_to_le32(nCmd->rsplen);
}


/*
 * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
 *
 * Driver registers this routine as the completion handler for NVME
 * FCP IO WQEs.  The routine updates statistics, rebuilds the NVME
 * ERSP IU when required, and calls the transport done() routine on
 * the @lpfc_nvme_fcpreq data structure.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
                          struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_io_buf *lpfc_ncmd =
                (struct lpfc_io_buf *)pwqeIn->context1;
        struct lpfc_vport *vport = pwqeIn->vport;
        struct nvmefc_fcp_req *nCmd;
        struct nvme_fc_ersp_iu *ep;
        struct nvme_fc_cmd_iu *cp;
        struct lpfc_nodelist *ndlp;
        struct lpfc_nvme_fcpreq_priv *freqpriv;
        struct lpfc_nvme_lport *lport;
        uint32_t code, status, idx;
        uint16_t cid, sqhd, data;
        uint32_t *ptr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        int cpu;
#endif

        /* Sanity check on return of outstanding command */
        if (!lpfc_ncmd) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6071 Null lpfc_ncmd pointer. No "
                                 "release, skip completion\n");
                return;
        }

        /* Guard against abort handler being called at same time */
        spin_lock(&lpfc_ncmd->buf_lock);

        if (!lpfc_ncmd->nvmeCmd) {
                spin_unlock(&lpfc_ncmd->buf_lock);
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6066 Missing cmpl ptrs: lpfc_ncmd x%px, "
                                 "nvmeCmd x%px\n",
                                 lpfc_ncmd, lpfc_ncmd->nvmeCmd);

                /* Release the lpfc_ncmd regardless of the missing elements. */
                lpfc_release_nvme_buf(phba, lpfc_ncmd);
                return;
        }
        nCmd = lpfc_ncmd->nvmeCmd;
        status = bf_get(lpfc_wcqe_c_status, wcqe);

        idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
        phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;

        if (unlikely(status && vport->localport)) {
                lport = (struct lpfc_nvme_lport *)vport->localport->private;
                if (lport) {
                        if (bf_get(lpfc_wcqe_c_xb, wcqe))
                                atomic_inc(&lport->cmpl_fcp_xb);
                        atomic_inc(&lport->cmpl_fcp_err);
                }
        }

        lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
                         lpfc_ncmd->cur_iocbq.sli4_xritag,
                         status, wcqe->parameter);
        /*
         * Catch race where our node has transitioned, but the
         * transport is still transitioning.
         */
        ndlp = lpfc_ncmd->ndlp;
        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6062 Ignoring NVME cmpl.  No ndlp\n");
                goto out_err;
        }

        code = bf_get(lpfc_wcqe_c_code, wcqe);
        if (code == CQE_CODE_NVME_ERSP) {
                /* For this type of CQE, we need to rebuild the rsp */
                ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;

                /*
                 * Get Command Id from cmd to plug into response. This
                 * code is not needed in the next NVME Transport drop.
                 */
                cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
                cid = cp->sqe.common.command_id;

                /*
                 * RSN is in CQE word 2
                 * SQHD is in CQE Word 3 bits 15:0
                 * Cmd Specific info is in CQE Word 1
                 * and in CQE Word 0 bits 15:0
                 */
                sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);

                /* Now let's build the NVME ERSP IU */
                ep->iu_len = cpu_to_be16(8);
                ep->rsn = wcqe->parameter;
                ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
                ep->rsvd12 = 0;
                ptr = (uint32_t *)&ep->cqe.result.u64;
                *ptr++ = wcqe->total_data_placed;
                data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
                *ptr = (uint32_t)data;
                ep->cqe.sq_head = sqhd;
                ep->cqe.sq_id =  nCmd->sqid;
                ep->cqe.command_id = cid;
                ep->cqe.status = 0;

                lpfc_ncmd->status = IOSTAT_SUCCESS;
                lpfc_ncmd->result = 0;
                nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
                nCmd->transferred_length = nCmd->payload_length;
        } else {
                lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
                lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);

                /* For NVME, the only failure path that results in an
                 * IO error is when the adapter rejects it.  All other
                 * conditions are a success case and resolved by the
                 * transport.
                 * IOSTAT_FCP_RSP_ERROR means:
                 * 1. Length of data received doesn't match total
                 *    transfer length in WQE
                 * 2. If the RSP payload does NOT match these cases:
                 *    a. RSP length 12/24 bytes and all zeros
                 *    b. NVME ERSP
                 */
                switch (lpfc_ncmd->status) {
                case IOSTAT_SUCCESS:
                        nCmd->transferred_length = wcqe->total_data_placed;
                        nCmd->rcv_rsplen = 0;
                        nCmd->status = 0;
                        break;
                case IOSTAT_FCP_RSP_ERROR:
                        nCmd->transferred_length = wcqe->total_data_placed;
                        nCmd->rcv_rsplen = wcqe->parameter;
                        nCmd->status = 0;
                        /* Sanity check */
                        if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN)
                                break;
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                         "6081 NVME Completion Protocol Error: "
                                         "xri %x status x%x result x%x "
                                         "placed x%x\n",
                                         lpfc_ncmd->cur_iocbq.sli4_xritag,
                                         lpfc_ncmd->status, lpfc_ncmd->result,
                                         wcqe->total_data_placed);
                        break;
                case IOSTAT_LOCAL_REJECT:
                        /* Let fall through to set command final state. */
                        if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
                                lpfc_printf_vlog(vport, KERN_INFO,
                                         LOG_NVME_IOERR,
                                         "6032 Delay Aborted cmd x%px "
                                         "nvme cmd x%px, xri x%x, "
                                         "xb %d\n",
                                         lpfc_ncmd, nCmd,
                                         lpfc_ncmd->cur_iocbq.sli4_xritag,
                                         bf_get(lpfc_wcqe_c_xb, wcqe));
                        fallthrough;
                default:
out_err:
                        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
                                         "6072 NVME Completion Error: xri %x "
                                         "status x%x result x%x [x%x] "
                                         "placed x%x\n",
                                         lpfc_ncmd->cur_iocbq.sli4_xritag,
                                         lpfc_ncmd->status, lpfc_ncmd->result,
                                         wcqe->parameter,
                                         wcqe->total_data_placed);
                        nCmd->transferred_length = 0;
                        nCmd->rcv_rsplen = 0;
                        nCmd->status = NVME_SC_INTERNAL;
                }
        }

        /* pick up SLI4 exchange busy condition */
        if (bf_get(lpfc_wcqe_c_xb, wcqe))
                lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
        else
                lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;

        /* Update stats and complete the IO.  There is
         * no need for dma unprep because the nvme_transport
         * owns the dma address.
         */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (lpfc_ncmd->ts_cmd_start) {
                lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
                lpfc_ncmd->ts_data_io = ktime_get_ns();
                phba->ktime_last_cmd = lpfc_ncmd->ts_data_io;
                lpfc_io_ktime(phba, lpfc_ncmd);
        }
        if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) {
                cpu = raw_smp_processor_id();
                this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
                if (lpfc_ncmd->cpu != cpu)
                        lpfc_printf_vlog(vport,
                                         KERN_INFO, LOG_NVME_IOERR,
                                         "6701 CPU Check cmpl: "
                                         "cpu %d expect %d\n",
                                         cpu, lpfc_ncmd->cpu);
        }
#endif

        /* NVME targets need completion held off until the abort exchange
         * completes unless the NVME Rport is getting unregistered.
         */

        if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
                freqpriv = nCmd->private;
                freqpriv->nvme_buf = NULL;
                lpfc_ncmd->nvmeCmd = NULL;
                spin_unlock(&lpfc_ncmd->buf_lock);
                nCmd->done(nCmd);
        } else
                spin_unlock(&lpfc_ncmd->buf_lock);

        /* Call release with XB=1 to queue the IO into the abort list. */
        lpfc_release_nvme_buf(phba, lpfc_ncmd);
}


/**
 * lpfc_nvme_prep_io_cmd - Prepare an NVME-over-FCP IO
 * @vport: pointer to a host virtual N_Port data structure
 * @lpfc_ncmd: Pointer to lpfc nvme command
 * @pnode: pointer to a node-list data structure
 * @cstat: pointer to the control status structure
 *
 * This routine initializes an fcp WQE from the appropriate command
 * template and from the data in the @lpfc_nvme_fcpreq data structure
 * for the rport indicated in @lpfc_nvme_rport.
 *
 * Return value :
 *   0 - Success
 *   -EINVAL - The node is not active
 **/
static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
                      struct lpfc_io_buf *lpfc_ncmd,
                      struct lpfc_nodelist *pnode,
                      struct lpfc_fc4_ctrl_stat *cstat)
{
        struct lpfc_hba *phba = vport->phba;
        struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
        struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
        union lpfc_wqe128 *wqe = &pwqeq->wqe;
        uint32_t req_len;

        if (!NLP_CHK_NODE_ACT(pnode))
                return -EINVAL;

        /*
         * There are three possibilities here - use scatter-gather segment, use
         * the single mapping, or neither.
         */
        if (nCmd->sg_cnt) {
                if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
                        /* From the iwrite template, initialize words 7 - 11 */
                        memcpy(&wqe->words[7],
                               &lpfc_iwrite_cmd_template.words[7],
                               sizeof(uint32_t) * 5);

                        /* Word 4 */
                        wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length;

                        /* Word 5 */
                        if ((phba->cfg_nvme_enable_fb) &&
                            (pnode->nlp_flag & NLP_FIRSTBURST)) {
                                req_len = lpfc_ncmd->nvmeCmd->payload_length;
                                if (req_len < pnode->nvme_fb_size)
                                        wqe->fcp_iwrite.initial_xfer_len =
                                                req_len;
                                else
                                        wqe->fcp_iwrite.initial_xfer_len =
                                                pnode->nvme_fb_size;
                        } else {
                                wqe->fcp_iwrite.initial_xfer_len = 0;
                        }
                        cstat->output_requests++;
                } else {
                        /* From the iread template, initialize words 7 - 11 */
                        memcpy(&wqe->words[7],
                               &lpfc_iread_cmd_template.words[7],
                               sizeof(uint32_t) * 5);

                        /* Word 4 */
                        wqe->fcp_iread.total_xfer_len = nCmd->payload_length;

                        /* Word 5 */
                        wqe->fcp_iread.rsrvd5 = 0;

                        cstat->input_requests++;
                }
        } else {
                /* From the icmnd template, initialize words 4 - 11 */
                memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
                       sizeof(uint32_t) * 8);
                cstat->control_requests++;
        }

1371         if (pnode->nlp_nvme_info & NLP_NVME_NSLER)
1372                 bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
1373         /*
1374          * Finish initializing those WQE fields that are independent
1375          * of the nvme_cmnd request_buffer
1376          */
1377
1378         /* Word 3 */
1379         bf_set(payload_offset_len, &wqe->fcp_icmd,
1380                (nCmd->rsplen + nCmd->cmdlen));
1381
1382         /* Word 6 */
1383         bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
1384                phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
1385         bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
1386
1387         /* Word 8 */
1388         wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
1389
1390         /* Word 9 */
1391         bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
1392
1393         /* Words 13 14 15 are for PBDE support */
1394
1395         pwqeq->vport = vport;
1396         return 0;
1397 }
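
/*
 * Editorial sketch (not driver code; the guard macro is never defined):
 * the first-burst clamp above reduces to min(payload_length,
 * nvme_fb_size).  A hypothetical helper expressing the same calculation:
 */
#ifdef LPFC_EDITORIAL_SKETCHES
static inline uint32_t
lpfc_nvme_first_burst_len(uint32_t req_len, uint32_t fb_size)
{
	/* Never request more first-burst data than the node offered. */
	return (req_len < fb_size) ? req_len : fb_size;
}
#endif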
1398
1399
1400 /**
1401  * lpfc_nvme_prep_io_dma - Prepare the DMA mappings for an NVME IO
1402  * @vport: pointer to a host virtual N_Port data structure
1403  * @lpfc_ncmd: Pointer to lpfc nvme command
1404  *
1405  * This routine walks the scatter/gather list supplied by the NVME
1406  * transport and builds the SLI4 data SGEs, chaining through LSP SGEs
1407  * where the list crosses a hardware SGL boundary.
1408  *
1409  * Return value :
1410  *   0 - Success
1411  *   1 - Failure (bad segment count or no chained SGL available)
1412  **/
1413 static int
1414 lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
1415                       struct lpfc_io_buf *lpfc_ncmd)
1416 {
1417         struct lpfc_hba *phba = vport->phba;
1418         struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
1419         union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
1420         struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
1421         struct sli4_hybrid_sgl *sgl_xtra = NULL;
1422         struct scatterlist *data_sg;
1423         struct sli4_sge *first_data_sgl;
1424         struct ulp_bde64 *bde;
1425         dma_addr_t physaddr = 0;
1426         uint32_t num_bde = 0;
1427         uint32_t dma_len = 0;
1428         uint32_t dma_offset = 0;
1429         int nseg, i, j;
1430         bool lsp_just_set = false;
1431
1432         /* Fix up the command and response DMA stuff. */
1433         lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);
1434
1435         /*
1436          * There are three possibilities here - use scatter-gather segment, use
1437          * the single mapping, or neither.
1438          */
1439         if (nCmd->sg_cnt) {
1440                 /*
1441                  * Jump over the cmd and rsp SGEs.  The fix routine
1442                  * has already adjusted for this.
1443                  */
1444                 sgl += 2;
1445
1446                 first_data_sgl = sgl;
1447                 lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
1448                 if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
1449                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1450                                         "6058 Too many sg segments from "
1451                                         "NVME Transport.  Max %d, "
1452                                         "nvmeIO sg_cnt %d\n",
1453                                         phba->cfg_nvme_seg_cnt + 1,
1454                                         lpfc_ncmd->seg_cnt);
1455                         lpfc_ncmd->seg_cnt = 0;
1456                         return 1;
1457                 }
1458
1459                 /*
1460                  * The driver established a maximum scatter-gather segment count
1461                  * during probe that limits the number of sg elements in any
1462                  * single nvme command.  Just run through the seg_cnt and format
1463                  * the sge's.
1464                  */
1465                 nseg = nCmd->sg_cnt;
1466                 data_sg = nCmd->first_sgl;
1467
1468                 /* for tracking the segment boundaries */
1469                 j = 2;
1470                 for (i = 0; i < nseg; i++) {
1471                         if (data_sg == NULL) {
1472                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1473                                                 "6059 dptr err %d, nseg %d\n",
1474                                                 i, nseg);
1475                                 lpfc_ncmd->seg_cnt = 0;
1476                                 return 1;
1477                         }
1478
1479                         sgl->word2 = 0;
1480                         if ((num_bde + 1) == nseg) {
1481                                 bf_set(lpfc_sli4_sge_last, sgl, 1);
1482                                 bf_set(lpfc_sli4_sge_type, sgl,
1483                                        LPFC_SGE_TYPE_DATA);
1484                         } else {
1485                                 bf_set(lpfc_sli4_sge_last, sgl, 0);
1486
1487                                 /* expand the segment */
1488                                 if (!lsp_just_set &&
1489                                     !((j + 1) % phba->border_sge_num) &&
1490                                     ((nseg - 1) != i)) {
1491                                         /* set LSP type */
1492                                         bf_set(lpfc_sli4_sge_type, sgl,
1493                                                LPFC_SGE_TYPE_LSP);
1494
1495                                         sgl_xtra = lpfc_get_sgl_per_hdwq(
1496                                                         phba, lpfc_ncmd);
1497
1498                                         if (unlikely(!sgl_xtra)) {
1499                                                 lpfc_ncmd->seg_cnt = 0;
1500                                                 return 1;
1501                                         }
1502                                         sgl->addr_lo = cpu_to_le32(putPaddrLow(
1503                                                        sgl_xtra->dma_phys_sgl));
1504                                         sgl->addr_hi = cpu_to_le32(putPaddrHigh(
1505                                                        sgl_xtra->dma_phys_sgl));
1506
1507                                 } else {
1508                                         bf_set(lpfc_sli4_sge_type, sgl,
1509                                                LPFC_SGE_TYPE_DATA);
1510                                 }
1511                         }
1512
1513                         if (!(bf_get(lpfc_sli4_sge_type, sgl) &
1514                                      LPFC_SGE_TYPE_LSP)) {
1515                                 if ((nseg - 1) == i)
1516                                         bf_set(lpfc_sli4_sge_last, sgl, 1);
1517
1518                                 physaddr = data_sg->dma_address;
1519                                 dma_len = data_sg->length;
1520                                 sgl->addr_lo = cpu_to_le32(
1521                                                          putPaddrLow(physaddr));
1522                                 sgl->addr_hi = cpu_to_le32(
1523                                                         putPaddrHigh(physaddr));
1524
1525                                 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
1526                                 sgl->word2 = cpu_to_le32(sgl->word2);
1527                                 sgl->sge_len = cpu_to_le32(dma_len);
1528
1529                                 dma_offset += dma_len;
1530                                 data_sg = sg_next(data_sg);
1531
1532                                 sgl++;
1533
1534                                 lsp_just_set = false;
1535                         } else {
1536                                 sgl->word2 = cpu_to_le32(sgl->word2);
1537
1538                                 sgl->sge_len = cpu_to_le32(
1539                                                      phba->cfg_sg_dma_buf_size);
1540
1541                                 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
1542                                 i = i - 1;
1543
1544                                 lsp_just_set = true;
1545                         }
1546
1547                         j++;
1548                 }
1549                 if (phba->cfg_enable_pbde) {
1550                         /* Use PBDE support for first SGL only, offset == 0 */
1551                         /* Words 13-15 */
1552                         bde = (struct ulp_bde64 *)
1553                                 &wqe->words[13];
1554                         bde->addrLow = first_data_sgl->addr_lo;
1555                         bde->addrHigh = first_data_sgl->addr_hi;
1556                         bde->tus.f.bdeSize =
1557                                 le32_to_cpu(first_data_sgl->sge_len);
1558                         bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1559                         bde->tus.w = cpu_to_le32(bde->tus.w);
1560                         /* wqe_pbde is 1 in template */
1561                 } else {
1562                         memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
1563                         bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
1564                 }
1565
1566         } else {
1567                 lpfc_ncmd->seg_cnt = 0;
1568
1569                 /* For this clause to be valid, the payload_length
1570                  * and sg_cnt must both be zero.
1571                  */
1572                 if (nCmd->payload_length != 0) {
1573                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1574                                         "6063 NVME DMA Prep Err: sg_cnt %d "
1575                                         "payload_length x%x\n",
1576                                         nCmd->sg_cnt, nCmd->payload_length);
1577                         return 1;
1578                 }
1579         }
1580         return 0;
1581 }
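
/*
 * Editorial sketch (not driver code; the guard macro is never defined):
 * the LSP decision above, isolated.  Slot j must become a chained (LSP)
 * SGE when the next slot would cross the hardware SGL boundary and more
 * data segments remain; the lsp_just_set flag additionally prevents two
 * LSP entries in a row.  Assumes border_sge_num is nonzero, as the
 * driver guarantees.
 */
#ifdef LPFC_EDITORIAL_SKETCHES
static inline bool
lpfc_sge_slot_needs_lsp(int j, int i, int nseg, bool lsp_just_set,
			uint32_t border_sge_num)
{
	return !lsp_just_set &&
	       !((j + 1) % border_sge_num) &&
	       (i != nseg - 1);
}
#endif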
1582
1583 /**
1584  * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
1585  * @pnvme_lport: Pointer to the driver's local port data
1586  * @pnvme_rport: Pointer to the rport getting the @pnvme_fcreq
1587  * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
1588  * @pnvme_fcreq: IO request from nvme fc to driver.
1589  *
1590  * Driver registers this routine as its io request handler.  This
1591  * routine issues an fcp WQE with data from the @pnvme_fcreq
1592  * data structure to the rport indicated in @pnvme_rport.
1593  *
1594  * Return value :
1595  *   0 - Success
1596  *   -EINVAL - Bad lport or request private data
1597  *   -EBUSY - Node not ready or resources exhausted; retryable
1598  *   -ENODEV - Driver unloading; -ENOMEM - DMA prep failed
1599  **/
1600 static int
1601 lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1602                         struct nvme_fc_remote_port *pnvme_rport,
1603                         void *hw_queue_handle,
1604                         struct nvmefc_fcp_req *pnvme_fcreq)
1605 {
1606         int ret = 0;
1607         int expedite = 0;
1608         int idx, cpu;
1609         struct lpfc_nvme_lport *lport;
1610         struct lpfc_fc4_ctrl_stat *cstat;
1611         struct lpfc_vport *vport;
1612         struct lpfc_hba *phba;
1613         struct lpfc_nodelist *ndlp;
1614         struct lpfc_io_buf *lpfc_ncmd;
1615         struct lpfc_nvme_rport *rport;
1616         struct lpfc_nvme_qhandle *lpfc_queue_info;
1617         struct lpfc_nvme_fcpreq_priv *freqpriv;
1618         struct nvme_common_command *sqe;
1619 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1620         uint64_t start = 0;
1621 #endif
1622
1623         /* Validate pointers. LLDD fault handling with transport does
1624          * have timing races.
1625          */
1626         lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
1627         if (unlikely(!lport)) {
1628                 ret = -EINVAL;
1629                 goto out_fail;
1630         }
1631
1632         vport = lport->vport;
1633
1634         if (unlikely(!hw_queue_handle)) {
1635                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1636                                  "6117 Fail IO, NULL hw_queue_handle\n");
1637                 atomic_inc(&lport->xmt_fcp_err);
1638                 ret = -EBUSY;
1639                 goto out_fail;
1640         }
1641
1642         phba = vport->phba;
1643
1644         if (unlikely(vport->load_flag & FC_UNLOADING)) {
1645                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1646                                  "6124 Fail IO, Driver unload\n");
1647                 atomic_inc(&lport->xmt_fcp_err);
1648                 ret = -ENODEV;
1649                 goto out_fail;
1650         }
1651
1652         freqpriv = pnvme_fcreq->private;
1653         if (unlikely(!freqpriv)) {
1654                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1655                                  "6158 Fail IO, NULL request data\n");
1656                 atomic_inc(&lport->xmt_fcp_err);
1657                 ret = -EINVAL;
1658                 goto out_fail;
1659         }
1660
1661 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1662         if (phba->ktime_on)
1663                 start = ktime_get_ns();
1664 #endif
1665         rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
1666         lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;
1667
1668         /*
1669          * Catch race where our node has transitioned, but the
1670          * transport is still transitioning.
1671          */
1672         ndlp = rport->ndlp;
1673         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1674                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
1675                                  "6053 Busy IO, ndlp not ready: rport x%px "
1676                                   "ndlp x%px, DID x%06x\n",
1677                                  rport, ndlp, pnvme_rport->port_id);
1678                 atomic_inc(&lport->xmt_fcp_err);
1679                 ret = -EBUSY;
1680                 goto out_fail;
1681         }
1682
1683         /* The remote node has to be a mapped target or it's an error. */
1684         if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
1685             (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
1686                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
1687                                  "6036 Fail IO, DID x%06x not ready for "
1688                                  "IO. State x%x, Type x%x Flg x%x\n",
1689                                  pnvme_rport->port_id,
1690                                  ndlp->nlp_state, ndlp->nlp_type,
1691                                  ndlp->upcall_flags);
1692                 atomic_inc(&lport->xmt_fcp_bad_ndlp);
1693                 ret = -EBUSY;
1694                 goto out_fail;
1695
1696         }
1697
1698         /* Currently only NVME Keep alive commands should be expedited
1699          * if the driver runs out of a resource. These should only be
1700          * issued on the admin queue, qidx 0.
1701          */
1702         if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
1703                 sqe = &((struct nvme_fc_cmd_iu *)
1704                         pnvme_fcreq->cmdaddr)->sqe.common;
1705                 if (sqe->opcode == nvme_admin_keep_alive)
1706                         expedite = 1;
1707         }
1708
1709         /* The node is shared with FCP IO, make sure the IO pending count does
1710          * not exceed the programmed depth.
1711          */
1712         if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
1713                 if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
1714                     !expedite) {
1715                         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1716                                          "6174 Fail IO, ndlp qdepth exceeded: "
1717                                          "idx %d DID %x pend %d qdepth %d\n",
1718                                          lpfc_queue_info->index, ndlp->nlp_DID,
1719                                          atomic_read(&ndlp->cmd_pending),
1720                                          ndlp->cmd_qdepth);
1721                         atomic_inc(&lport->xmt_fcp_qdepth);
1722                         ret = -EBUSY;
1723                         goto out_fail;
1724                 }
1725         }
1726
1727         /* Lookup Hardware Queue index based on fcp_io_sched module parameter */
1728         if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
1729                 idx = lpfc_queue_info->index;
1730         } else {
1731                 cpu = raw_smp_processor_id();
1732                 idx = phba->sli4_hba.cpu_map[cpu].hdwq;
1733         }
1734
1735         lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
1736         if (lpfc_ncmd == NULL) {
1737                 atomic_inc(&lport->xmt_fcp_noxri);
1738                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1739                                  "6065 Fail IO, driver buffer pool is empty: "
1740                                  "idx %d DID %x\n",
1741                                  lpfc_queue_info->index, ndlp->nlp_DID);
1742                 ret = -EBUSY;
1743                 goto out_fail;
1744         }
1745 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1746         if (start) {
1747                 lpfc_ncmd->ts_cmd_start = start;
1748                 lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
1749         } else {
1750                 lpfc_ncmd->ts_cmd_start = 0;
1751         }
1752 #endif
1753
1754         /*
1755          * Store the data needed by the driver to issue, abort, and complete
1756          * an IO.
1757          * Do not let the IO hang out forever.  There is no midlayer issuing
1758          * an abort so inform the FW of the maximum IO pending time.
1759          */
1760         freqpriv->nvme_buf = lpfc_ncmd;
1761         lpfc_ncmd->nvmeCmd = pnvme_fcreq;
1762         lpfc_ncmd->ndlp = ndlp;
1763         lpfc_ncmd->qidx = lpfc_queue_info->qidx;
1764
1765         /*
1766          * Issue the IO on the WQ indicated by index in the hw_queue_handle.
1767          * This identifier was created in our hardware queue create callback
1768          * routine. The driver is now dependent on the IO queue steering from
1769          * the transport.  We trust that the upper NVME layers know which
1770          * index to use and that they have affinitized a CPU to this hardware
1771          * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
1772          */
1773         lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
1774         cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat;
1775
1776         lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
1777         ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
1778         if (ret) {
1779                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1780                                  "6175 Fail IO, Prep DMA: "
1781                                  "idx %d DID %x\n",
1782                                  lpfc_queue_info->index, ndlp->nlp_DID);
1783                 atomic_inc(&lport->xmt_fcp_err);
1784                 ret = -ENOMEM;
1785                 goto out_free_nvme_buf;
1786         }
1787
1788         lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
1789                          lpfc_ncmd->cur_iocbq.sli4_xritag,
1790                          lpfc_queue_info->index, ndlp->nlp_DID);
1791
1792         ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq);
1793         if (ret) {
1794                 atomic_inc(&lport->xmt_fcp_wqerr);
1795                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1796                                  "6113 Fail IO, Could not issue WQE err %x "
1797                                  "sid: x%x did: x%x oxid: x%x\n",
1798                                  ret, vport->fc_myDID, ndlp->nlp_DID,
1799                                  lpfc_ncmd->cur_iocbq.sli4_xritag);
1800                 goto out_free_nvme_buf;
1801         }
1802
1803         if (phba->cfg_xri_rebalancing)
1804                 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no);
1805
1806 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1807         if (lpfc_ncmd->ts_cmd_start)
1808                 lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
1809
1810         if (phba->hdwqstat_on & LPFC_CHECK_NVME_IO) {
1811                 cpu = raw_smp_processor_id();
1812                 this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
1813                 lpfc_ncmd->cpu = cpu;
1814                 if (idx != cpu)
1815                         lpfc_printf_vlog(vport,
1816                                          KERN_INFO, LOG_NVME_IOERR,
1817                                         "6702 CPU Check cmd: "
1818                                         "cpu %d wq %d\n",
1819                                         lpfc_ncmd->cpu,
1820                                         lpfc_queue_info->index);
1821         }
1822 #endif
1823         return 0;
1824
1825  out_free_nvme_buf:
1826         if (lpfc_ncmd->nvmeCmd->sg_cnt) {
1827                 if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
1828                         cstat->output_requests--;
1829                 else
1830                         cstat->input_requests--;
1831         } else
1832                 cstat->control_requests--;
1833         lpfc_release_nvme_buf(phba, lpfc_ncmd);
1834  out_fail:
1835         return ret;
1836 }
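
/*
 * Editorial sketch (not driver code; the guard macro is never defined):
 * the hardware queue selection used in lpfc_nvme_fcp_io_submit() above.
 * With LPFC_FCP_SCHED_BY_HDWQ the transport-supplied queue handle picks
 * the hdwq directly; otherwise the submitting CPU is mapped through
 * sli4_hba.cpu_map.
 */
#ifdef LPFC_EDITORIAL_SKETCHES
static inline int
lpfc_nvme_pick_hdwq(struct lpfc_hba *phba,
		    struct lpfc_nvme_qhandle *lpfc_queue_info)
{
	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
		return lpfc_queue_info->index;
	return phba->sli4_hba.cpu_map[raw_smp_processor_id()].hdwq;
}
#endif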
1837
1838 /**
1839  * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
1840  * @phba: Pointer to HBA context object
1841  * @cmdiocb: Pointer to command iocb object.
1842  * @abts_cmpl: Pointer to the abort WCQE completion object.
1843  *
1844  * This is the callback function for any NVME FCP IO that was aborted.
1845  *
1846  * Return value:
1847  *   None
1848  **/
1849 void
1850 lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1851                            struct lpfc_wcqe_complete *abts_cmpl)
1852 {
1853         lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
1854                         "6145 ABORT_XRI_CN completing on rpi x%x "
1855                         "original iotag x%x, abort cmd iotag x%x "
1856                         "req_tag x%x, status x%x, hwstatus x%x\n",
1857                         cmdiocb->iocb.un.acxri.abortContextTag,
1858                         cmdiocb->iocb.un.acxri.abortIoTag,
1859                         cmdiocb->iotag,
1860                         bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
1861                         bf_get(lpfc_wcqe_c_status, abts_cmpl),
1862                         bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
1863         lpfc_sli_release_iocbq(phba, cmdiocb);
1864 }
1865
1866 /**
1867  * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
1868  * @pnvme_lport: Pointer to the driver's local port data
1869  * @pnvme_rport: Pointer to the rport getting the @pnvme_fcreq
1870  * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
1871  * @pnvme_fcreq: IO request from nvme fc to driver.
1872  *
1873  * Driver registers this routine as its nvme request io abort handler.
1874  * This routine issues an fcp Abort WQE with data from the @pnvme_fcreq
1875  * data structure to the rport indicated in @pnvme_rport.  This routine
1876  * is executed asynchronously - once the target is validated as "MAPPED"
1877  * and ready for IO, the driver issues the abort request and returns
1878  * without waiting for the ABTS exchange to complete.
1879  *
1880  * Return value:
1881  *   None
1882  **/
1883 static void
1884 lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
1885                     struct nvme_fc_remote_port *pnvme_rport,
1886                     void *hw_queue_handle,
1887                     struct nvmefc_fcp_req *pnvme_fcreq)
1888 {
1889         struct lpfc_nvme_lport *lport;
1890         struct lpfc_vport *vport;
1891         struct lpfc_hba *phba;
1892         struct lpfc_io_buf *lpfc_nbuf;
1893         struct lpfc_iocbq *abts_buf;
1894         struct lpfc_iocbq *nvmereq_wqe;
1895         struct lpfc_nvme_fcpreq_priv *freqpriv;
1896         unsigned long flags;
1897         int ret_val;
1898
1899         /* Validate pointers. LLDD fault handling with transport does
1900          * have timing races.
1901          */
1902         lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
1903         if (unlikely(!lport))
1904                 return;
1905
1906         vport = lport->vport;
1907
1908         if (unlikely(!hw_queue_handle)) {
1909                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1910                                  "6129 Fail Abort, HW Queue Handle NULL.\n");
1911                 return;
1912         }
1913
1914         phba = vport->phba;
1915         freqpriv = pnvme_fcreq->private;
1916
1917         if (unlikely(!freqpriv))
1918                 return;
1919         if (vport->load_flag & FC_UNLOADING)
1920                 return;
1921
1922         /* Announce entry to new IO submit field. */
1923         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1924                          "6002 Abort Request to rport DID x%06x "
1925                          "for nvme_fc_req x%px\n",
1926                          pnvme_rport->port_id,
1927                          pnvme_fcreq);
1928
1929         /* If the hba is getting reset, this flag is set.  It is
1930          * cleared when the reset is complete and rings reestablished.
1931          */
1932         spin_lock_irqsave(&phba->hbalock, flags);
1933         /* driver queued commands are in process of being flushed */
1934         if (phba->hba_flag & HBA_IOQ_FLUSH) {
1935                 spin_unlock_irqrestore(&phba->hbalock, flags);
1936                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1937                                  "6139 Driver in reset cleanup - flushing "
1938                                  "NVME Req now.  hba_flag x%x\n",
1939                                  phba->hba_flag);
1940                 return;
1941         }
1942
1943         lpfc_nbuf = freqpriv->nvme_buf;
1944         if (!lpfc_nbuf) {
1945                 spin_unlock_irqrestore(&phba->hbalock, flags);
1946                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1947                                  "6140 NVME IO req has no matching lpfc nvme "
1948                                  "io buffer.  Skipping abort req.\n");
1949                 return;
1950         } else if (!lpfc_nbuf->nvmeCmd) {
1951                 spin_unlock_irqrestore(&phba->hbalock, flags);
1952                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1953                                  "6141 lpfc NVME IO req has no nvme_fcreq "
1954                                  "io buffer.  Skipping abort req.\n");
1955                 return;
1956         }
1957         nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
1958
1959         /* Guard against IO completion being called at same time */
1960         spin_lock(&lpfc_nbuf->buf_lock);
1961
1962         /*
1963          * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
1964          * state must match the nvme_fcreq passed by the nvme
1965          * transport.  If they don't match, it is likely the driver
1966          * has already completed the NVME IO and the nvme transport
1967          * has not seen it yet.
1968          */
1969         if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
1970                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1971                                  "6143 NVME req mismatch: "
1972                                  "lpfc_nbuf x%px nvmeCmd x%px, "
1973                                  "pnvme_fcreq x%px.  Skipping Abort xri x%x\n",
1974                                  lpfc_nbuf, lpfc_nbuf->nvmeCmd,
1975                                  pnvme_fcreq, nvmereq_wqe->sli4_xritag);
1976                 goto out_unlock;
1977         }
1978
1979         /* Don't abort IOs no longer on the pending queue. */
1980         if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
1981                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1982                                  "6142 NVME IO req x%px not queued - skipping "
1983                                  "abort req xri x%x\n",
1984                                  pnvme_fcreq, nvmereq_wqe->sli4_xritag);
1985                 goto out_unlock;
1986         }
1987
1988         atomic_inc(&lport->xmt_fcp_abort);
1989         lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
1990                          nvmereq_wqe->sli4_xritag,
1991                          nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);
1992
1993         /* Outstanding abort is in progress */
1994         if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
1995                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1996                                  "6144 Outstanding NVME I/O Abort Request "
1997                                  "still pending on nvme_fcreq x%px, "
1998                                  "lpfc_ncmd %px xri x%x\n",
1999                                  pnvme_fcreq, lpfc_nbuf,
2000                                  nvmereq_wqe->sli4_xritag);
2001                 goto out_unlock;
2002         }
2003
2004         abts_buf = __lpfc_sli_get_iocbq(phba);
2005         if (!abts_buf) {
2006                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2007                                  "6136 No available abort wqes. Skipping "
2008                                  "Abts req for nvme_fcreq x%px xri x%x\n",
2009                                  pnvme_fcreq, nvmereq_wqe->sli4_xritag);
2010                 goto out_unlock;
2011         }
2012
2013         /* Ready - mark outstanding as aborted by driver. */
2014         nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
2015
2016         lpfc_nvme_prep_abort_wqe(abts_buf, nvmereq_wqe->sli4_xritag, 0);
2017
2018         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
2019         abts_buf->iocb_flag |= LPFC_IO_NVME;
2020         abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx;
2021         abts_buf->vport = vport;
2022         abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
2023         ret_val = lpfc_sli4_issue_wqe(phba, lpfc_nbuf->hdwq, abts_buf);
2024         spin_unlock(&lpfc_nbuf->buf_lock);
2025         spin_unlock_irqrestore(&phba->hbalock, flags);
2026         if (ret_val) {
2027                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2028                                  "6137 Failed abts issue_wqe with status x%x "
2029                                  "for nvme_fcreq x%px.\n",
2030                                  ret_val, pnvme_fcreq);
2031                 lpfc_sli_release_iocbq(phba, abts_buf);
2032                 return;
2033         }
2034
2035         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
2036                          "6138 Transport Abort NVME Request Issued for "
2037                          "ox_id x%x on reqtag x%x\n",
2038                          nvmereq_wqe->sli4_xritag,
2039                          abts_buf->iotag);
2040         return;
2041
2042 out_unlock:
2043         spin_unlock(&lpfc_nbuf->buf_lock);
2044         spin_unlock_irqrestore(&phba->hbalock, flags);
2045         return;
2046 }
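
/*
 * Editorial note (not in the original source): the abort path above
 * validates, in order, the lport, the hw queue handle, the request
 * private data, the unload and reset/flush state, the buffer-to-request
 * binding, TXCMPLQ membership, and any abort already in flight before
 * it builds the ABTS WQE on the same hardware queue as the original IO.
 */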
2047
2048 /* Declare and initialize an instance of the FC NVME template. */
2049 static struct nvme_fc_port_template lpfc_nvme_template = {
2050         /* initiator-based functions */
2051         .localport_delete  = lpfc_nvme_localport_delete,
2052         .remoteport_delete = lpfc_nvme_remoteport_delete,
2053         .create_queue = lpfc_nvme_create_queue,
2054         .delete_queue = lpfc_nvme_delete_queue,
2055         .ls_req       = lpfc_nvme_ls_req,
2056         .fcp_io       = lpfc_nvme_fcp_io_submit,
2057         .ls_abort     = lpfc_nvme_ls_abort,
2058         .fcp_abort    = lpfc_nvme_fcp_abort,
2059         .xmt_ls_rsp   = lpfc_nvme_xmt_ls_rsp,
2060
2061         .max_hw_queues = 1,
2062         .max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
2063         .max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
2064         .dma_boundary = 0xFFFFFFFF,
2065
2066         /* Sizes of additional private data for data structures.
2067          * The LS request private size is unused at this time.
2068          */
2069         .local_priv_sz = sizeof(struct lpfc_nvme_lport),
2070         .remote_priv_sz = sizeof(struct lpfc_nvme_rport),
2071         .lsrqst_priv_sz = 0,
2072         .fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
2073 };
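
/*
 * Editorial note (not in the original source): this template is passed
 * to nvme_fc_register_localport() in lpfc_nvme_create_localport() below.
 * max_sgl_segments and max_hw_queues are only placeholders here; they
 * are overwritten from cfg_nvme_seg_cnt and cfg_hdw_queue just before
 * registration.
 */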
2074
2075 /**
2076  * lpfc_get_nvme_buf - Get a nvme buffer from io_buf_list of the HBA
2077  * @phba: The HBA for which this call is being executed.
2078  * @ndlp: The node the IO is destined for (used for qdepth accounting).
2079  * @idx: Hardware queue index to allocate the buffer from.
2080  * @expedite: Nonzero allows use of the reserved/expedite buffer pool.
2081  *
2082  * Removes a nvme buffer from the head of the @idx hdwq io_buf_list.
2083  *
2084  * Return codes: NULL - Error; Pointer to lpfc_nvme_buf - Success
2085  **/
2086 static struct lpfc_io_buf *
2087 lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
2088                   int idx, int expedite)
2089 {
2090         struct lpfc_io_buf *lpfc_ncmd;
2091         struct lpfc_sli4_hdw_queue *qp;
2092         struct sli4_sge *sgl;
2093         struct lpfc_iocbq *pwqeq;
2094         union lpfc_wqe128 *wqe;
2095
2096         lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite);
2097
2098         if (lpfc_ncmd) {
2099                 pwqeq = &(lpfc_ncmd->cur_iocbq);
2100                 wqe = &pwqeq->wqe;
2101
2102                 /* Setup key fields in buffer that may have been changed
2103                  * if other protocols used this buffer.
2104                  */
2105                 pwqeq->iocb_flag = LPFC_IO_NVME;
2106                 pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
2107                 lpfc_ncmd->start_time = jiffies;
2108                 lpfc_ncmd->flags = 0;
2109
2110                 /* Rsp SGE will be filled in when we rcv an IO
2111                  * from the NVME Layer to be sent.
2112                  * The cmd is going to be embedded so we need a SKIP SGE.
2113                  */
2114                 sgl = lpfc_ncmd->dma_sgl;
2115                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2116                 bf_set(lpfc_sli4_sge_last, sgl, 0);
2117                 sgl->word2 = cpu_to_le32(sgl->word2);
2118                 /* Fill in word 3 / sgl_len during cmd submission */
2119
2120                 /* Initialize 64 bytes only */
2121                 memset(wqe, 0, sizeof(union lpfc_wqe));
2122
2123                 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
2124                         atomic_inc(&ndlp->cmd_pending);
2125                         lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
2126                 }
2127
2128         } else {
2129                 qp = &phba->sli4_hba.hdwq[idx];
2130                 qp->empty_io_bufs++;
2131         }
2132
2133         return  lpfc_ncmd;
2134 }
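
/*
 * Editorial note (not in the original source): when the qdepth check
 * succeeds above, the buffer is tagged LPFC_SBUF_BUMP_QDEPTH and
 * ndlp->cmd_pending is incremented; lpfc_release_nvme_buf() below
 * performs the matching decrement, so the flag keeps the per-node
 * pending count balanced for every IO.
 */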
2135
2136 /**
2137  * lpfc_release_nvme_buf - Return a nvme buffer back to hba nvme buf list.
2138  * @phba: The HBA for which this call is being executed.
2139  * @lpfc_ncmd: The nvme buffer which is being released.
2140  *
2141  * This routine releases the @lpfc_ncmd nvme buffer by adding it to the
2142  * tail of the @phba lpfc_io_buf_list. On SLI4, the XRI is tied to the
2143  * nvme buffer and cannot be reused for at least RA_TOV if the IO was
2144  * aborted.
2145  **/
2146 static void
2147 lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
2148 {
2149         struct lpfc_sli4_hdw_queue *qp;
2150         unsigned long iflag = 0;
2151
2152         if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
2153                 atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);
2154
2155         lpfc_ncmd->ndlp = NULL;
2156         lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
2157
2158         qp = lpfc_ncmd->hdwq;
2159         if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
2160                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2161                                 "6310 XB release deferred for "
2162                                 "ox_id x%x on reqtag x%x\n",
2163                                 lpfc_ncmd->cur_iocbq.sli4_xritag,
2164                                 lpfc_ncmd->cur_iocbq.iotag);
2165
2166                 spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
2167                 list_add_tail(&lpfc_ncmd->list,
2168                         &qp->lpfc_abts_io_buf_list);
2169                 qp->abts_nvme_io_bufs++;
2170                 spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
2171         } else
2172                 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
2173 }
2174
2175 /**
2176  * lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
2177  * @vport: the lpfc_vport instance requesting a localport.
2178  *
2179  * This routine is invoked to create an nvme localport instance to bind
2180  * to the nvme_fc_transport.  It is called once during driver load
2181  * like lpfc_create_shost after all other services are initialized.
2182  * It requires a vport, vpi, and wwns at call time.  Other localport
2183  * parameters are modified as the driver's FCID and the Fabric WWN
2184  * are established.
2185  *
2186  * Return codes
2187  *      0 - successful
2188  *      -ENOMEM - no heap memory available
2189  *      other values - from nvme registration upcall
2190  **/
2191 int
2192 lpfc_nvme_create_localport(struct lpfc_vport *vport)
2193 {
2194         int ret = 0;
2195         struct lpfc_hba  *phba = vport->phba;
2196         struct nvme_fc_port_info nfcp_info;
2197         struct nvme_fc_local_port *localport;
2198         struct lpfc_nvme_lport *lport;
2199
2200         /* Initialize this localport instance.  The vport wwn usage ensures
2201          * that NPIV is accounted for.
2202          */
2203         memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
2204         nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
2205         nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
2206         nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
2207
2208         /* We need to tell the transport layer + 1 because it takes page
2209          * alignment into account. When space for the SGL is allocated we
2210          * allocate + 3: one for cmd, one for rsp, and one for this alignment.
2211          */
2212         lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
2213
2214         /* Advertise how many hw queues we support based on cfg_hdw_queue,
2215          * which will not exceed cpu count.
2216          */
2217         lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
2218
2219         if (!IS_ENABLED(CONFIG_NVME_FC))
2220                 return ret;
2221
2222         /* localport is allocated from the stack, but the registration
2223          * call allocates heap memory as well as the private area.
2224          */
2225
2226         ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
2227                                          &vport->phba->pcidev->dev, &localport);
2228         if (!ret) {
2229                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
2230                                  "6005 Successfully registered local "
2231                                  "NVME port num %d, localP x%px, private "
2232                                  "x%px, sg_seg %d\n",
2233                                  localport->port_num, localport,
2234                                  localport->private,
2235                                  lpfc_nvme_template.max_sgl_segments);
2236
2237                 /* Private is our lport size declared in the template. */
2238                 lport = (struct lpfc_nvme_lport *)localport->private;
2239                 vport->localport = localport;
2240                 lport->vport = vport;
2241                 vport->nvmei_support = 1;
2242
2243                 atomic_set(&lport->xmt_fcp_noxri, 0);
2244                 atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
2245                 atomic_set(&lport->xmt_fcp_qdepth, 0);
2246                 atomic_set(&lport->xmt_fcp_err, 0);
2247                 atomic_set(&lport->xmt_fcp_wqerr, 0);
2248                 atomic_set(&lport->xmt_fcp_abort, 0);
2249                 atomic_set(&lport->xmt_ls_abort, 0);
2250                 atomic_set(&lport->xmt_ls_err, 0);
2251                 atomic_set(&lport->cmpl_fcp_xb, 0);
2252                 atomic_set(&lport->cmpl_fcp_err, 0);
2253                 atomic_set(&lport->cmpl_ls_xb, 0);
2254                 atomic_set(&lport->cmpl_ls_err, 0);
2255
2256                 atomic_set(&lport->fc4NvmeLsRequests, 0);
2257                 atomic_set(&lport->fc4NvmeLsCmpls, 0);
2258         }
2259
2260         return ret;
2261 }
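
/*
 * Editorial example (numbers assumed for illustration only): with
 * cfg_nvme_seg_cnt = 64, the template advertises max_sgl_segments = 65
 * to the transport to cover page alignment, while the driver's own SGL
 * allocation holds 64 + 3 entries - one each for the cmd IU, the rsp
 * IU, and the alignment slot described in the comment above.
 */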
2262
2263 #if (IS_ENABLED(CONFIG_NVME_FC))
2264 /* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
2265  *
2266  * The driver has to wait for the host nvme transport to callback
2267  * indicating the localport has successfully unregistered all
2268  * resources.  Since this is an uninterruptible wait, loop every ten
2269  * seconds and print a message indicating no progress.
2270  *
2271  * An uninterruptible wait is used because of the risk of transport-to-
2272  * driver state mismatch.
2273  */
2274 static void
2275 lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
2276                            struct lpfc_nvme_lport *lport,
2277                            struct completion *lport_unreg_cmp)
2278 {
2279         u32 wait_tmo;
2280         int ret, i, pending = 0;
2281         struct lpfc_sli_ring  *pring;
2282         struct lpfc_hba  *phba = vport->phba;
2283
2284         /* The host transport has to clean up and confirm, which requires
2285          * an indefinite wait. Print a message each time a 10 second wait
2286          * expires and renew the wait. Timing out here is unexpected.
2287          */
2288         wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
2289         while (true) {
2290                 ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
2291                 if (unlikely(!ret)) {
2292                         pending = 0;
2293                         for (i = 0; i < phba->cfg_hdw_queue; i++) {
2294                                 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
2295                                 if (!pring)
2296                                         continue;
2297                                 if (pring->txcmplq_cnt)
2298                                         pending += pring->txcmplq_cnt;
2299                         }
2300                         lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2301                                          "6176 Lport x%px Localport x%px wait "
2302                                          "timed out. Pending %d. Renewing.\n",
2303                                          lport, vport->localport, pending);
2304                         continue;
2305                 }
2306                 break;
2307         }
2308         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
2309                          "6177 Lport x%px Localport x%px Complete Success\n",
2310                          lport, vport->localport);
2311 }
2312 #endif
2313
2314 /**
2315  * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
2316  * @vport: pointer to a host virtual N_Port data structure.
2317  *
2318  * This routine is invoked to destroy all lports bound to the phba.
2319  * The lport memory was allocated by the nvme fc transport and is
2320  * released there.  This routine ensures all rports bound to the
2321  * lport have been disconnected.
2322  *
2323  **/
2324 void
2325 lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
2326 {
2327 #if (IS_ENABLED(CONFIG_NVME_FC))
2328         struct nvme_fc_local_port *localport;
2329         struct lpfc_nvme_lport *lport;
2330         int ret;
2331         DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);
2332
2333         if (vport->nvmei_support == 0)
2334                 return;
2335
2336         localport = vport->localport;
2337         lport = (struct lpfc_nvme_lport *)localport->private;
2338
2339         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2340                          "6011 Destroying NVME localport x%px\n",
2341                          localport);
2342
2343         /* lport's rport list is clear.  Unregister
2344          * lport and release resources.
2345          */
2346         lport->lport_unreg_cmp = &lport_unreg_cmp;
2347         ret = nvme_fc_unregister_localport(localport);
2348
2349         /* Wait for completion.  This either blocks
2350          * indefinitely or succeeds
2351          */
2352         lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
2353         vport->localport = NULL;
2354
2355         /* Regardless of the unregister upcall response, clear
2356          * nvmei_support.  All rports are unregistered and the
2357          * driver will clean up.
2358          */
2359         vport->nvmei_support = 0;
2360         if (ret == 0) {
2361                 lpfc_printf_vlog(vport,
2362                                  KERN_INFO, LOG_NVME_DISC,
2363                                  "6009 Unregistered lport Success\n");
2364         } else {
2365                 lpfc_printf_vlog(vport,
2366                                  KERN_INFO, LOG_NVME_DISC,
2367                                  "6010 Unregistered lport "
2368                                  "Failed, status x%x\n",
2369                                  ret);
2370         }
2371 #endif
2372 }
2373
2374 void
2375 lpfc_nvme_update_localport(struct lpfc_vport *vport)
2376 {
2377 #if (IS_ENABLED(CONFIG_NVME_FC))
2378         struct nvme_fc_local_port *localport;
2379         struct lpfc_nvme_lport *lport;
2380
2381         localport = vport->localport;
2382         if (!localport) {
2383                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
2384                                  "6710 Update NVME fail. No localport\n");
2385                 return;
2386         }
2387         lport = (struct lpfc_nvme_lport *)localport->private;
2388         if (!lport) {
2389                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
2390                                  "6171 Update NVME fail. localP x%px, No lport\n",
2391                                  localport);
2392                 return;
2393         }
2394         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2395                          "6012 Update NVME lport x%px did x%x\n",
2396                          localport, vport->fc_myDID);
2397
2398         localport->port_id = vport->fc_myDID;
2399         if (localport->port_id == 0)
2400                 localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
2401         else
2402                 localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;
2403
2404         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2405                          "6030 bound lport x%px to DID x%06x\n",
2406                          lport, localport->port_id);
2407 #endif
2408 }
2409
2410 int
2411 lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2412 {
2413 #if (IS_ENABLED(CONFIG_NVME_FC))
2414         int ret = 0;
2415         struct nvme_fc_local_port *localport;
2416         struct lpfc_nvme_lport *lport;
2417         struct lpfc_nvme_rport *rport;
2418         struct lpfc_nvme_rport *oldrport;
2419         struct nvme_fc_remote_port *remote_port;
2420         struct nvme_fc_port_info rpinfo;
2421         struct lpfc_nodelist *prev_ndlp = NULL;
2422         struct fc_rport *srport = ndlp->rport;
2423
2424         lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
2425                          "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
2426                          ndlp->nlp_DID, ndlp->nlp_type);
2427
2428         localport = vport->localport;
2429         if (!localport)
2430                 return 0;
2431
2432         lport = (struct lpfc_nvme_lport *)localport->private;
2433
2434         /* NVME rports are not preserved across devloss.
2435          * Just register this instance.  Note, rpinfo->dev_loss_tmo
2436          * is taken from the fc_rport when available, else from the
2437          * vport's configured devloss value.  The driver communicates
2438          * port role capabilities consistent with the PRLI response data.
2439          */
2440         memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info));
2441         rpinfo.port_id = ndlp->nlp_DID;
2442         if (ndlp->nlp_type & NLP_NVME_TARGET)
2443                 rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
2444         if (ndlp->nlp_type & NLP_NVME_INITIATOR)
2445                 rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
2446
2447         if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
2448                 rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
2449
2450         rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
2451         rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
2452         if (srport)
2453                 rpinfo.dev_loss_tmo = srport->dev_loss_tmo;
2454         else
2455                 rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo;
2456
2457         spin_lock_irq(&vport->phba->hbalock);
2458         oldrport = lpfc_ndlp_get_nrport(ndlp);
2459         if (oldrport) {
2460                 prev_ndlp = oldrport->ndlp;
2461                 spin_unlock_irq(&vport->phba->hbalock);
2462         } else {
2463                 spin_unlock_irq(&vport->phba->hbalock);
2464                 lpfc_nlp_get(ndlp);
2465         }
2466
2467         ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
2468         if (!ret) {
2469                 /* If the ndlp already has an nrport, this is just
2470                  * a resume of the existing rport.  Else this is a
2471                  * new rport.
2472                  */
2473                 /* Guard against an unregister/reregister
2474                  * race that leaves the WAIT flag set.
2475                  */
2476                 spin_lock_irq(&vport->phba->hbalock);
2477                 ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
2478                 spin_unlock_irq(&vport->phba->hbalock);
2479                 rport = remote_port->private;
2480                 if (oldrport) {
2481
2482                         /* Sever the ndlp<->rport association
2483                          * before dropping the ndlp ref from
2484                          * register.
2485                          */
2486                         spin_lock_irq(&vport->phba->hbalock);
2487                         ndlp->nrport = NULL;
2488                         ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
2489                         spin_unlock_irq(&vport->phba->hbalock);
2490                         rport->ndlp = NULL;
2491                         rport->remoteport = NULL;
2492
2493                         /* Reference only removed if previous NDLP is no longer
2494                          * active. It might be just a swap and removing the
2495                          * reference would cause a premature cleanup.
2496                          */
2497                         if (prev_ndlp && prev_ndlp != ndlp) {
2498                                 if ((!NLP_CHK_NODE_ACT(prev_ndlp)) ||
2499                                     (!prev_ndlp->nrport))
2500                                         lpfc_nlp_put(prev_ndlp);
2501                         }
2502                 }
2503
2504                 /* Cleanly bind the rport to the ndlp. */
2505                 rport->remoteport = remote_port;
2506                 rport->lport = lport;
2507                 rport->ndlp = ndlp;
2508                 spin_lock_irq(&vport->phba->hbalock);
2509                 ndlp->nrport = rport;
2510                 spin_unlock_irq(&vport->phba->hbalock);
2511                 lpfc_printf_vlog(vport, KERN_INFO,
2512                                  LOG_NVME_DISC | LOG_NODE,
2513                                  "6022 Bind lport x%px to remoteport x%px "
2514                                  "rport x%px WWNN 0x%llx, "
2515                                  "Rport WWPN 0x%llx DID "
2516                                  "x%06x Role x%x, ndlp %p prev_ndlp x%px\n",
2517                                  lport, remote_port, rport,
2518                                  rpinfo.node_name, rpinfo.port_name,
2519                                  rpinfo.port_id, rpinfo.port_role,
2520                                  ndlp, prev_ndlp);
2521         } else {
2522                 lpfc_printf_vlog(vport, KERN_ERR,
2523                                  LOG_TRACE_EVENT,
2524                                  "6031 RemotePort Registration failed "
2525                                  "err: %d, DID x%06x\n",
2526                                  ret, ndlp->nlp_DID);
2527         }
2528
2529         return ret;
2530 #else
2531         return 0;
2532 #endif
2533 }
2534
2535 /**
2536  * lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport
2537  *
2538  * If the ndlp represents an NVME Target that we are logged into,
2539  * ping the NVME FC Transport layer to initiate a device rescan
2540  * on this remote NPort.
2541  */
2542 void
2543 lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2544 {
2545 #if (IS_ENABLED(CONFIG_NVME_FC))
2546         struct lpfc_nvme_rport *nrport;
2547         struct nvme_fc_remote_port *remoteport = NULL;
2548
2549         spin_lock_irq(&vport->phba->hbalock);
2550         nrport = lpfc_ndlp_get_nrport(ndlp);
2551         if (nrport)
2552                 remoteport = nrport->remoteport;
2553         spin_unlock_irq(&vport->phba->hbalock);
2554
2555         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2556                          "6170 Rescan NPort DID x%06x type x%x "
2557                          "state x%x nrport x%px remoteport x%px\n",
2558                          ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state,
2559                          nrport, remoteport);
2560
2561         if (!nrport || !remoteport)
2562                 goto rescan_exit;
2563
2564         /* Rescan only if the remoteport has the DISCOVERY role and the node is MAPPED */
2565         if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
2566             ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
2567                 nvme_fc_rescan_remoteport(remoteport);
2568
2569                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2570                                  "6172 NVME rescanned DID x%06x "
2571                                  "port_state x%x\n",
2572                                  ndlp->nlp_DID, remoteport->port_state);
2573         }
2574         return;
2575  rescan_exit:
2576         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2577                          "6169 Skip NVME Rport Rescan, NVME remoteport "
2578                          "unregistered\n");
2579 #endif
2580 }
2581
2582 /* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
2583  *
2584  * There is no notion of Devloss or rport recovery from the current
2585  * nvme_transport perspective.  Loss of an rport just means IO cannot
2586  * be sent and recovery is completely up to the initiator.
2587  * For now, the driver just unbinds the DID and port_role so that
2588  * no further IO can be issued.  Changes are planned for later.
2589  *
2590  * Notes - the ndlp reference count is not decremented here since
2591  * there is no nvme_transport api for devloss.  The node ref count
2592  * is only adjusted in driver unload.
2593  */
2594 void
2595 lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2596 {
2597 #if (IS_ENABLED(CONFIG_NVME_FC))
2598         int ret;
2599         struct nvme_fc_local_port *localport;
2600         struct lpfc_nvme_lport *lport;
2601         struct lpfc_nvme_rport *rport;
2602         struct nvme_fc_remote_port *remoteport = NULL;
2603
2604         localport = vport->localport;
2605
        /* This is a fundamental error.  The localport is always
         * available until driver unload.  Just exit.
         */
2609         if (!localport)
2610                 return;
2611
2612         lport = (struct lpfc_nvme_lport *)localport->private;
2613         if (!lport)
2614                 goto input_err;
2615
2616         spin_lock_irq(&vport->phba->hbalock);
2617         rport = lpfc_ndlp_get_nrport(ndlp);
2618         if (rport)
2619                 remoteport = rport->remoteport;
2620         spin_unlock_irq(&vport->phba->hbalock);
2621         if (!remoteport)
2622                 goto input_err;
2623
2624         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2625                          "6033 Unreg nvme remoteport x%px, portname x%llx, "
2626                          "port_id x%06x, portstate x%x port type x%x\n",
2627                          remoteport, remoteport->port_name,
2628                          remoteport->port_id, remoteport->port_state,
2629                          ndlp->nlp_type);
2630
2631         /* Sanity check ndlp type.  Only call for NVME ports. Don't
2632          * clear any rport state until the transport calls back.
2633          */
2634
2635         if (ndlp->nlp_type & NLP_NVME_TARGET) {
2636                 /* No concern about the role change on the nvme remoteport.
2637                  * The transport will update it.
2638                  */
2639                 ndlp->upcall_flags |= NLP_WAIT_FOR_UNREG;
2640
                /* Don't let the host nvme transport keep sending keep-alives
                 * on this remoteport.  The vport is unloading, no recovery.
                 * The return value is ignored; the upcall is a courtesy to
                 * the transport.
                 */
2646                 if (vport->load_flag & FC_UNLOADING)
2647                         (void)nvme_fc_set_remoteport_devloss(remoteport, 0);
2648
2649                 ret = nvme_fc_unregister_remoteport(remoteport);
2650                 if (ret != 0) {
2651                         lpfc_nlp_put(ndlp);
2652                         lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2653                                          "6167 NVME unregister failed %d "
2654                                          "port_state x%x\n",
2655                                          ret, remoteport->port_state);
2656                 }
2657         }
2658         return;
2659
2660  input_err:
2661 #endif
2662         lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2663                          "6168 State error: lport x%px, rport x%px FCID x%06x\n",
2664                          vport->localport, ndlp->rport, ndlp->nlp_DID);
2665 }
2666
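/*
 * Illustrative only: nvme_fc_unregister_remoteport() above completes
 * asynchronously.  A hypothetical caller that must not recycle the ndlp
 * until the transport's remoteport_delete upcall has run could poll the
 * NLP_WAIT_FOR_UNREG flag set above.  A sketch, assuming the upcall is
 * what clears the flag; this loop is not in the driver:
 */
#if 0
        lpfc_nvme_unregister_port(vport, ndlp);
        while (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG)
                msleep(10);     /* wait for the transport upcall */
#endif
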
2667 /**
2668  * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
2669  * @phba: pointer to lpfc hba data structure.
2670  * @axri: pointer to the fcp xri abort wcqe structure.
2671  * @lpfc_ncmd: The nvme job structure for the request being aborted.
2672  *
2673  * This routine is invoked by the worker thread to process a SLI4 fast-path
2674  * NVME aborted xri.  Aborted NVME IO commands are completed to the transport
2675  * here.
2676  **/
2677 void
2678 lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
2679                            struct sli4_wcqe_xri_aborted *axri,
2680                            struct lpfc_io_buf *lpfc_ncmd)
2681 {
2682         uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
2683         struct nvmefc_fcp_req *nvme_cmd = NULL;
        struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp;

2687         if (ndlp)
2688                 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
2689
2690         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                        "6311 nvme_cmd x%px xri x%x tag x%x abort complete and "
2692                         "xri released\n",
2693                         lpfc_ncmd->nvmeCmd, xri,
2694                         lpfc_ncmd->cur_iocbq.iotag);
2695
        /* An aborted NVME command must not be completed to the
         * transport until the abort exchange fully completes.
         * Once complete, the IO buffer is returned via the put list.
         */
2700         if (lpfc_ncmd->nvmeCmd) {
2701                 nvme_cmd = lpfc_ncmd->nvmeCmd;
2702                 nvme_cmd->done(nvme_cmd);
2703                 lpfc_ncmd->nvmeCmd = NULL;
2704         }
2705         lpfc_release_nvme_buf(phba, lpfc_ncmd);
2706 }
2707
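/*
 * Illustrative only: the NULL check and clear of nvmeCmd above is the
 * driver's complete-once guard; lpfc_nvme_cancel_iocb() below uses the
 * same shape.  A minimal sketch, assuming buf_lock protects nvmeCmd:
 */
#if 0
        struct nvmefc_fcp_req *nvme_cmd;

        spin_lock(&lpfc_ncmd->buf_lock);
        nvme_cmd = lpfc_ncmd->nvmeCmd;
        lpfc_ncmd->nvmeCmd = NULL;              /* claim the completion */
        spin_unlock(&lpfc_ncmd->buf_lock);
        if (nvme_cmd)
                nvme_cmd->done(nvme_cmd);       /* complete exactly once */
#endif
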
2708 /**
2709  * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
2710  * @phba: Pointer to HBA context object.
2711  *
 * This function flushes all wqes in the nvme rings and frees all resources
 * in the txcmplq.  This function does not issue abort wqes for the IO
 * commands in the txcmplq; they will just be returned with
 * IOERR_SLI_DOWN.  This function is invoked by EEH when the device's
 * PCI slot has been permanently disabled.
2717  **/
2718 void
2719 lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
2720 {
2721         struct lpfc_sli_ring  *pring;
2722         u32 i, wait_cnt = 0;
2723
2724         if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
2725                 return;
2726
2727         /* Cycle through all IO rings and make sure all outstanding
2728          * WQEs have been removed from the txcmplqs.
2729          */
2730         for (i = 0; i < phba->cfg_hdw_queue; i++) {
2731                 if (!phba->sli4_hba.hdwq[i].io_wq)
2732                         continue;
2733                 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
2734
2735                 if (!pring)
2736                         continue;
2737
2738                 /* Retrieve everything on the txcmplq */
2739                 while (!list_empty(&pring->txcmplq)) {
2740                         msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
2741                         wait_cnt++;
2742
                        /* The sleep is 10 ms.  Every ten seconds
                         * (1000 polls), log a message.  Something
                         * is wrong.
                         */
2746                         if ((wait_cnt % 1000) == 0) {
2747                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2748                                                 "6178 NVME IO not empty, "
2749                                                 "cnt %d\n", wait_cnt);
2750                         }
2751                 }
2752         }
2753 }
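
/*
 * Illustrative only: the drain loop above is a 10 ms poll
 * (LPFC_XRI_EXCH_BUSY_WAIT_T1) that warns every 1000 iterations,
 * i.e. every 10 ms * 1000 = 10 seconds.  The pattern in isolation:
 */
#if 0
        u32 wait_cnt = 0;

        while (!list_empty(&pring->txcmplq)) {
                msleep(10);
                if ((++wait_cnt % 1000) == 0)
                        pr_warn("txcmplq not empty after ~%u s\n",
                                wait_cnt / 100);
        }
#endif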
2754
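/**
 * lpfc_nvme_cancel_iocb - Complete an NVME FCP request locally as failed
 * @phba: pointer to lpfc hba data structure.
 * @pwqeIn: pointer to the driver wqe/iocb being cancelled.
 *
 * Completes the nvmefc request bound to @pwqeIn back to the transport
 * with NVME_SC_INTERNAL status and releases the driver IO buffer.
 * Abort wqes are simply released; the original IO wqe does the done call.
 */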
2755 void
2756 lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn)
2757 {
2758 #if (IS_ENABLED(CONFIG_NVME_FC))
2759         struct lpfc_io_buf *lpfc_ncmd;
2760         struct nvmefc_fcp_req *nCmd;
2761         struct lpfc_nvme_fcpreq_priv *freqpriv;
2762
2763         if (!pwqeIn->context1) {
2764                 lpfc_sli_release_iocbq(phba, pwqeIn);
2765                 return;
2766         }
        /* For an abort iocb just return; the IO iocb will do the done call */
2768         if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) ==
2769             CMD_ABORT_XRI_CX) {
2770                 lpfc_sli_release_iocbq(phba, pwqeIn);
2771                 return;
2772         }
2773         lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1;
2774
2775         spin_lock(&lpfc_ncmd->buf_lock);
2776         if (!lpfc_ncmd->nvmeCmd) {
2777                 spin_unlock(&lpfc_ncmd->buf_lock);
2778                 lpfc_release_nvme_buf(phba, lpfc_ncmd);
2779                 return;
2780         }
2781
2782         nCmd = lpfc_ncmd->nvmeCmd;
2783         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
                        "6194 NVME Cancel xri x%x\n",
2785                         lpfc_ncmd->cur_iocbq.sli4_xritag);
2786
2787         nCmd->transferred_length = 0;
2788         nCmd->rcv_rsplen = 0;
2789         nCmd->status = NVME_SC_INTERNAL;
2790         freqpriv = nCmd->private;
2791         freqpriv->nvme_buf = NULL;
2792         lpfc_ncmd->nvmeCmd = NULL;
2793
2794         spin_unlock(&lpfc_ncmd->buf_lock);
2795         nCmd->done(nCmd);
2796
2797         /* Call release with XB=1 to queue the IO into the abort list. */
2798         lpfc_release_nvme_buf(phba, lpfc_ncmd);
2799 #endif
2800 }